Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue
author     Jakub Kicinski <kuba@kernel.org>    Sat, 30 Jul 2022 04:26:09 +0000 (21:26 -0700)
committer  Jakub Kicinski <kuba@kernel.org>    Sat, 30 Jul 2022 04:26:10 +0000 (21:26 -0700)
Tony Nguyen says:

====================
100GbE Intel Wired LAN Driver Updates 2022-07-28

This series contains updates to ice driver only.

Michal allows for VF true promiscuous mode to be set for multiple VFs
and adds clearing of promiscuous filters when VF trust is removed.

Maciej refactors ice_set_features() to track/check changed features
instead of constantly checking against netdev features and adds support for
NETIF_F_LOOPBACK.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  ice: allow toggling loopback mode via ndo_set_features callback
  ice: compress branches in ice_set_features()
  ice: Fix promiscuous mode not turning off
  ice: Introduce enabling promiscuous mode on multiple VF's
====================

Link: https://lore.kernel.org/r/20220728195538.3391360-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
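
For illustration only, a minimal sketch of the changed-features pattern the pull message describes for ice_set_features(): the callback XORs the requested feature set against the currently active one and reacts only to bits that actually toggled, here NETIF_F_LOOPBACK. The example_set_loopback() helper is hypothetical and merely stands in for whatever the driver really calls; this is not the ice implementation itself.

#include <linux/netdevice.h>

/* Hypothetical helper standing in for the driver's real loopback toggle. */
static int example_set_loopback(struct net_device *netdev, bool ena)
{
	netdev_info(netdev, "loopback %s\n", ena ? "enabled" : "disabled");
	return 0;
}

/*
 * ndo_set_features-style callback: compute which feature bits differ from
 * the currently active set and act only on those, instead of re-checking
 * every feature against netdev->features on each call.
 */
static int example_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;

	if (changed & NETIF_F_LOOPBACK)
		return example_set_loopback(netdev,
					    !!(features & NETIF_F_LOOPBACK));

	return 0;
}

A driver would hook such a callback up through its net_device_ops as .ndo_set_features, and userspace would then exercise it with the usual ethtool feature toggles.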
319 files changed:
.mailmap
Documentation/admin-guide/kernel-parameters.txt
Documentation/devicetree/bindings/net/ethernet-controller.yaml
Documentation/devicetree/bindings/net/fsl,fec.yaml
Documentation/devicetree/bindings/net/nfc/marvell,nci.yaml
Documentation/devicetree/bindings/net/nfc/st,st-nci.yaml
Documentation/devicetree/bindings/net/nfc/st,st95hf.yaml
Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml
Documentation/networking/devlink/devlink-selftests.rst [new file with mode: 0644]
Documentation/networking/ip-sysctl.rst
Documentation/virt/kvm/api.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/lan966x.dtsi
arch/arm/mach-pxa/corgi.c
arch/arm/mach-pxa/hx4700.c
arch/arm/mach-pxa/icontrol.c
arch/arm/mach-pxa/littleton.c
arch/arm/mach-pxa/magician.c
arch/arm/mach-pxa/spitz.c
arch/arm/mach-pxa/z2.c
arch/riscv/Makefile
arch/riscv/boot/dts/canaan/canaan_kd233.dts
arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
arch/riscv/kernel/Makefile
arch/riscv/kernel/elf_kexec.c
arch/s390/include/asm/archrandom.h
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/events/intel/lbr.c
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/alternative.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kvm/x86.c
certs/Kconfig
drivers/acpi/cppc_acpi.c
drivers/clk/clk-lan966x.c
drivers/firewire/net.c
drivers/gpio/gpio-pca953x.c
drivers/gpio/gpio-xilinx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/drm_gem_ttm_helper.c
drivers/gpu/drm/i915/gt/intel_context_types.h
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
drivers/gpu/drm/i915/gt/intel_lrc.h
drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
drivers/gpu/drm/i915/gt/uc/intel_guc.h
drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
drivers/gpu/drm/imx/dcss/dcss-dev.c
drivers/gpu/drm/panel/panel-edp.c
drivers/gpu/drm/scheduler/sched_entity.c
drivers/i2c/busses/i2c-cadence.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-mlxcpld.c
drivers/md/raid5.c
drivers/misc/lkdtm/Makefile
drivers/mmc/host/sdhci-omap.c
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
drivers/net/amt.c
drivers/net/dsa/qca/Makefile
drivers/net/dsa/qca/qca8k-8xxx.c [new file with mode: 0644]
drivers/net/dsa/qca/qca8k-common.c [new file with mode: 0644]
drivers/net/dsa/qca/qca8k.c [deleted file]
drivers/net/dsa/qca/qca8k.h
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
drivers/net/ethernet/fungible/funeth/funeth_rx.c
drivers/net/ethernet/fungible/funeth/funeth_tx.c
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_sriov.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_virtchnl.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
drivers/net/ethernet/marvell/prestera/prestera_main.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx4/catas.c
drivers/net/ethernet/mellanox/mlx4/crdump.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/devlink.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/sriov.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/core.h
drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
drivers/net/ethernet/mellanox/mlxsw/pci.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
drivers/net/ethernet/netronome/nfp/bpf/jit.c
drivers/net/ethernet/sfc/Makefile
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/ef100.c
drivers/net/ethernet/sfc/ef100_netdev.c
drivers/net/ethernet/sfc/ef100_nic.c
drivers/net/ethernet/sfc/ef100_nic.h
drivers/net/ethernet/sfc/ef100_rep.c
drivers/net/ethernet/sfc/ef100_rep.h
drivers/net/ethernet/sfc/ef100_rx.c
drivers/net/ethernet/sfc/ef10_sriov.c
drivers/net/ethernet/sfc/ethtool_common.c
drivers/net/ethernet/sfc/filter.h
drivers/net/ethernet/sfc/mae.c
drivers/net/ethernet/sfc/mae.h
drivers/net/ethernet/sfc/mcdi.h
drivers/net/ethernet/sfc/mcdi_filters.c
drivers/net/ethernet/sfc/mcdi_filters.h
drivers/net/ethernet/sfc/mcdi_pcol_mae.h [new file with mode: 0644]
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/ptp.c
drivers/net/ethernet/sfc/rx_common.c
drivers/net/ethernet/sfc/tc.c [new file with mode: 0644]
drivers/net/ethernet/sfc/tc.h [new file with mode: 0644]
drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
drivers/net/geneve.c
drivers/net/ipa/ipa_qmi_msg.h
drivers/net/macsec.c
drivers/net/netdevsim/bpf.c
drivers/net/pcs/pcs-xpcs.c
drivers/net/sungem_phy.c
drivers/net/usb/catc.c
drivers/net/usb/cdc_subset.c
drivers/net/usb/kaweth.c
drivers/net/usb/plusb.c
drivers/net/usb/usbnet.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vxlan/vxlan_core.c
drivers/net/wireless/ath/ath11k/ahb.c
drivers/net/wireless/ath/ath11k/core.c
drivers/net/wireless/ath/ath11k/core.h
drivers/net/wireless/ath/ath11k/hw.h
drivers/net/wireless/ath/ath11k/mac.c
drivers/net/wireless/ath/ath11k/mac.h
drivers/net/wireless/ath/ath11k/pci.c
drivers/net/wireless/ath/ath11k/pcic.c
drivers/net/wireless/ath/ath11k/pcic.h
drivers/net/wireless/ath/ath11k/wmi.c
drivers/net/wireless/ath/ath11k/wmi.h
drivers/net/wireless/ath/wcn36xx/Makefile
drivers/net/wireless/ath/wcn36xx/debug.c
drivers/net/wireless/ath/wcn36xx/debug.h
drivers/net/wireless/ath/wcn36xx/firmware.c [new file with mode: 0644]
drivers/net/wireless/ath/wcn36xx/firmware.h [new file with mode: 0644]
drivers/net/wireless/ath/wcn36xx/hal.h
drivers/net/wireless/ath/wcn36xx/main.c
drivers/net/wireless/ath/wcn36xx/smd.c
drivers/net/wireless/ath/wcn36xx/smd.h
drivers/net/wireless/ath/wil6210/debugfs.c
drivers/net/wireless/broadcom/b43/main.c
drivers/net/wireless/broadcom/b43legacy/main.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/marvell/libertas/if_usb.c
drivers/net/wireless/marvell/mwifiex/pcie.c
drivers/net/wireless/marvell/mwifiex/sdio.c
drivers/net/wireless/marvell/mwl8k.c
drivers/net/wireless/microchip/wilc1000/cfg80211.c
drivers/net/wireless/microchip/wilc1000/hif.c
drivers/net/wireless/microchip/wilc1000/hif.h
drivers/net/wireless/microchip/wilc1000/netdev.c
drivers/net/wireless/microchip/wilc1000/netdev.h
drivers/net/wireless/microchip/wilc1000/sdio.c
drivers/net/wireless/microchip/wilc1000/spi.c
drivers/net/wireless/microchip/wilc1000/wlan.c
drivers/net/wireless/microchip/wilc1000/wlan.h
drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
drivers/net/wireless/purelifi/plfxlc/usb.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
drivers/net/wireless/realtek/rtlwifi/core.c
drivers/net/wireless/realtek/rtw88/main.c
drivers/net/wireless/realtek/rtw89/pci.c
drivers/net/wireless/realtek/rtw89/rtw8852a_table.c
drivers/net/wireless/ti/wl12xx/main.c
drivers/pinctrl/Kconfig
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
drivers/pinctrl/pinctrl-ocelot.c
drivers/pinctrl/ralink/pinctrl-ralink.c
drivers/pinctrl/sunplus/sppctl.c
drivers/ptp/Kconfig
drivers/s390/net/qeth_core_main.c
drivers/spi/spi-bcm2835.c
drivers/spi/spi-cadence.c
drivers/spi/spi-rspi.c
drivers/virt/coco/sev-guest/sev-guest.c
fs/io_uring.c
fs/ntfs/attrib.c
fs/ocfs2/ocfs2.h
fs/ocfs2/slot_map.c
fs/ocfs2/super.c
fs/read_write.c
fs/userfaultfd.c
include/asm-generic/io.h
include/asm-generic/tlb.h
include/drm/gpu_scheduler.h
include/linux/mm.h
include/net/addrconf.h
include/net/bluetooth/l2cap.h
include/net/devlink.h
include/net/firewire.h
include/net/inet_connection_sock.h
include/net/ip_tunnels.h
include/net/sock.h
include/net/tcp.h
include/net/tls.h
include/uapi/asm-generic/fcntl.h
include/uapi/linux/devlink.h
include/uapi/linux/kvm.h
include/uapi/linux/seg6_iptunnel.h
kernel/bpf/btf.c
kernel/bpf/devmap.c
kernel/bpf/trampoline.c
kernel/rcu/srcutree.c
kernel/sched/deadline.c
kernel/watch_queue.c
mm/gup.c
mm/hugetlb.c
mm/kfence/core.c
mm/memory.c
mm/memremap.c
mm/secretmem.c
mm/shmem.c
net/bluetooth/hci_sync.c
net/bluetooth/l2cap_core.c
net/bluetooth/mgmt.c
net/bridge/br_netlink.c
net/caif/caif_socket.c
net/core/devlink.c
net/core/filter.c
net/decnet/af_decnet.c
net/decnet/dn_route.c
net/dsa/switch.c
net/ipv4/fib_trie.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_output.c
net/ipv6/mcast.c
net/ipv6/ping.c
net/ipv6/seg6_iptunnel.c
net/ipv6/tcp_ipv6.c
net/mptcp/options.c
net/mptcp/protocol.c
net/mptcp/subflow.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_queue.c
net/packet/af_packet.c
net/sctp/associola.c
net/sctp/stream.c
net/sctp/stream_sched.c
net/tipc/socket.c
net/tls/tls_device.c
net/tls/tls_strp.c
net/tls/tls_sw.c
sound/soc/rockchip/rockchip_i2s.c
tools/bpf/bpftool/gen.c
tools/bpf/bpftool/prog.c
tools/include/uapi/asm-generic/fcntl.h
tools/include/uapi/linux/kvm.h
tools/lib/bpf/bpf.c
tools/lib/bpf/bpf.h
tools/lib/bpf/bpf_tracing.h
tools/lib/bpf/libbpf.c
tools/lib/bpf/libbpf.map
tools/testing/selftests/bpf/DENYLIST [new file with mode: 0644]
tools/testing/selftests/bpf/DENYLIST.s390x [new file with mode: 0644]
tools/testing/selftests/bpf/config
tools/testing/selftests/bpf/config.s390x [new file with mode: 0644]
tools/testing/selftests/bpf/config.x86_64 [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/probe_user.c
tools/testing/selftests/bpf/prog_tests/send_signal.c
tools/testing/selftests/bpf/prog_tests/test_tunnel.c
tools/testing/selftests/bpf/progs/test_probe_user.c
tools/testing/selftests/bpf/progs/test_tunnel_kern.c
tools/testing/selftests/bpf/test_progs.c
tools/testing/selftests/bpf/vmtest.sh
tools/testing/selftests/drivers/net/dsa/Makefile [new file with mode: 0644]
tools/testing/selftests/gpio/Makefile
tools/testing/selftests/kvm/rseq_test.c
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh [new file with mode: 0755]
tools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh [new file with mode: 0755]
tools/testing/selftests/net/tls.c

index 13e4f50..71577c3 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -60,6 +60,10 @@ Arnd Bergmann <arnd@arndb.de>
 Atish Patra <atishp@atishpatra.org> <atish.patra@wdc.com>
 Axel Dyks <xl@xlsigned.net>
 Axel Lin <axel.lin@gmail.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@linaro.org>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@spreadtrum.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@unisoc.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang7@gmail.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
 Ben Gardner <bgardner@wabtec.com>
@@ -135,6 +139,8 @@ Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
 Frank Zago <fzago@systemfabricworks.com>
 Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
 Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
+Gao Xiang <xiang@kernel.org> <hsiangkao@linux.alibaba.com>
+Gao Xiang <xiang@kernel.org> <hsiangkao@redhat.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@linux.vnet.ibm.com>
@@ -371,6 +377,7 @@ Sean Nyekjaer <sean@geanix.com> <sean.nyekjaer@prevas.dk>
 Sebastian Reichel <sre@kernel.org> <sebastian.reichel@collabora.co.uk>
 Sebastian Reichel <sre@kernel.org> <sre@debian.org>
 Sedat Dilek <sedat.dilek@gmail.com> <sedat.dilek@credativ.de>
+Seth Forshee <sforshee@kernel.org> <seth.forshee@canonical.com>
 Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
 Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com>
 Shuah Khan <shuah@kernel.org> <shuah.khan@hp.com>
index f2d26cb..c0fdb04 100644 (file)
                        expediting.  Set to zero to disable automatic
                        expediting.
 
+       srcutree.srcu_max_nodelay [KNL]
+                       Specifies the number of no-delay instances
+                       per jiffy for which the SRCU grace period
+                       worker thread will be rescheduled with zero
+                       delay. Beyond this limit, worker thread will
+                       be rescheduled with a sleep delay of one jiffy.
+
+       srcutree.srcu_max_nodelay_phase [KNL]
+                       Specifies the per-grace-period phase, number of
+                       non-sleeping polls of readers. Beyond this limit,
+                       grace period worker thread will be rescheduled
+                       with a sleep delay of one jiffy, between each
+                       rescan of the readers, for a grace period phase.
+
+       srcutree.srcu_retry_check_delay [KNL]
+                       Specifies number of microseconds of non-sleeping
+                       delay between each non-sleeping poll of readers.
+
        srcutree.small_contention_lim [KNL]
                        Specifies the number of update-side contention
                        events per jiffy will be tolerated before
index 56d9aca..c138a10 100644 (file)
@@ -155,70 +155,65 @@ properties:
       - in-band-status
 
   fixed-link:
-    allOf:
-      - if:
-          type: array
-        then:
-          deprecated: true
-          items:
-            - minimum: 0
-              maximum: 31
-              description:
-                Emulated PHY ID, choose any but unique to the all
-                specified fixed-links
-
-            - enum: [0, 1]
-              description:
-                Duplex configuration. 0 for half duplex or 1 for
-                full duplex
-
-            - enum: [10, 100, 1000, 2500, 10000]
-              description:
-                Link speed in Mbits/sec.
-
-            - enum: [0, 1]
-              description:
-                Pause configuration. 0 for no pause, 1 for pause
-
-            - enum: [0, 1]
-              description:
-                Asymmetric pause configuration. 0 for no asymmetric
-                pause, 1 for asymmetric pause
-
-
-      - if:
-          type: object
-        then:
-          properties:
-            speed:
-              description:
-                Link speed.
-              $ref: /schemas/types.yaml#/definitions/uint32
-              enum: [10, 100, 1000, 2500, 10000]
-
-            full-duplex:
-              $ref: /schemas/types.yaml#/definitions/flag
-              description:
-                Indicates that full-duplex is used. When absent, half
-                duplex is assumed.
-
-            pause:
-              $ref: /schemas/types.yaml#definitions/flag
-              description:
-                Indicates that pause should be enabled.
-
-            asym-pause:
-              $ref: /schemas/types.yaml#/definitions/flag
-              description:
-                Indicates that asym_pause should be enabled.
-
-            link-gpios:
-              maxItems: 1
-              description:
-                GPIO to determine if the link is up
-
-          required:
-            - speed
+    oneOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32-array
+        deprecated: true
+        items:
+          - minimum: 0
+            maximum: 31
+            description:
+              Emulated PHY ID, choose any but unique to the all
+              specified fixed-links
+
+          - enum: [0, 1]
+            description:
+              Duplex configuration. 0 for half duplex or 1 for
+              full duplex
+
+          - enum: [10, 100, 1000, 2500, 10000]
+            description:
+              Link speed in Mbits/sec.
+
+          - enum: [0, 1]
+            description:
+              Pause configuration. 0 for no pause, 1 for pause
+
+          - enum: [0, 1]
+            description:
+              Asymmetric pause configuration. 0 for no asymmetric
+              pause, 1 for asymmetric pause
+      - type: object
+        additionalProperties: false
+        properties:
+          speed:
+            description:
+              Link speed.
+            $ref: /schemas/types.yaml#/definitions/uint32
+            enum: [10, 100, 1000, 2500, 10000]
+
+          full-duplex:
+            $ref: /schemas/types.yaml#/definitions/flag
+            description:
+              Indicates that full-duplex is used. When absent, half
+              duplex is assumed.
+
+          pause:
+            $ref: /schemas/types.yaml#definitions/flag
+            description:
+              Indicates that pause should be enabled.
+
+          asym-pause:
+            $ref: /schemas/types.yaml#/definitions/flag
+            description:
+              Indicates that asym_pause should be enabled.
+
+          link-gpios:
+            maxItems: 1
+            description:
+              GPIO to determine if the link is up
+
+        required:
+          - speed
 
 allOf:
   - if:
index 85a8d8f..924af2d 100644 (file)
@@ -187,6 +187,7 @@ properties:
       Should specify the gpio for phy reset.
 
   phy-reset-duration:
+    $ref: /schemas/types.yaml#/definitions/uint32
     deprecated: true
     description:
       Reset duration in milliseconds.  Should present only if property
@@ -195,12 +196,14 @@ properties:
       and 1 millisecond will be used instead.
 
   phy-reset-active-high:
+    type: boolean
     deprecated: true
     description:
       If present then the reset sequence using the GPIO specified in the
       "phy-reset-gpios" property is reversed (H=reset state, L=operation state).
 
   phy-reset-post-delay:
+    $ref: /schemas/types.yaml#/definitions/uint32
     deprecated: true
     description:
       Post reset delay in milliseconds. If present then a delay of phy-reset-post-delay
index 1bcaf6b..a191a04 100644 (file)
@@ -58,7 +58,6 @@ properties:
 
   spi-cpha: true
   spi-cpol: true
-  spi-max-frequency: true
 
 required:
   - compatible
@@ -85,6 +84,7 @@ allOf:
           contains:
             const: marvell,nfc-spi
     then:
+      $ref: /schemas/spi/spi-peripheral-props.yaml#
       properties:
         break-control: false
         flow-control: false
@@ -108,7 +108,7 @@ allOf:
         spi-max-frequency: false
         reg: false
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index ef11550..1dcbddb 100644 (file)
@@ -30,8 +30,6 @@ properties:
   reg:
     maxItems: 1
 
-  spi-max-frequency: true
-
   uicc-present:
     type: boolean
     description: |
@@ -55,10 +53,11 @@ then:
   properties:
     spi-max-frequency: false
 else:
+  $ref: /schemas/spi/spi-peripheral-props.yaml#
   required:
     - spi-max-frequency
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 963d953..6475690 100644 (file)
@@ -25,8 +25,6 @@ properties:
   st95hfvin-supply:
     description: ST95HF transceiver's Vin regulator supply
 
-  spi-max-frequency: true
-
 required:
   - compatible
   - enable-gpio
@@ -34,7 +32,10 @@ required:
   - reg
   - spi-max-frequency
 
-additionalProperties: false
+allOf:
+  - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
 
 examples:
   - |
index 404c8df..9cc236e 100644 (file)
@@ -40,8 +40,6 @@ properties:
   reg:
     maxItems: 1
 
-  spi-max-frequency: true
-
   ti,enable-gpios:
     minItems: 1
     maxItems: 2
@@ -65,7 +63,10 @@ required:
   - ti,enable-gpios
   - vin-supply
 
-additionalProperties: false
+allOf:
+  - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
 
 examples:
   - |
index c11f23b..53b4153 100644 (file)
@@ -75,6 +75,16 @@ properties:
     items:
       pattern: '^[A-Z][A-Z]-[A-Z][0-9A-Z]-[0-9]+$'
 
+  brcm,ccode-map-trivial:
+    description: |
+      Use a trivial mapping of ISO3166 country codes to brcmfmac firmware
+      country code and revision: cc -> { cc, 0 }. In other words, assume that
+      the CLM blob firmware uses ISO3166 country codes as well, and that all
+      revisions are zero. This property is mutually exclusive with
+      brcm,ccode-map. If both properties are specified, then brcm,ccode-map
+      takes precedence.
+    type: boolean
+
 required:
   - compatible
   - reg
diff --git a/Documentation/networking/devlink/devlink-selftests.rst b/Documentation/networking/devlink/devlink-selftests.rst
new file mode 100644 (file)
index 0000000..c0aa1f3
--- /dev/null
@@ -0,0 +1,38 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+=================
+Devlink Selftests
+=================
+
+The ``devlink-selftests`` API allows executing selftests on the device.
+
+Tests Mask
+==========
+The ``devlink-selftests`` command should be run with a mask indicating
+the tests to be executed.
+
+Tests Description
+=================
+The following is a list of tests that drivers may execute.
+
+.. list-table:: List of tests
+   :widths: 5 90
+
+   * - Name
+     - Description
+   * - ``DEVLINK_SELFTEST_FLASH``
+     - Devices may have the firmware on non-volatile memory on the board, e.g.
+       flash. This particular test helps to run a flash selftest on the device.
+       Implementation of the test is left to the driver/firmware.
+
+example usage
+-------------
+
+.. code:: shell
+
+    # Query selftests supported on the devlink device
+    $ devlink dev selftests show DEV
+    # Query selftests supported on all devlink devices
+    $ devlink dev selftests show
+    # Executes selftests on the device
+    $ devlink dev selftests run DEV id flash
index 5879ef3..56cd4ea 100644 (file)
@@ -636,6 +636,16 @@ tcp_recovery - INTEGER
 
        Default: 0x1
 
+tcp_reflect_tos - BOOLEAN
+       For listening sockets, reuse the DSCP value of the initial SYN message
+       for outgoing packets. This allows to have both directions of a TCP
+       stream to use the same DSCP value, assuming DSCP remains unchanged for
+       the lifetime of the connection.
+
+       This options affects both IPv4 and IPv6.
+
+       Default: 0 (disabled)
+
 tcp_reordering - INTEGER
        Initial reordering level of packets in a TCP stream.
        TCP stack can then dynamically adjust flow reordering level
@@ -2884,7 +2894,14 @@ sctp_rmem - vector of 3 INTEGERs: min, default, max
        Default: 4K
 
 sctp_wmem  - vector of 3 INTEGERs: min, default, max
-       Currently this tunable has no effect.
+       Only the first value ("min") is used, "default" and "max" are
+       ignored.
+
+       min: Minimum size of send buffer that can be used by SCTP sockets.
+       It is guaranteed to each SCTP socket (but not association) even
+       under moderate memory pressure.
+
+       Default: 4K
 
 addr_scope_policy - INTEGER
        Control IPv4 address scoping - draft-stewart-tsvwg-sctp-ipv4-00
index 6e090fb..98a2839 100644 (file)
@@ -5658,7 +5658,7 @@ by a string of size ``name_size``.
        #define KVM_STATS_UNIT_SECONDS          (0x2 << KVM_STATS_UNIT_SHIFT)
        #define KVM_STATS_UNIT_CYCLES           (0x3 << KVM_STATS_UNIT_SHIFT)
        #define KVM_STATS_UNIT_BOOLEAN          (0x4 << KVM_STATS_UNIT_SHIFT)
-       #define KVM_STATS_UNIT_MAX              KVM_STATS_UNIT_CYCLES
+       #define KVM_STATS_UNIT_MAX              KVM_STATS_UNIT_BOOLEAN
 
        #define KVM_STATS_BASE_SHIFT            8
        #define KVM_STATS_BASE_MASK             (0xF << KVM_STATS_BASE_SHIFT)
index 46b345d..1920d82 100644 (file)
@@ -15862,7 +15862,7 @@ PIN CONTROLLER - FREESCALE
 M:     Dong Aisheng <aisheng.dong@nxp.com>
 M:     Fabio Estevam <festevam@gmail.com>
 M:     Shawn Guo <shawnguo@kernel.org>
-M:     Stefan Agner <stefan@agner.ch>
+M:     Jacky Bai <ping.bai@nxp.com>
 R:     Pengutronix Kernel Team <kernel@pengutronix.de>
 L:     linux-gpio@vger.kernel.org
 S:     Maintained
index 00fd80c..b79c1c1 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Superb Owl
 
 # *DOCUMENTATION*
index 3cb02ff..38e90a3 100644 (file)
@@ -38,7 +38,7 @@
                sys_clk: sys_clk {
                        compatible = "fixed-clock";
                        #clock-cells = <0>;
-                       clock-frequency = <162500000>;
+                       clock-frequency = <165625000>;
                };
 
                cpu_clk: cpu_clk {
index c546356..5738496 100644 (file)
@@ -549,7 +549,7 @@ static struct pxa2xx_spi_controller corgi_spi_info = {
 };
 
 static struct gpiod_lookup_table corgi_spi_gpio_table = {
-       .dev_id = "pxa2xx-spi.1",
+       .dev_id = "spi1",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", CORGI_GPIO_ADS7846_CS, "cs", 0, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX("gpio-pxa", CORGI_GPIO_LCDCON_CS, "cs", 1, GPIO_ACTIVE_LOW),
index 2ae06ed..2fd6659 100644 (file)
@@ -635,7 +635,7 @@ static struct pxa2xx_spi_controller pxa_ssp2_master_info = {
 };
 
 static struct gpiod_lookup_table pxa_ssp2_gpio_table = {
-       .dev_id = "pxa2xx-spi.2",
+       .dev_id = "spi2",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", GPIO88_HX4700_TSC2046_CS, "cs", 0, GPIO_ACTIVE_LOW),
                { },
index 753fe16..6240882 100644 (file)
@@ -140,7 +140,7 @@ struct platform_device pxa_spi_ssp4 = {
 };
 
 static struct gpiod_lookup_table pxa_ssp3_gpio_table = {
-       .dev_id = "pxa2xx-spi.3",
+       .dev_id = "spi3",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS1, "cs", 0, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS2, "cs", 1, GPIO_ACTIVE_LOW),
@@ -149,7 +149,7 @@ static struct gpiod_lookup_table pxa_ssp3_gpio_table = {
 };
 
 static struct gpiod_lookup_table pxa_ssp4_gpio_table = {
-       .dev_id = "pxa2xx-spi.4",
+       .dev_id = "spi4",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS3, "cs", 0, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS4, "cs", 1, GPIO_ACTIVE_LOW),
index f98dc61..98423a9 100644 (file)
@@ -207,7 +207,7 @@ static struct spi_board_info littleton_spi_devices[] __initdata = {
 };
 
 static struct gpiod_lookup_table littleton_spi_gpio_table = {
-       .dev_id = "pxa2xx-spi.2",
+       .dev_id = "spi2",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", LITTLETON_GPIO_LCD_CS, "cs", 0, GPIO_ACTIVE_LOW),
                { },
index 20456a5..0827ebc 100644 (file)
@@ -994,7 +994,7 @@ static struct pxa2xx_spi_controller magician_spi_info = {
 };
 
 static struct gpiod_lookup_table magician_spi_gpio_table = {
-       .dev_id = "pxa2xx-spi.2",
+       .dev_id = "spi2",
        .table = {
                /* NOTICE must be GPIO, incompatibility with hw PXA SPI framing */
                GPIO_LOOKUP_IDX("gpio-pxa", GPIO14_MAGICIAN_TSC2046_CS, "cs", 0, GPIO_ACTIVE_LOW),
index dd88953..9964729 100644 (file)
@@ -578,7 +578,7 @@ static struct pxa2xx_spi_controller spitz_spi_info = {
 };
 
 static struct gpiod_lookup_table spitz_spi_gpio_table = {
-       .dev_id = "pxa2xx-spi.2",
+       .dev_id = "spi2",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_ADS7846_CS, "cs", 0, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_LCDCON_CS, "cs", 1, GPIO_ACTIVE_LOW),
index d035205..c4d4162 100644 (file)
@@ -623,7 +623,7 @@ static struct pxa2xx_spi_controller pxa_ssp2_master_info = {
 };
 
 static struct gpiod_lookup_table pxa_ssp1_gpio_table = {
-       .dev_id = "pxa2xx-spi.1",
+       .dev_id = "spi1",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", GPIO24_ZIPITZ2_WIFI_CS, "cs", 0, GPIO_ACTIVE_LOW),
                { },
@@ -631,7 +631,7 @@ static struct gpiod_lookup_table pxa_ssp1_gpio_table = {
 };
 
 static struct gpiod_lookup_table pxa_ssp2_gpio_table = {
-       .dev_id = "pxa2xx-spi.2",
+       .dev_id = "spi2",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", GPIO88_ZIPITZ2_LCD_CS, "cs", 0, GPIO_ACTIVE_LOW),
                { },
index 34cf8a5..a4c46a0 100644 (file)
@@ -73,6 +73,7 @@ ifeq ($(CONFIG_PERF_EVENTS),y)
 endif
 
 KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
+KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
 
 # GCC versions that support the "-mstrict-align" option default to allowing
 # unaligned accesses.  While unaligned accesses are explicitly allowed in the
index 039b92a..f72540b 100644 (file)
@@ -35,7 +35,7 @@
        gpio-keys {
                compatible = "gpio-keys";
 
-               key0 {
+               key {
                        label = "KEY0";
                        linux,code = <BTN_0>;
                        gpios = <&gpio0 10 GPIO_ACTIVE_LOW>;
index b9e30df..8abdbe2 100644 (file)
@@ -47,7 +47,7 @@
        gpio-keys {
                compatible = "gpio-keys";
 
-               boot {
+               key-boot {
                        label = "BOOT";
                        linux,code = <BTN_0>;
                        gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
index 8d23401..3c6df1e 100644 (file)
@@ -52,7 +52,7 @@
        gpio-keys {
                compatible = "gpio-keys";
 
-               boot {
+               key-boot {
                        label = "BOOT";
                        linux,code = <BTN_0>;
                        gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
index 24fd83b..03c9843 100644 (file)
        gpio-keys {
                compatible = "gpio-keys";
 
-               up {
+               key-up {
                        label = "UP";
                        linux,code = <BTN_1>;
                        gpios = <&gpio1_0 7 GPIO_ACTIVE_LOW>;
                };
 
-               press {
+               key-press {
                        label = "PRESS";
                        linux,code = <BTN_0>;
                        gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
                };
 
-               down {
+               key-down {
                        label = "DOWN";
                        linux,code = <BTN_2>;
                        gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
index 25341f3..7164ad0 100644 (file)
@@ -23,7 +23,7 @@
        gpio-keys {
                compatible = "gpio-keys";
 
-               boot {
+               key-boot {
                        label = "BOOT";
                        linux,code = <BTN_0>;
                        gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
index c71d659..33bb60a 100644 (file)
@@ -78,7 +78,7 @@ obj-$(CONFIG_SMP) += cpu_ops_sbi.o
 endif
 obj-$(CONFIG_HOTPLUG_CPU)      += cpu-hotplug.o
 obj-$(CONFIG_KGDB)             += kgdb.o
-obj-$(CONFIG_KEXEC)            += kexec_relocate.o crash_save_regs.o machine_kexec.o
+obj-$(CONFIG_KEXEC_CORE)       += kexec_relocate.o crash_save_regs.o machine_kexec.o
 obj-$(CONFIG_KEXEC_FILE)       += elf_kexec.o machine_kexec_file.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump.o
 
index 9cb8509..0cb9499 100644 (file)
@@ -349,7 +349,7 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
 {
        const char *strtab, *name, *shstrtab;
        const Elf_Shdr *sechdrs;
-       Elf_Rela *relas;
+       Elf64_Rela *relas;
        int i, r_type;
 
        /* String & section header string table */
index 2c6e1c6..4120c42 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Kernel interface for the s390 arch_random_* functions
  *
- * Copyright IBM Corp. 2017, 2020
+ * Copyright IBM Corp. 2017, 2022
  *
  * Author: Harald Freudenberger <freude@de.ibm.com>
  *
@@ -14,6 +14,7 @@
 #ifdef CONFIG_ARCH_RANDOM
 
 #include <linux/static_key.h>
+#include <linux/preempt.h>
 #include <linux/atomic.h>
 #include <asm/cpacf.h>
 
@@ -32,7 +33,8 @@ static inline bool __must_check arch_get_random_int(unsigned int *v)
 
 static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 {
-       if (static_branch_likely(&s390_arch_random_available)) {
+       if (static_branch_likely(&s390_arch_random_available) &&
+           in_task()) {
                cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
                atomic64_add(sizeof(*v), &s390_arch_random_counter);
                return true;
@@ -42,7 +44,8 @@ static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 
 static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 {
-       if (static_branch_likely(&s390_arch_random_available)) {
+       if (static_branch_likely(&s390_arch_random_available) &&
+           in_task()) {
                cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
                atomic64_add(sizeof(*v), &s390_arch_random_counter);
                return true;
index 7fff10e..52a7f91 100644 (file)
@@ -2474,7 +2474,7 @@ config RETHUNK
        bool "Enable return-thunks"
        depends on RETPOLINE && CC_HAS_RETURN_THUNK
        select OBJTOOL if HAVE_OBJTOOL
-       default y
+       default y if X86_64
        help
          Compile the kernel with the return-thunks compiler option to guard
          against kernel-to-user data leaks by avoiding return speculation.
@@ -2483,21 +2483,21 @@ config RETHUNK
 
 config CPU_UNRET_ENTRY
        bool "Enable UNRET on kernel entry"
-       depends on CPU_SUP_AMD && RETHUNK
+       depends on CPU_SUP_AMD && RETHUNK && X86_64
        default y
        help
          Compile the kernel with support for the retbleed=unret mitigation.
 
 config CPU_IBPB_ENTRY
        bool "Enable IBPB on kernel entry"
-       depends on CPU_SUP_AMD
+       depends on CPU_SUP_AMD && X86_64
        default y
        help
          Compile the kernel with support for the retbleed=ibpb mitigation.
 
 config CPU_IBRS_ENTRY
        bool "Enable IBRS on kernel entry"
-       depends on CPU_SUP_INTEL
+       depends on CPU_SUP_INTEL && X86_64
        default y
        help
          Compile the kernel with support for the spectre_v2=ibrs mitigation.
index 1f40dad..7854685 100644 (file)
@@ -27,6 +27,7 @@ RETHUNK_CFLAGS                := -mfunction-return=thunk-extern
 RETPOLINE_CFLAGS       += $(RETHUNK_CFLAGS)
 endif
 
+export RETHUNK_CFLAGS
 export RETPOLINE_CFLAGS
 export RETPOLINE_VDSO_CFLAGS
 
index 13179f3..4f70fb6 100644 (file)
@@ -278,9 +278,9 @@ enum {
 };
 
 /*
- * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
- * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
- * TSX is not supported they have no consistent behavior:
+ * For format LBR_FORMAT_EIP_FLAGS2, bits 61:62 in MSR_LAST_BRANCH_FROM_x
+ * are the TSX flags when TSX is supported, but when TSX is not supported
+ * they have no consistent behavior:
  *
  *   - For wrmsr(), bits 61:62 are considered part of the sign extension.
  *   - For HW updates (branch captures) bits 61:62 are always OFF and are not
@@ -288,7 +288,7 @@ enum {
  *
  * Therefore, if:
  *
- *   1) LBR has TSX format
+ *   1) LBR format LBR_FORMAT_EIP_FLAGS2
  *   2) CPU has no TSX support enabled
  *
  * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
@@ -300,7 +300,7 @@ static inline bool lbr_from_signext_quirk_needed(void)
        bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
                           boot_cpu_has(X86_FEATURE_RTM);
 
-       return !tsx_support && x86_pmu.lbr_has_tsx;
+       return !tsx_support;
 }
 
 static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
@@ -1609,9 +1609,6 @@ void intel_pmu_lbr_init_hsw(void)
        x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
 
        x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
-
-       if (lbr_from_signext_quirk_needed())
-               static_branch_enable(&lbr_from_quirk_key);
 }
 
 /* skylake */
@@ -1702,7 +1699,11 @@ void intel_pmu_lbr_init(void)
        switch (x86_pmu.intel_cap.lbr_format) {
        case LBR_FORMAT_EIP_FLAGS2:
                x86_pmu.lbr_has_tsx = 1;
-               fallthrough;
+               x86_pmu.lbr_from_flags = 1;
+               if (lbr_from_signext_quirk_needed())
+                       static_branch_enable(&lbr_from_quirk_key);
+               break;
+
        case LBR_FORMAT_EIP_FLAGS:
                x86_pmu.lbr_from_flags = 1;
                break;
index 00f5227..a77b915 100644 (file)
 #define X86_FEATURE_RETPOLINE_LFENCE   (11*32+13) /* "" Use LFENCE for Spectre variant 2 */
 #define X86_FEATURE_RETHUNK            (11*32+14) /* "" Use REturn THUNK */
 #define X86_FEATURE_UNRET              (11*32+15) /* "" AMD BTB untrain return */
+#define X86_FEATURE_USE_IBPB_FW                (11*32+16) /* "" Use IBPB during runtime firmware calls */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI           (12*32+ 4) /* AVX VNNI instructions */
index 10a3bfc..38a3e86 100644 (file)
@@ -297,6 +297,8 @@ do {                                                                        \
        alternative_msr_write(MSR_IA32_SPEC_CTRL,                       \
                              spec_ctrl_current() | SPEC_CTRL_IBRS,     \
                              X86_FEATURE_USE_IBRS_FW);                 \
+       alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,         \
+                             X86_FEATURE_USE_IBPB_FW);                 \
 } while (0)
 
 #define firmware_restrict_branch_speculation_end()                     \
index d685853..62f6b8b 100644 (file)
@@ -555,7 +555,9 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
                        dest = addr + insn.length + insn.immediate.value;
 
                if (__static_call_fixup(addr, op, dest) ||
-                   WARN_ON_ONCE(dest != &__x86_return_thunk))
+                   WARN_ONCE(dest != &__x86_return_thunk,
+                             "missing return thunk: %pS-%pS: %*ph",
+                             addr, dest, 5, addr))
                        continue;
 
                DPRINTK("return thunk at: %pS (%px) len: %d to: %pS",
index aa34f90..6454bc7 100644 (file)
@@ -975,6 +975,7 @@ static inline const char *spectre_v2_module_string(void) { return ""; }
 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
+#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
 
 #ifdef CONFIG_BPF_SYSCALL
 void unpriv_ebpf_notify(int new_state)
@@ -1415,6 +1416,8 @@ static void __init spectre_v2_select_mitigation(void)
 
        case SPECTRE_V2_IBRS:
                setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
+               if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
+                       pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
                break;
 
        case SPECTRE_V2_LFENCE:
@@ -1516,7 +1519,16 @@ static void __init spectre_v2_select_mitigation(void)
         * the CPU supports Enhanced IBRS, kernel might un-intentionally not
         * enable IBRS around firmware calls.
         */
-       if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
+       if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+           (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+            boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
+
+               if (retbleed_cmd != RETBLEED_CMD_IBPB) {
+                       setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
+                       pr_info("Enabling Speculation Barrier for firmware calls\n");
+               }
+
+       } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }
index 143e372..e5fa335 100644 (file)
@@ -6029,6 +6029,11 @@ split_irqchip_unlock:
                r = 0;
                break;
        case KVM_CAP_X86_USER_SPACE_MSR:
+               r = -EINVAL;
+               if (cap->args[0] & ~(KVM_MSR_EXIT_REASON_INVAL |
+                                    KVM_MSR_EXIT_REASON_UNKNOWN |
+                                    KVM_MSR_EXIT_REASON_FILTER))
+                       break;
                kvm->arch.user_space_msr_mask = cap->args[0];
                r = 0;
                break;
@@ -6183,6 +6188,9 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
        if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
                return -EFAULT;
 
+       if (filter.flags & ~KVM_MSR_FILTER_DEFAULT_DENY)
+               return -EINVAL;
+
        for (i = 0; i < ARRAY_SIZE(filter.ranges); i++)
                empty &= !filter.ranges[i].nmsrs;
 
index 4767557..bf9b511 100644 (file)
@@ -43,6 +43,7 @@ config SYSTEM_TRUSTED_KEYRING
        bool "Provide system-wide ring of trusted keys"
        depends on KEYS
        depends on ASYMMETRIC_KEY_TYPE
+       depends on X509_CERTIFICATE_PARSER
        help
          Provide a system keyring to which trusted keys can be added.  Keys in
          the keyring are considered to be trusted.  Keys may be added at will
index 6ff1901..3c6d4ef 100644 (file)
@@ -782,7 +782,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 
                                        if (!osc_cpc_flexible_adr_space_confirmed) {
                                                pr_debug("Flexible address space capability not supported\n");
-                                               goto out_free;
+                                               if (!cpc_supported_by_cpu())
+                                                       goto out_free;
                                        }
 
                                        addr = ioremap(gas_t->address, gas_t->bit_width/8);
@@ -809,7 +810,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
                                }
                                if (!osc_cpc_flexible_adr_space_confirmed) {
                                        pr_debug("Flexible address space capability not supported\n");
-                                       goto out_free;
+                                       if (!cpc_supported_by_cpu())
+                                               goto out_free;
                                }
                        } else {
                                if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
index d1535ac..81cb909 100644 (file)
@@ -213,7 +213,7 @@ static int lan966x_gate_clk_register(struct device *dev,
 
                hw_data->hws[i] =
                        devm_clk_hw_register_gate(dev, clk_gate_desc[idx].name,
-                                                 "lan966x", 0, base,
+                                                 "lan966x", 0, gate_base,
                                                  clk_gate_desc[idx].bit_idx,
                                                  0, &clk_gate_lock);
 
index dcc1410..af22be8 100644 (file)
@@ -201,15 +201,6 @@ struct fwnet_packet_task {
        u8 enqueued;
 };
 
-/*
- * Get fifo address embedded in hwaddr
- */
-static __u64 fwnet_hwaddr_fifo(union fwnet_hwaddr *ha)
-{
-       return (u64)get_unaligned_be16(&ha->uc.fifo_hi) << 32
-              | get_unaligned_be32(&ha->uc.fifo_lo);
-}
-
 /*
  * saddr == NULL means use device source address.
  * daddr == NULL means leave destination address (eg unresolved arp).
@@ -1306,7 +1297,7 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
                max_payload        = peer->max_payload;
                datagram_label_ptr = &peer->datagram_label;
 
-               ptask->fifo_addr   = fwnet_hwaddr_fifo(ha);
+               ptask->fifo_addr   = get_unaligned_be48(ha->uc.fifo);
                ptask->generation  = generation;
                ptask->dest_node   = dest_node;
                ptask->speed       = peer->speed;
@@ -1494,8 +1485,7 @@ static int fwnet_probe(struct fw_unit *unit,
        ha.uc.uniq_id = cpu_to_be64(card->guid);
        ha.uc.max_rec = dev->card->max_receive;
        ha.uc.sspd = dev->card->link_speed;
-       ha.uc.fifo_hi = cpu_to_be16(dev->local_fifo >> 32);
-       ha.uc.fifo_lo = cpu_to_be32(dev->local_fifo & 0xffffffff);
+       put_unaligned_be48(dev->local_fifo, ha.uc.fifo);
        dev_addr_set(net, ha.u);
 
        memset(net->broadcast, -1, net->addr_len);
index 08bc52c..ecd7d16 100644 (file)
@@ -351,6 +351,9 @@ static const struct regmap_config pca953x_i2c_regmap = {
        .reg_bits = 8,
        .val_bits = 8,
 
+       .use_single_read = true,
+       .use_single_write = true,
+
        .readable_reg = pca953x_readable_register,
        .writeable_reg = pca953x_writeable_register,
        .volatile_reg = pca953x_volatile_register,
@@ -906,15 +909,18 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
 static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
 {
        DECLARE_BITMAP(val, MAX_LINE);
+       u8 regaddr;
        int ret;
 
-       ret = regcache_sync_region(chip->regmap, chip->regs->output,
-                                  chip->regs->output + NBANK(chip));
+       regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+       ret = regcache_sync_region(chip->regmap, regaddr,
+                                  regaddr + NBANK(chip) - 1);
        if (ret)
                goto out;
 
-       ret = regcache_sync_region(chip->regmap, chip->regs->direction,
-                                  chip->regs->direction + NBANK(chip));
+       regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+       ret = regcache_sync_region(chip->regmap, regaddr,
+                                  regaddr + NBANK(chip) - 1);
        if (ret)
                goto out;
 
@@ -1127,14 +1133,14 @@ static int pca953x_regcache_sync(struct device *dev)
         * sync these registers first and only then sync the rest.
         */
        regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
-       ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
+       ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
        if (ret) {
                dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
                return ret;
        }
 
        regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
-       ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
+       ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
        if (ret) {
                dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
                return ret;
@@ -1144,7 +1150,7 @@ static int pca953x_regcache_sync(struct device *dev)
        if (chip->driver_data & PCA_PCAL) {
                regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0);
                ret = regcache_sync_region(chip->regmap, regaddr,
-                                          regaddr + NBANK(chip));
+                                          regaddr + NBANK(chip) - 1);
                if (ret) {
                        dev_err(dev, "Failed to sync INT latch registers: %d\n",
                                ret);
@@ -1153,7 +1159,7 @@ static int pca953x_regcache_sync(struct device *dev)
 
                regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0);
                ret = regcache_sync_region(chip->regmap, regaddr,
-                                          regaddr + NBANK(chip));
+                                          regaddr + NBANK(chip) - 1);
                if (ret) {
                        dev_err(dev, "Failed to sync INT mask registers: %d\n",
                                ret);
index b6d3a57..7f8e2fe 100644 (file)
@@ -99,7 +99,7 @@ static inline void xgpio_set_value32(unsigned long *map, int bit, u32 v)
        const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5);
 
        map[index] &= ~(0xFFFFFFFFul << offset);
-       map[index] |= v << offset;
+       map[index] |= (unsigned long)v << offset;
 }
 
 static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch)
index 6b6d46e..4608599 100644 (file)
@@ -1364,16 +1364,10 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm)
 {
        struct amdkfd_process_info *process_info = vm->process_info;
-       struct amdgpu_bo *pd = vm->root.bo;
 
        if (!process_info)
                return;
 
-       /* Release eviction fence from PD */
-       amdgpu_bo_reserve(pd, false);
-       amdgpu_bo_fence(pd, NULL, false);
-       amdgpu_bo_unreserve(pd);
-
        /* Update process info */
        mutex_lock(&process_info->lock);
        process_info->n_vms--;
index 714178f..2168163 100644 (file)
@@ -40,7 +40,7 @@ static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
 {
        struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
                                                   rhead);
-
+       mutex_destroy(&list->bo_list_mutex);
        kvfree(list);
 }
 
@@ -136,6 +136,7 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
 
        trace_amdgpu_cs_bo_status(list->num_entries, total_size);
 
+       mutex_init(&list->bo_list_mutex);
        *result = list;
        return 0;
 
index 529d52a..9caea16 100644 (file)
@@ -47,6 +47,10 @@ struct amdgpu_bo_list {
        struct amdgpu_bo *oa_obj;
        unsigned first_userptr;
        unsigned num_entries;
+
+       /* Protect access during command submission.
+        */
+       struct mutex bo_list_mutex;
 };
 
 int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
index b28af04..d8f1335 100644 (file)
@@ -519,6 +519,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                        return r;
        }
 
+       mutex_lock(&p->bo_list->bo_list_mutex);
+
        /* One for TTM and one for the CS job */
        amdgpu_bo_list_for_each_entry(e, p->bo_list)
                e->tv.num_shared = 2;
@@ -651,6 +653,7 @@ out_free_user_pages:
                        kvfree(e->user_pages);
                        e->user_pages = NULL;
                }
+               mutex_unlock(&p->bo_list->bo_list_mutex);
        }
        return r;
 }
@@ -690,9 +693,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 {
        unsigned i;
 
-       if (error && backoff)
+       if (error && backoff) {
                ttm_eu_backoff_reservation(&parser->ticket,
                                           &parser->validated);
+               mutex_unlock(&parser->bo_list->bo_list_mutex);
+       }
 
        for (i = 0; i < parser->num_post_deps; i++) {
                drm_syncobj_put(parser->post_deps[i].syncobj);
@@ -832,12 +837,16 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
                        continue;
 
                r = amdgpu_vm_bo_update(adev, bo_va, false);
-               if (r)
+               if (r) {
+                       mutex_unlock(&p->bo_list->bo_list_mutex);
                        return r;
+               }
 
                r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
-               if (r)
+               if (r) {
+                       mutex_unlock(&p->bo_list->bo_list_mutex);
                        return r;
+               }
        }
 
        r = amdgpu_vm_handle_moved(adev, vm);
@@ -1278,6 +1287,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
        ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
        mutex_unlock(&p->adev->notifier_lock);
+       mutex_unlock(&p->bo_list->bo_list_mutex);
 
        return 0;
 
index 93ac33a..3087dd1 100644 (file)
@@ -1653,7 +1653,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
 #endif
-       if (dc_enable_dmub_notifications(adev->dm.dc)) {
+       if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
                init_completion(&adev->dm.dmub_aux_transfer_done);
                adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
                if (!adev->dm.dmub_notify) {
@@ -1689,6 +1689,13 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                goto error;
        }
 
+       /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+        * It is expected that DMUB will resend any pending notifications at this point, for
+        * example HPD from DPIA.
+        */
+       if (dc_is_dmub_outbox_supported(adev->dm.dc))
+               dc_enable_dmub_outbox(adev->dm.dc);
+
        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);
 
@@ -2678,9 +2685,6 @@ static int dm_resume(void *handle)
                 */
                link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
 
-               if (dc_enable_dmub_notifications(adev->dm.dc))
-                       amdgpu_dm_outbox_init(adev);
-
                r = dm_dmub_hw_init(adev);
                if (r)
                        DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -2698,6 +2702,11 @@ static int dm_resume(void *handle)
                        }
                }
 
+               if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+                       amdgpu_dm_outbox_init(adev);
+                       dc_enable_dmub_outbox(adev->dm.dc);
+               }
+
                WARN_ON(!dc_commit_state(dm->dc, dc_state));
 
                dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -2719,13 +2728,15 @@ static int dm_resume(void *handle)
        /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
        dc_resource_state_construct(dm->dc, dm_state->context);
 
-       /* Re-enable outbox interrupts for DPIA. */
-       if (dc_enable_dmub_notifications(adev->dm.dc))
-               amdgpu_dm_outbox_init(adev);
-
        /* Before powering on DC we need to re-initialize DMUB. */
        dm_dmub_hw_resume(adev);
 
+       /* Re-enable outbox interrupts for DPIA. */
+       if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+               amdgpu_dm_outbox_init(adev);
+               dc_enable_dmub_outbox(adev->dm.dc);
+       }
+
        /* power on hardware */
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
 
index d5962a3..e5fc875 100644 (file)
@@ -64,8 +64,13 @@ int drm_gem_ttm_vmap(struct drm_gem_object *gem,
                     struct iosys_map *map)
 {
        struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+       int ret;
+
+       dma_resv_lock(gem->resv, NULL);
+       ret = ttm_bo_vmap(bo, map);
+       dma_resv_unlock(gem->resv);
 
-       return ttm_bo_vmap(bo, map);
+       return ret;
 }
 EXPORT_SYMBOL(drm_gem_ttm_vmap);
 
@@ -82,7 +87,9 @@ void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
 {
        struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
 
+       dma_resv_lock(gem->resv, NULL);
        ttm_bo_vunmap(bo, map);
+       dma_resv_unlock(gem->resv);
 }
 EXPORT_SYMBOL(drm_gem_ttm_vunmap);
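
With the drm_gem_ttm changes above, the vmap/vunmap helpers take the GEM object's reservation lock around ttm_bo_vmap()/ttm_bo_vunmap() themselves, so callers are expected to invoke them without holding that lock. A minimal caller sketch (hypothetical driver code, shown only to illustrate the resulting calling convention):

#include <drm/drm_gem_ttm_helper.h>

static int example_map_gem(struct drm_gem_object *gem)
{
	struct iosys_map map;
	int ret;

	/* The helper now locks gem->resv internally before mapping. */
	ret = drm_gem_ttm_vmap(gem, &map);
	if (ret)
		return ret;

	/* ... use the mapping via the iosys_map accessors ... */

	drm_gem_ttm_vunmap(gem, &map);	/* also takes the lock itself */
	return 0;
}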
 
index 09f8254..44e7339 100644 (file)
@@ -273,10 +273,17 @@ struct intel_context {
                u8 child_index;
                /** @guc: GuC specific members for parallel submission */
                struct {
-                       /** @wqi_head: head pointer in work queue */
+                       /** @wqi_head: cached head pointer in work queue */
                        u16 wqi_head;
-                       /** @wqi_tail: tail pointer in work queue */
+                       /** @wqi_tail: cached tail pointer in work queue */
                        u16 wqi_tail;
+                       /** @wq_head: pointer to the actual head in work queue */
+                       u32 *wq_head;
+                       /** @wq_tail: pointer to the actual tail in work queue */
+                       u32 *wq_tail;
+                       /** @wq_status: pointer to the status in work queue */
+                       u32 *wq_status;
+
                        /**
                         * @parent_page: page in context state (ce->state) used
                         * by parent for work queue, process descriptor
index 86f7a9a..2b0266c 100644 (file)
@@ -661,6 +661,16 @@ static inline void execlists_schedule_out(struct i915_request *rq)
        i915_request_put(rq);
 }
 
+static u32 map_i915_prio_to_lrc_desc_prio(int prio)
+{
+       if (prio > I915_PRIORITY_NORMAL)
+               return GEN12_CTX_PRIORITY_HIGH;
+       else if (prio < I915_PRIORITY_NORMAL)
+               return GEN12_CTX_PRIORITY_LOW;
+       else
+               return GEN12_CTX_PRIORITY_NORMAL;
+}
+
 static u64 execlists_update_context(struct i915_request *rq)
 {
        struct intel_context *ce = rq->context;
@@ -669,7 +679,7 @@ static u64 execlists_update_context(struct i915_request *rq)
 
        desc = ce->lrc.desc;
        if (rq->engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
-               desc |= lrc_desc_priority(rq_prio(rq));
+               desc |= map_i915_prio_to_lrc_desc_prio(rq_prio(rq));
 
        /*
         * WaIdleLiteRestore:bdw,skl
index 31be734..a390f08 100644 (file)
@@ -111,16 +111,6 @@ enum {
 #define XEHP_SW_COUNTER_SHIFT                  58
 #define XEHP_SW_COUNTER_WIDTH                  6
 
-static inline u32 lrc_desc_priority(int prio)
-{
-       if (prio > I915_PRIORITY_NORMAL)
-               return GEN12_CTX_PRIORITY_HIGH;
-       else if (prio < I915_PRIORITY_NORMAL)
-               return GEN12_CTX_PRIORITY_LOW;
-       else
-               return GEN12_CTX_PRIORITY_NORMAL;
-}
-
 static inline void lrc_runtime_start(struct intel_context *ce)
 {
        struct intel_context_stats *stats = &ce->stats;
index 4ef9990..29ef8af 100644 (file)
@@ -122,6 +122,9 @@ enum intel_guc_action {
        INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
        INTEL_GUC_ACTION_SCHED_ENGINE_MODE_SET = 0x1003,
        INTEL_GUC_ACTION_SCHED_ENGINE_MODE_DONE = 0x1004,
+       INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY = 0x1005,
+       INTEL_GUC_ACTION_V69_SET_CONTEXT_EXECUTION_QUANTUM = 0x1006,
+       INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT = 0x1007,
        INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION = 0x1008,
        INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION = 0x1009,
        INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES = 0x100B,
index 966e69a..9feda10 100644 (file)
@@ -170,6 +170,11 @@ struct intel_guc {
        /** @ads_engine_usage_size: size of engine usage in the ADS */
        u32 ads_engine_usage_size;
 
+       /** @lrc_desc_pool_v69: object allocated to hold the GuC LRC descriptor pool */
+       struct i915_vma *lrc_desc_pool_v69;
+       /** @lrc_desc_pool_vaddr_v69: contents of the GuC LRC descriptor pool */
+       void *lrc_desc_pool_vaddr_v69;
+
        /**
         * @context_lookup: used to resolve intel_context from guc_id, if a
         * context is present in this structure it is registered with the GuC
index 42cb7a9..89a7e5e 100644 (file)
@@ -203,6 +203,20 @@ struct guc_wq_item {
        u32 fence_id;
 } __packed;
 
+struct guc_process_desc_v69 {
+       u32 stage_id;
+       u64 db_base_addr;
+       u32 head;
+       u32 tail;
+       u32 error_offset;
+       u64 wq_base_addr;
+       u32 wq_size_bytes;
+       u32 wq_status;
+       u32 engine_presence;
+       u32 priority;
+       u32 reserved[36];
+} __packed;
+
 struct guc_sched_wq_desc {
        u32 head;
        u32 tail;
@@ -227,6 +241,37 @@ struct guc_ctxt_registration_info {
 };
 #define CONTEXT_REGISTRATION_FLAG_KMD  BIT(0)
 
+/* Preempt to idle on quantum expiry */
+#define CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69        BIT(0)
+
+/*
+ * GuC Context registration descriptor.
+ * FIXME: This is only required to exist during context registration.
+ * The current 1:1 between guc_lrc_desc and LRCs for the lifetime of the LRC
+ * is not required.
+ */
+struct guc_lrc_desc_v69 {
+       u32 hw_context_desc;
+       u32 slpm_perf_mode_hint;        /* SPLC v1 only */
+       u32 slpm_freq_hint;
+       u32 engine_submit_mask;         /* In logical space */
+       u8 engine_class;
+       u8 reserved0[3];
+       u32 priority;
+       u32 process_desc;
+       u32 wq_addr;
+       u32 wq_size;
+       u32 context_flags;              /* CONTEXT_REGISTRATION_* */
+       /* Time for one workload to execute. (in microseconds) */
+       u32 execution_quantum;
+       /* Time to wait for a preemption request to complete before issuing a
+        * reset. (in microseconds).
+        */
+       u32 preemption_timeout;
+       u32 policy_flags;               /* CONTEXT_POLICY_* */
+       u32 reserved1[19];
+} __packed;
+
 /* 32-bit KLV structure as used by policy updates and others */
 struct guc_klv_generic_dw_t {
        u32 kl;
index 1726f0f..9ffb343 100644 (file)
@@ -414,12 +414,15 @@ struct sync_semaphore {
 };
 
 struct parent_scratch {
-       struct guc_sched_wq_desc wq_desc;
+       union guc_descs {
+               struct guc_sched_wq_desc wq_desc;
+               struct guc_process_desc_v69 pdesc;
+       } descs;
 
        struct sync_semaphore go;
        struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
 
-       u8 unused[WQ_OFFSET - sizeof(struct guc_sched_wq_desc) -
+       u8 unused[WQ_OFFSET - sizeof(union guc_descs) -
                sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
 
        u32 wq[WQ_SIZE / sizeof(u32)];
@@ -456,17 +459,23 @@ __get_parent_scratch(struct intel_context *ce)
                   LRC_STATE_OFFSET) / sizeof(u32)));
 }
 
+static struct guc_process_desc_v69 *
+__get_process_desc_v69(struct intel_context *ce)
+{
+       struct parent_scratch *ps = __get_parent_scratch(ce);
+
+       return &ps->descs.pdesc;
+}
+
 static struct guc_sched_wq_desc *
-__get_wq_desc(struct intel_context *ce)
+__get_wq_desc_v70(struct intel_context *ce)
 {
        struct parent_scratch *ps = __get_parent_scratch(ce);
 
-       return &ps->wq_desc;
+       return &ps->descs.wq_desc;
 }
 
-static u32 *get_wq_pointer(struct guc_sched_wq_desc *wq_desc,
-                          struct intel_context *ce,
-                          u32 wqi_size)
+static u32 *get_wq_pointer(struct intel_context *ce, u32 wqi_size)
 {
        /*
         * Check for space in work queue. Caching a value of head pointer in
@@ -476,7 +485,7 @@ static u32 *get_wq_pointer(struct guc_sched_wq_desc *wq_desc,
 #define AVAILABLE_SPACE        \
        CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
        if (wqi_size > AVAILABLE_SPACE) {
-               ce->parallel.guc.wqi_head = READ_ONCE(wq_desc->head);
+               ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head);
 
                if (wqi_size > AVAILABLE_SPACE)
                        return NULL;
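
The cached-head check above relies on CIRC_SPACE() from <linux/circ_buf.h>. A worked example with illustrative numbers (the real WQ_SIZE is larger than this):

/*
 *   CIRC_CNT(head, tail, size)   = ((head) - (tail)) & ((size) - 1)
 *   CIRC_SPACE(head, tail, size) = CIRC_CNT((tail), (head) + 1, (size))
 *
 * With a hypothetical WQ_SIZE of 64, wqi_tail (producer index) = 60 and a
 * stale cached wqi_head (consumer index) = 0:
 *
 *   CIRC_SPACE(60, 0, 64) = (0 - 61) & 63 = 3
 *
 * so a 16-byte WQI does not fit with the cached head; the code then re-reads
 * the live head through *ce->parallel.guc.wq_head and only returns NULL if
 * there is still not enough space.
 */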
@@ -495,11 +504,55 @@ static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
        return ce;
 }
 
+static struct guc_lrc_desc_v69 *__get_lrc_desc_v69(struct intel_guc *guc, u32 index)
+{
+       struct guc_lrc_desc_v69 *base = guc->lrc_desc_pool_vaddr_v69;
+
+       if (!base)
+               return NULL;
+
+       GEM_BUG_ON(index >= GUC_MAX_CONTEXT_ID);
+
+       return &base[index];
+}
+
+static int guc_lrc_desc_pool_create_v69(struct intel_guc *guc)
+{
+       u32 size;
+       int ret;
+
+       size = PAGE_ALIGN(sizeof(struct guc_lrc_desc_v69) *
+                         GUC_MAX_CONTEXT_ID);
+       ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool_v69,
+                                            (void **)&guc->lrc_desc_pool_vaddr_v69);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static void guc_lrc_desc_pool_destroy_v69(struct intel_guc *guc)
+{
+       if (!guc->lrc_desc_pool_vaddr_v69)
+               return;
+
+       guc->lrc_desc_pool_vaddr_v69 = NULL;
+       i915_vma_unpin_and_release(&guc->lrc_desc_pool_v69, I915_VMA_RELEASE_MAP);
+}
+
 static inline bool guc_submission_initialized(struct intel_guc *guc)
 {
        return guc->submission_initialized;
 }
 
+static inline void _reset_lrc_desc_v69(struct intel_guc *guc, u32 id)
+{
+       struct guc_lrc_desc_v69 *desc = __get_lrc_desc_v69(guc, id);
+
+       if (desc)
+               memset(desc, 0, sizeof(*desc));
+}
+
 static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
 {
        return __get_context(guc, id);
@@ -526,6 +579,8 @@ static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
        if (unlikely(!guc_submission_initialized(guc)))
                return;
 
+       _reset_lrc_desc_v69(guc, id);
+
        /*
         * xarray API doesn't have xa_erase_irqsave wrapper, so calling
         * the lower level functions directly.
@@ -611,7 +666,7 @@ int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
                                              true, timeout);
 }
 
-static int guc_context_policy_init(struct intel_context *ce, bool loop);
+static int guc_context_policy_init_v70(struct intel_context *ce, bool loop);
 static int try_context_registration(struct intel_context *ce, bool loop);
 
 static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
@@ -639,7 +694,7 @@ static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
        GEM_BUG_ON(context_guc_id_invalid(ce));
 
        if (context_policy_required(ce)) {
-               err = guc_context_policy_init(ce, false);
+               err = guc_context_policy_init_v70(ce, false);
                if (err)
                        return err;
        }
@@ -737,9 +792,7 @@ static u32 wq_space_until_wrap(struct intel_context *ce)
        return (WQ_SIZE - ce->parallel.guc.wqi_tail);
 }
 
-static void write_wqi(struct guc_sched_wq_desc *wq_desc,
-                     struct intel_context *ce,
-                     u32 wqi_size)
+static void write_wqi(struct intel_context *ce, u32 wqi_size)
 {
        BUILD_BUG_ON(!is_power_of_2(WQ_SIZE));
 
@@ -750,13 +803,12 @@ static void write_wqi(struct guc_sched_wq_desc *wq_desc,
 
        ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
                (WQ_SIZE - 1);
-       WRITE_ONCE(wq_desc->tail, ce->parallel.guc.wqi_tail);
+       WRITE_ONCE(*ce->parallel.guc.wq_tail, ce->parallel.guc.wqi_tail);
 }
 
 static int guc_wq_noop_append(struct intel_context *ce)
 {
-       struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
-       u32 *wqi = get_wq_pointer(wq_desc, ce, wq_space_until_wrap(ce));
+       u32 *wqi = get_wq_pointer(ce, wq_space_until_wrap(ce));
        u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
 
        if (!wqi)
@@ -775,7 +827,6 @@ static int __guc_wq_item_append(struct i915_request *rq)
 {
        struct intel_context *ce = request_to_scheduling_context(rq);
        struct intel_context *child;
-       struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
        unsigned int wqi_size = (ce->parallel.number_children + 4) *
                sizeof(u32);
        u32 *wqi;
@@ -795,7 +846,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
                        return ret;
        }
 
-       wqi = get_wq_pointer(wq_desc, ce, wqi_size);
+       wqi = get_wq_pointer(ce, wqi_size);
        if (!wqi)
                return -EBUSY;
 
@@ -810,7 +861,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
        for_each_child(ce, child)
                *wqi++ = child->ring->tail / sizeof(u64);
 
-       write_wqi(wq_desc, ce, wqi_size);
+       write_wqi(ce, wqi_size);
 
        return 0;
 }
@@ -1868,20 +1919,34 @@ static void reset_fail_worker_func(struct work_struct *w);
 int intel_guc_submission_init(struct intel_guc *guc)
 {
        struct intel_gt *gt = guc_to_gt(guc);
+       int ret;
 
        if (guc->submission_initialized)
                return 0;
 
+       if (guc->fw.major_ver_found < 70) {
+               ret = guc_lrc_desc_pool_create_v69(guc);
+               if (ret)
+                       return ret;
+       }
+
        guc->submission_state.guc_ids_bitmap =
                bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
-       if (!guc->submission_state.guc_ids_bitmap)
-               return -ENOMEM;
+       if (!guc->submission_state.guc_ids_bitmap) {
+               ret = -ENOMEM;
+               goto destroy_pool;
+       }
 
        guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
        guc->timestamp.shift = gpm_timestamp_shift(gt);
        guc->submission_initialized = true;
 
        return 0;
+
+destroy_pool:
+       guc_lrc_desc_pool_destroy_v69(guc);
+
+       return ret;
 }
 
 void intel_guc_submission_fini(struct intel_guc *guc)
@@ -1890,6 +1955,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
                return;
 
        guc_flush_destroyed_contexts(guc);
+       guc_lrc_desc_pool_destroy_v69(guc);
        i915_sched_engine_put(guc->sched_engine);
        bitmap_free(guc->submission_state.guc_ids_bitmap);
        guc->submission_initialized = false;
@@ -2147,10 +2213,34 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
        spin_unlock_irqrestore(&guc->submission_state.lock, flags);
 }
 
-static int __guc_action_register_multi_lrc(struct intel_guc *guc,
-                                          struct intel_context *ce,
-                                          struct guc_ctxt_registration_info *info,
-                                          bool loop)
+static int __guc_action_register_multi_lrc_v69(struct intel_guc *guc,
+                                              struct intel_context *ce,
+                                              u32 guc_id,
+                                              u32 offset,
+                                              bool loop)
+{
+       struct intel_context *child;
+       u32 action[4 + MAX_ENGINE_INSTANCE];
+       int len = 0;
+
+       GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
+
+       action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
+       action[len++] = guc_id;
+       action[len++] = ce->parallel.number_children + 1;
+       action[len++] = offset;
+       for_each_child(ce, child) {
+               offset += sizeof(struct guc_lrc_desc_v69);
+               action[len++] = offset;
+       }
+
+       return guc_submission_send_busy_loop(guc, action, len, 0, loop);
+}
+
+static int __guc_action_register_multi_lrc_v70(struct intel_guc *guc,
+                                              struct intel_context *ce,
+                                              struct guc_ctxt_registration_info *info,
+                                              bool loop)
 {
        struct intel_context *child;
        u32 action[13 + (MAX_ENGINE_INSTANCE * 2)];
@@ -2190,9 +2280,24 @@ static int __guc_action_register_multi_lrc(struct intel_guc *guc,
        return guc_submission_send_busy_loop(guc, action, len, 0, loop);
 }
 
-static int __guc_action_register_context(struct intel_guc *guc,
-                                        struct guc_ctxt_registration_info *info,
-                                        bool loop)
+static int __guc_action_register_context_v69(struct intel_guc *guc,
+                                            u32 guc_id,
+                                            u32 offset,
+                                            bool loop)
+{
+       u32 action[] = {
+               INTEL_GUC_ACTION_REGISTER_CONTEXT,
+               guc_id,
+               offset,
+       };
+
+       return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+                                            0, loop);
+}
+
+static int __guc_action_register_context_v70(struct intel_guc *guc,
+                                            struct guc_ctxt_registration_info *info,
+                                            bool loop)
 {
        u32 action[] = {
                INTEL_GUC_ACTION_REGISTER_CONTEXT,
@@ -2213,24 +2318,52 @@ static int __guc_action_register_context(struct intel_guc *guc,
                                             0, loop);
 }
 
-static void prepare_context_registration_info(struct intel_context *ce,
-                                             struct guc_ctxt_registration_info *info);
+static void prepare_context_registration_info_v69(struct intel_context *ce);
+static void prepare_context_registration_info_v70(struct intel_context *ce,
+                                                 struct guc_ctxt_registration_info *info);
 
-static int register_context(struct intel_context *ce, bool loop)
+static int
+register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop)
+{
+       u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool_v69) +
+               ce->guc_id.id * sizeof(struct guc_lrc_desc_v69);
+
+       prepare_context_registration_info_v69(ce);
+
+       if (intel_context_is_parent(ce))
+               return __guc_action_register_multi_lrc_v69(guc, ce, ce->guc_id.id,
+                                                          offset, loop);
+       else
+               return __guc_action_register_context_v69(guc, ce->guc_id.id,
+                                                        offset, loop);
+}
+
+static int
+register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop)
 {
        struct guc_ctxt_registration_info info;
+
+       prepare_context_registration_info_v70(ce, &info);
+
+       if (intel_context_is_parent(ce))
+               return __guc_action_register_multi_lrc_v70(guc, ce, &info, loop);
+       else
+               return __guc_action_register_context_v70(guc, &info, loop);
+}
+
+static int register_context(struct intel_context *ce, bool loop)
+{
        struct intel_guc *guc = ce_to_guc(ce);
        int ret;
 
        GEM_BUG_ON(intel_context_is_child(ce));
        trace_intel_context_register(ce);
 
-       prepare_context_registration_info(ce, &info);
-
-       if (intel_context_is_parent(ce))
-               ret = __guc_action_register_multi_lrc(guc, ce, &info, loop);
+       if (guc->fw.major_ver_found >= 70)
+               ret = register_context_v70(guc, ce, loop);
        else
-               ret = __guc_action_register_context(guc, &info, loop);
+               ret = register_context_v69(guc, ce, loop);
+
        if (likely(!ret)) {
                unsigned long flags;
 
@@ -2238,7 +2371,8 @@ static int register_context(struct intel_context *ce, bool loop)
                set_context_registered(ce);
                spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 
-               guc_context_policy_init(ce, loop);
+               if (guc->fw.major_ver_found >= 70)
+                       guc_context_policy_init_v70(ce, loop);
        }
 
        return ret;
@@ -2335,7 +2469,7 @@ static int __guc_context_set_context_policies(struct intel_guc *guc,
                                        0, loop);
 }
 
-static int guc_context_policy_init(struct intel_context *ce, bool loop)
+static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
 {
        struct intel_engine_cs *engine = ce->engine;
        struct intel_guc *guc = &engine->gt->uc.guc;
@@ -2394,8 +2528,108 @@ static int guc_context_policy_init(struct intel_context *ce, bool loop)
        return ret;
 }
 
-static void prepare_context_registration_info(struct intel_context *ce,
-                                             struct guc_ctxt_registration_info *info)
+static void guc_context_policy_init_v69(struct intel_engine_cs *engine,
+                                       struct guc_lrc_desc_v69 *desc)
+{
+       desc->policy_flags = 0;
+
+       if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
+               desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69;
+
+       /* NB: For both of these, zero means disabled. */
+       desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
+       desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+}
+
+static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
+{
+       /*
+        * this matches the mapping we do in map_i915_prio_to_guc_prio()
+        * (e.g. prio < I915_PRIORITY_NORMAL maps to GUC_CLIENT_PRIORITY_NORMAL)
+        */
+       switch (prio) {
+       default:
+               MISSING_CASE(prio);
+               fallthrough;
+       case GUC_CLIENT_PRIORITY_KMD_NORMAL:
+               return GEN12_CTX_PRIORITY_NORMAL;
+       case GUC_CLIENT_PRIORITY_NORMAL:
+               return GEN12_CTX_PRIORITY_LOW;
+       case GUC_CLIENT_PRIORITY_HIGH:
+       case GUC_CLIENT_PRIORITY_KMD_HIGH:
+               return GEN12_CTX_PRIORITY_HIGH;
+       }
+}
+
+static void prepare_context_registration_info_v69(struct intel_context *ce)
+{
+       struct intel_engine_cs *engine = ce->engine;
+       struct intel_guc *guc = &engine->gt->uc.guc;
+       u32 ctx_id = ce->guc_id.id;
+       struct guc_lrc_desc_v69 *desc;
+       struct intel_context *child;
+
+       GEM_BUG_ON(!engine->mask);
+
+       /*
+        * Ensure LRC + CT vmas are in same region as write barrier is done
+        * based on CT vma region.
+        */
+       GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
+                  i915_gem_object_is_lmem(ce->ring->vma->obj));
+
+       desc = __get_lrc_desc_v69(guc, ctx_id);
+       desc->engine_class = engine_class_to_guc_class(engine->class);
+       desc->engine_submit_mask = engine->logical_mask;
+       desc->hw_context_desc = ce->lrc.lrca;
+       desc->priority = ce->guc_state.prio;
+       desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
+       guc_context_policy_init_v69(engine, desc);
+
+       /*
+        * If context is a parent, we need to register a process descriptor
+        * describing a work queue and register all child contexts.
+        */
+       if (intel_context_is_parent(ce)) {
+               struct guc_process_desc_v69 *pdesc;
+
+               ce->parallel.guc.wqi_tail = 0;
+               ce->parallel.guc.wqi_head = 0;
+
+               desc->process_desc = i915_ggtt_offset(ce->state) +
+                       __get_parent_scratch_offset(ce);
+               desc->wq_addr = i915_ggtt_offset(ce->state) +
+                       __get_wq_offset(ce);
+               desc->wq_size = WQ_SIZE;
+
+               pdesc = __get_process_desc_v69(ce);
+               memset(pdesc, 0, sizeof(*(pdesc)));
+               pdesc->stage_id = ce->guc_id.id;
+               pdesc->wq_base_addr = desc->wq_addr;
+               pdesc->wq_size_bytes = desc->wq_size;
+               pdesc->wq_status = WQ_STATUS_ACTIVE;
+
+               ce->parallel.guc.wq_head = &pdesc->head;
+               ce->parallel.guc.wq_tail = &pdesc->tail;
+               ce->parallel.guc.wq_status = &pdesc->wq_status;
+
+               for_each_child(ce, child) {
+                       desc = __get_lrc_desc_v69(guc, child->guc_id.id);
+
+                       desc->engine_class =
+                               engine_class_to_guc_class(engine->class);
+                       desc->hw_context_desc = child->lrc.lrca;
+                       desc->priority = ce->guc_state.prio;
+                       desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
+                       guc_context_policy_init_v69(engine, desc);
+               }
+
+               clear_children_join_go_memory(ce);
+       }
+}
+
+static void prepare_context_registration_info_v70(struct intel_context *ce,
+                                                 struct guc_ctxt_registration_info *info)
 {
        struct intel_engine_cs *engine = ce->engine;
        struct intel_guc *guc = &engine->gt->uc.guc;
@@ -2420,6 +2654,8 @@ static void prepare_context_registration_info(struct intel_context *ce,
         */
        info->hwlrca_lo = lower_32_bits(ce->lrc.lrca);
        info->hwlrca_hi = upper_32_bits(ce->lrc.lrca);
+       if (engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
+               info->hwlrca_lo |= map_guc_prio_to_lrc_desc_prio(ce->guc_state.prio);
        info->flags = CONTEXT_REGISTRATION_FLAG_KMD;
 
        /*
@@ -2443,10 +2679,14 @@ static void prepare_context_registration_info(struct intel_context *ce,
                info->wq_base_hi = upper_32_bits(wq_base_offset);
                info->wq_size = WQ_SIZE;
 
-               wq_desc = __get_wq_desc(ce);
+               wq_desc = __get_wq_desc_v70(ce);
                memset(wq_desc, 0, sizeof(*wq_desc));
                wq_desc->wq_status = WQ_STATUS_ACTIVE;
 
+               ce->parallel.guc.wq_head = &wq_desc->head;
+               ce->parallel.guc.wq_tail = &wq_desc->tail;
+               ce->parallel.guc.wq_status = &wq_desc->wq_status;
+
                clear_children_join_go_memory(ce);
        }
 }
@@ -2761,11 +3001,21 @@ static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
                                                 u16 guc_id,
                                                 u32 preemption_timeout)
 {
-       struct context_policy policy;
+       if (guc->fw.major_ver_found >= 70) {
+               struct context_policy policy;
 
-       __guc_context_policy_start_klv(&policy, guc_id);
-       __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
-       __guc_context_set_context_policies(guc, &policy, true);
+               __guc_context_policy_start_klv(&policy, guc_id);
+               __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
+               __guc_context_set_context_policies(guc, &policy, true);
+       } else {
+               u32 action[] = {
+                       INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT,
+                       guc_id,
+                       preemption_timeout
+               };
+
+               intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+       }
 }
 
 static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
@@ -3013,11 +3263,21 @@ static int guc_context_alloc(struct intel_context *ce)
 static void __guc_context_set_prio(struct intel_guc *guc,
                                   struct intel_context *ce)
 {
-       struct context_policy policy;
+       if (guc->fw.major_ver_found >= 70) {
+               struct context_policy policy;
 
-       __guc_context_policy_start_klv(&policy, ce->guc_id.id);
-       __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
-       __guc_context_set_context_policies(guc, &policy, true);
+               __guc_context_policy_start_klv(&policy, ce->guc_id.id);
+               __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
+               __guc_context_set_context_policies(guc, &policy, true);
+       } else {
+               u32 action[] = {
+                       INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY,
+                       ce->guc_id.id,
+                       ce->guc_state.prio,
+               };
+
+               guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+       }
 }
 
 static void guc_context_set_prio(struct intel_guc *guc,
@@ -4527,17 +4787,19 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
                guc_log_context_priority(p, ce);
 
                if (intel_context_is_parent(ce)) {
-                       struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
                        struct intel_context *child;
 
                        drm_printf(p, "\t\tNumber children: %u\n",
                                   ce->parallel.number_children);
-                       drm_printf(p, "\t\tWQI Head: %u\n",
-                                  READ_ONCE(wq_desc->head));
-                       drm_printf(p, "\t\tWQI Tail: %u\n",
-                                  READ_ONCE(wq_desc->tail));
-                       drm_printf(p, "\t\tWQI Status: %u\n\n",
-                                  READ_ONCE(wq_desc->wq_status));
+
+                       if (ce->parallel.guc.wq_status) {
+                               drm_printf(p, "\t\tWQI Head: %u\n",
+                                          READ_ONCE(*ce->parallel.guc.wq_head));
+                               drm_printf(p, "\t\tWQI Tail: %u\n",
+                                          READ_ONCE(*ce->parallel.guc.wq_tail));
+                               drm_printf(p, "\t\tWQI Status: %u\n\n",
+                                          READ_ONCE(*ce->parallel.guc.wq_status));
+                       }
 
                        if (ce->engine->emit_bb_start ==
                            emit_bb_start_parent_no_preempt_mid_batch) {
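
Taken together, the intel_guc_submission.c hunks split context registration and policy setup into GuC-version-specific paths: firmware 70+ keeps the registration-info/KLV interface, while 69 falls back to a statically allocated guc_lrc_desc_v69 pool and dedicated H2G actions, with ce->parallel.guc.wq_head/wq_tail/wq_status pointing at whichever descriptor layout is in use so the common work-queue code stays shared. A condensed sketch of the dispatch idiom (tracing and state updates trimmed; see register_context() above for the full version):

static int example_register_context(struct intel_context *ce, bool loop)
{
	struct intel_guc *guc = ce_to_guc(ce);

	if (guc->fw.major_ver_found >= 70)
		return register_context_v70(guc, ce, loop);	/* info + KLV policies */

	return register_context_v69(guc, ce, loop);		/* legacy LRC descriptor pool */
}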
index 2ff55b9..703f42b 100644 (file)
@@ -70,6 +70,10 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
        fw_def(BROXTON,      0, guc_def(bxt,  70, 1, 1)) \
        fw_def(SKYLAKE,      0, guc_def(skl,  70, 1, 1))
 
+#define INTEL_GUC_FIRMWARE_DEFS_FALLBACK(fw_def, guc_def) \
+       fw_def(ALDERLAKE_P,  0, guc_def(adlp, 69, 0, 3)) \
+       fw_def(ALDERLAKE_S,  0, guc_def(tgl,  69, 0, 3))
+
 #define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
        fw_def(ALDERLAKE_P,  0, huc_def(tgl,  7, 9, 3)) \
        fw_def(ALDERLAKE_S,  0, huc_def(tgl,  7, 9, 3)) \
@@ -105,6 +109,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
        MODULE_FIRMWARE(uc_);
 
 INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
+INTEL_GUC_FIRMWARE_DEFS_FALLBACK(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
 INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)
 
 /* The below structs and macros are used to iterate across the list of blobs */
@@ -149,6 +154,9 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
        static const struct uc_fw_platform_requirement blobs_guc[] = {
                INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
        };
+       static const struct uc_fw_platform_requirement blobs_guc_fallback[] = {
+               INTEL_GUC_FIRMWARE_DEFS_FALLBACK(MAKE_FW_LIST, GUC_FW_BLOB)
+       };
        static const struct uc_fw_platform_requirement blobs_huc[] = {
                INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
        };
@@ -179,12 +187,29 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
                if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
                        const struct uc_fw_blob *blob = &fw_blobs[i].blob;
                        uc_fw->path = blob->path;
+                       uc_fw->wanted_path = blob->path;
                        uc_fw->major_ver_wanted = blob->major;
                        uc_fw->minor_ver_wanted = blob->minor;
                        break;
                }
        }
 
+       if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) {
+               const struct uc_fw_platform_requirement *blobs = blobs_guc_fallback;
+               u32 count = ARRAY_SIZE(blobs_guc_fallback);
+
+               for (i = 0; i < count && p <= blobs[i].p; i++) {
+                       if (p == blobs[i].p && rev >= blobs[i].rev) {
+                               const struct uc_fw_blob *blob = &blobs[i].blob;
+
+                               uc_fw->fallback.path = blob->path;
+                               uc_fw->fallback.major_ver = blob->major;
+                               uc_fw->fallback.minor_ver = blob->minor;
+                               break;
+                       }
+               }
+       }
+
        /* make sure the list is ordered as expected */
        if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
                for (i = 1; i < fw_count; i++) {
@@ -338,7 +363,24 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
        __force_fw_fetch_failures(uc_fw, -EINVAL);
        __force_fw_fetch_failures(uc_fw, -ESTALE);
 
-       err = request_firmware(&fw, uc_fw->path, dev);
+       err = firmware_request_nowarn(&fw, uc_fw->path, dev);
+       if (err && !intel_uc_fw_is_overridden(uc_fw) && uc_fw->fallback.path) {
+               err = firmware_request_nowarn(&fw, uc_fw->fallback.path, dev);
+               if (!err) {
+                       drm_notice(&i915->drm,
+                                  "%s firmware %s is recommended, but only %s was found\n",
+                                  intel_uc_fw_type_repr(uc_fw->type),
+                                  uc_fw->wanted_path,
+                                  uc_fw->fallback.path);
+                       drm_info(&i915->drm,
+                                "Consider updating your linux-firmware pkg or downloading from %s\n",
+                                INTEL_UC_FIRMWARE_URL);
+
+                       uc_fw->path = uc_fw->fallback.path;
+                       uc_fw->major_ver_wanted = uc_fw->fallback.major_ver;
+                       uc_fw->minor_ver_wanted = uc_fw->fallback.minor_ver;
+               }
+       }
        if (err)
                goto fail;
 
@@ -437,8 +479,8 @@ fail:
                                  INTEL_UC_FIRMWARE_MISSING :
                                  INTEL_UC_FIRMWARE_ERROR);
 
-       drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
-                  intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+       i915_probe_error(i915, "%s firmware %s: fetch failed with error %d\n",
+                        intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
        drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
                 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
 
@@ -796,7 +838,13 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
 void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
 {
        drm_printf(p, "%s firmware: %s\n",
-                  intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+                  intel_uc_fw_type_repr(uc_fw->type), uc_fw->wanted_path);
+       if (uc_fw->fallback.path) {
+               drm_printf(p, "%s firmware fallback: %s\n",
+                          intel_uc_fw_type_repr(uc_fw->type), uc_fw->fallback.path);
+               drm_printf(p, "fallback selected: %s\n",
+                          str_yes_no(uc_fw->path == uc_fw->fallback.path));
+       }
        drm_printf(p, "\tstatus: %s\n",
                   intel_uc_fw_status_repr(uc_fw->status));
        drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
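
The fetch path now asks for the preferred blob with firmware_request_nowarn() and, for GuC only, retries with the fallback entry (GuC 69.0.3 on ADL-P/ADL-S per the DEFS_FALLBACK table) before reporting a failure. Condensed sketch of the two-step request (logging trimmed; see the full intel_uc_fw_fetch() hunk above):

err = firmware_request_nowarn(&fw, uc_fw->path, dev);
if (err && !intel_uc_fw_is_overridden(uc_fw) && uc_fw->fallback.path) {
	err = firmware_request_nowarn(&fw, uc_fw->fallback.path, dev);
	if (!err)
		uc_fw->path = uc_fw->fallback.path;	/* run with the older blob */
}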
index 3229018..562acdf 100644 (file)
@@ -74,6 +74,7 @@ struct intel_uc_fw {
                const enum intel_uc_fw_status status;
                enum intel_uc_fw_status __status; /* no accidental overwrites */
        };
+       const char *wanted_path;
        const char *path;
        bool user_overridden;
        size_t size;
@@ -98,6 +99,12 @@ struct intel_uc_fw {
        u16 major_ver_found;
        u16 minor_ver_found;
 
+       struct {
+               const char *path;
+               u16 major_ver;
+               u16 minor_ver;
+       } fallback;
+
        u32 rsa_size;
        u32 ucode_size;
 
index c849533..3f5750c 100644 (file)
@@ -207,6 +207,7 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
 
        ret = dcss_submodules_init(dcss);
        if (ret) {
+               of_node_put(dcss->of_port);
                dev_err(dev, "submodules initialization failed\n");
                goto clks_err;
        }
@@ -237,6 +238,8 @@ void dcss_dev_destroy(struct dcss_dev *dcss)
                dcss_clocks_disable(dcss);
        }
 
+       of_node_put(dcss->of_port);
+
        pm_runtime_disable(dcss->dev);
 
        dcss_submodules_stop(dcss);
index c960144..a189982 100644 (file)
@@ -713,7 +713,7 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
        of_property_read_u32(dev->of_node, "hpd-reliable-delay-ms", &reliable_ms);
        desc->delay.hpd_reliable = reliable_ms;
        of_property_read_u32(dev->of_node, "hpd-absent-delay-ms", &absent_ms);
-       desc->delay.hpd_reliable = absent_ms;
+       desc->delay.hpd_absent = absent_ms;
 
        /* Power the panel on so we can read the EDID */
        ret = pm_runtime_get_sync(dev);
index 191c560..6b25b2f 100644 (file)
@@ -190,7 +190,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 }
 EXPORT_SYMBOL(drm_sched_entity_flush);
 
-static void drm_sched_entity_kill_jobs_irq_work(struct irq_work *wrk)
+static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
 {
        struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
 
@@ -207,8 +207,8 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
        struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
                                                 finish_cb);
 
-       init_irq_work(&job->work, drm_sched_entity_kill_jobs_irq_work);
-       irq_work_queue(&job->work);
+       INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
+       schedule_work(&job->work);
 }
 
 static struct dma_fence *
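
The scheduler change replaces the irq_work used to kill leftover jobs with a regular work item, presumably so the cleanup runs in process context (a work_struct handler may sleep, an irq_work handler may not). Generic shape of that deferral pattern (illustrative only, not drm-specific; the example_ctx type is hypothetical):

#include <linux/workqueue.h>

struct example_ctx {			/* hypothetical container */
	struct work_struct work;
};

static void example_cleanup_work(struct work_struct *wrk)
{
	struct example_ctx *ctx = container_of(wrk, struct example_ctx, work);

	/* heavy teardown that may sleep goes here */
	(void)ctx;
}

static void example_defer_cleanup(struct example_ctx *ctx)
{
	INIT_WORK(&ctx->work, example_cleanup_work);
	schedule_work(&ctx->work);	/* runs later on the system workqueue */
}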
index 3d6f8ee..630cfa4 100644 (file)
@@ -388,9 +388,9 @@ static irqreturn_t cdns_i2c_slave_isr(void *ptr)
  */
 static irqreturn_t cdns_i2c_master_isr(void *ptr)
 {
-       unsigned int isr_status, avail_bytes, updatetx;
+       unsigned int isr_status, avail_bytes;
        unsigned int bytes_to_send;
-       bool hold_quirk;
+       bool updatetx;
        struct cdns_i2c *id = ptr;
        /* Signal completion only after everything is updated */
        int done_flag = 0;
@@ -410,11 +410,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
         * Check if transfer size register needs to be updated again for a
         * large data receive operation.
         */
-       updatetx = 0;
-       if (id->recv_count > id->curr_recv_count)
-               updatetx = 1;
-
-       hold_quirk = (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) && updatetx;
+       updatetx = id->recv_count > id->curr_recv_count;
 
        /* When receiving, handle data interrupt and completion interrupt */
        if (id->p_recv_buf &&
@@ -445,7 +441,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
                                break;
                        }
 
-                       if (cdns_is_holdquirk(id, hold_quirk))
+                       if (cdns_is_holdquirk(id, updatetx))
                                break;
                }
 
@@ -456,7 +452,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
                 * maintain transfer size non-zero while performing a large
                 * receive operation.
                 */
-               if (cdns_is_holdquirk(id, hold_quirk)) {
+               if (cdns_is_holdquirk(id, updatetx)) {
                        /* wait while fifo is full */
                        while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
                               (id->curr_recv_count - CDNS_I2C_FIFO_DEPTH))
@@ -478,22 +474,6 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
                                                  CDNS_I2C_XFER_SIZE_OFFSET);
                                id->curr_recv_count = id->recv_count;
                        }
-               } else if (id->recv_count && !hold_quirk &&
-                                               !id->curr_recv_count) {
-
-                       /* Set the slave address in address register*/
-                       cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
-                                               CDNS_I2C_ADDR_OFFSET);
-
-                       if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) {
-                               cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
-                                               CDNS_I2C_XFER_SIZE_OFFSET);
-                               id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE;
-                       } else {
-                               cdns_i2c_writereg(id->recv_count,
-                                               CDNS_I2C_XFER_SIZE_OFFSET);
-                               id->curr_recv_count = id->recv_count;
-                       }
                }
 
                /* Clear hold (if not repeated start) and signal completion */
index e9e2db6..78fb1a4 100644 (file)
@@ -66,7 +66,7 @@
 
 /* IMX I2C registers:
  * the I2C register offset is different between SoCs,
- * to provid support for all these chips, split the
+ * to provide support for all these chips, split the
  * register offset into a fixed base address and a
  * variable shift value, then the full register offset
  * will be calculated by
index 56aa424..815cc56 100644 (file)
@@ -49,7 +49,7 @@
 #define MLXCPLD_LPCI2C_NACK_IND                2
 
 #define MLXCPLD_I2C_FREQ_1000KHZ_SET   0x04
-#define MLXCPLD_I2C_FREQ_400KHZ_SET    0x0c
+#define MLXCPLD_I2C_FREQ_400KHZ_SET    0x0e
 #define MLXCPLD_I2C_FREQ_100KHZ_SET    0x42
 
 enum mlxcpld_i2c_frequency {
index 20e53b1..c8539d0 100644 (file)
@@ -7304,7 +7304,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
                goto abort;
        conf->mddev = mddev;
 
-       if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
+       ret = -ENOMEM;
+       conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!conf->stripe_hashtbl)
                goto abort;
 
        /* We init hash_locks[0] separately to that it can be used
index 2e0aa74..95ef971 100644 (file)
@@ -13,10 +13,13 @@ lkdtm-$(CONFIG_LKDTM)               += cfi.o
 lkdtm-$(CONFIG_LKDTM)          += fortify.o
 lkdtm-$(CONFIG_PPC_64S_HASH_MMU)       += powerpc.o
 
-KASAN_SANITIZE_rodata.o                := n
 KASAN_SANITIZE_stackleak.o     := n
-KCOV_INSTRUMENT_rodata.o       := n
-CFLAGS_REMOVE_rodata.o         += $(CC_FLAGS_LTO)
+
+KASAN_SANITIZE_rodata.o                        := n
+KCSAN_SANITIZE_rodata.o                        := n
+KCOV_INSTRUMENT_rodata.o               := n
+OBJECT_FILES_NON_STANDARD_rodata.o     := y
+CFLAGS_REMOVE_rodata.o                 += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS)
 
 OBJCOPYFLAGS :=
 OBJCOPYFLAGS_rodata_objcopy.o  := \
index 86e867f..033be55 100644 (file)
@@ -1298,8 +1298,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
        /*
         * omap_device_pm_domain has callbacks to enable the main
         * functional clock, interface clock and also configure the
-        * SYSCONFIG register of omap devices. The callback will be invoked
-        * as part of pm_runtime_get_sync.
+        * SYSCONFIG register to clear any boot loader set voltage
+        * capabilities before calling sdhci_setup_host(). The
+        * callback will be invoked as part of pm_runtime_get_sync.
         */
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_autosuspend_delay(dev, 50);
@@ -1441,7 +1442,8 @@ static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
 
-       sdhci_runtime_suspend_host(host);
+       if (omap_host->con != -EINVAL)
+               sdhci_runtime_suspend_host(host);
 
        sdhci_omap_context_save(omap_host);
 
@@ -1458,10 +1460,10 @@ static int __maybe_unused sdhci_omap_runtime_resume(struct device *dev)
 
        pinctrl_pm_select_default_state(dev);
 
-       if (omap_host->con != -EINVAL)
+       if (omap_host->con != -EINVAL) {
                sdhci_omap_context_restore(omap_host);
-
-       sdhci_runtime_resume_host(host, 0);
+               sdhci_runtime_resume_host(host, 0);
+       }
 
        return 0;
 }
index 889e403..93da236 100644 (file)
@@ -850,9 +850,10 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
        unsigned int tRP_ps;
        bool use_half_period;
        int sample_delay_ps, sample_delay_factor;
-       u16 busy_timeout_cycles;
+       unsigned int busy_timeout_cycles;
        u8 wrn_dly_sel;
        unsigned long clk_rate, min_rate;
+       u64 busy_timeout_ps;
 
        if (sdr->tRC_min >= 30000) {
                /* ONFI non-EDO modes [0-3] */
@@ -885,7 +886,8 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
        addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
        data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
        data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
-       busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
+       busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
+       busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
 
        hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
                      BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
index febfcf2..9a247eb 100644 (file)
@@ -449,7 +449,7 @@ out:
        dev_put(amt->dev);
 }
 
-/* Non-existant group is created as INCLUDE {empty}:
+/* Non-existent group is created as INCLUDE {empty}:
  *
  * RFC 3376 - 5.1. Action on Change of Interface State
  *
index 40bb7c2..701f1d1 100644 (file)
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_NET_DSA_AR9331)   += ar9331.o
 obj-$(CONFIG_NET_DSA_QCA8K)    += qca8k.o
+qca8k-y                        += qca8k-common.o qca8k-8xxx.o
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
new file mode 100644 (file)
index 0000000..1d3e778
--- /dev/null
@@ -0,0 +1,2064 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/mdio.h>
+#include <linux/phylink.h>
+#include <linux/gpio/consumer.h>
+#include <linux/etherdevice.h>
+#include <linux/dsa/tag_qca.h>
+
+#include "qca8k.h"
+
+static void
+qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
+{
+       regaddr >>= 1;
+       *r1 = regaddr & 0x1e;
+
+       regaddr >>= 5;
+       *r2 = regaddr & 0x7;
+
+       regaddr >>= 3;
+       *page = regaddr & 0x3ff;
+}
+
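+/* Worked example of the split above, using 0x16ac (the max_register value
+ * given further down for qca8k_regmap_config) purely as an illustration:
+ *
+ *   0x16ac >> 1          = 0x0b56
+ *   r1   = 0x0b56 & 0x1e = 0x16   low part of the register number
+ *   0x0b56 >> 5          = 0x5a
+ *   r2   = 0x5a & 0x7    = 0x2    folded into the MDIO address as 0x10 | r2
+ *   0x5a >> 3            = 0x0b
+ *   page = 0x0b & 0x3ff  = 0x0b   written via qca8k_set_page() before the access
+ */
+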
+static int
+qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
+{
+       u16 *cached_lo = &priv->mdio_cache.lo;
+       struct mii_bus *bus = priv->bus;
+       int ret;
+
+       if (lo == *cached_lo)
+               return 0;
+
+       ret = bus->write(bus, phy_id, regnum, lo);
+       if (ret < 0)
+               dev_err_ratelimited(&bus->dev,
+                                   "failed to write qca8k 32bit lo register\n");
+
+       *cached_lo = lo;
+       return 0;
+}
+
+static int
+qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
+{
+       u16 *cached_hi = &priv->mdio_cache.hi;
+       struct mii_bus *bus = priv->bus;
+       int ret;
+
+       if (hi == *cached_hi)
+               return 0;
+
+       ret = bus->write(bus, phy_id, regnum, hi);
+       if (ret < 0)
+               dev_err_ratelimited(&bus->dev,
+                                   "failed to write qca8k 32bit hi register\n");
+
+       *cached_hi = hi;
+       return 0;
+}
+
+static int
+qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
+{
+       int ret;
+
+       ret = bus->read(bus, phy_id, regnum);
+       if (ret >= 0) {
+               *val = ret;
+               ret = bus->read(bus, phy_id, regnum + 1);
+               *val |= ret << 16;
+       }
+
+       if (ret < 0) {
+               dev_err_ratelimited(&bus->dev,
+                                   "failed to read qca8k 32bit register\n");
+               *val = 0;
+               return ret;
+       }
+
+       return 0;
+}
+
+static void
+qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
+{
+       u16 lo, hi;
+       int ret;
+
+       lo = val & 0xffff;
+       hi = (u16)(val >> 16);
+
+       ret = qca8k_set_lo(priv, phy_id, regnum, lo);
+       if (ret >= 0)
+               ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
+}
+
+static int
+qca8k_set_page(struct qca8k_priv *priv, u16 page)
+{
+       u16 *cached_page = &priv->mdio_cache.page;
+       struct mii_bus *bus = priv->bus;
+       int ret;
+
+       if (page == *cached_page)
+               return 0;
+
+       ret = bus->write(bus, 0x18, 0, page);
+       if (ret < 0) {
+               dev_err_ratelimited(&bus->dev,
+                                   "failed to set qca8k page\n");
+               return ret;
+       }
+
+       *cached_page = page;
+       usleep_range(1000, 2000);
+       return 0;
+}
+
+static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
+{
+       struct qca8k_mgmt_eth_data *mgmt_eth_data;
+       struct qca8k_priv *priv = ds->priv;
+       struct qca_mgmt_ethhdr *mgmt_ethhdr;
+       u8 len, cmd;
+
+       mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
+       mgmt_eth_data = &priv->mgmt_eth_data;
+
+       cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
+       len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);
+
+       /* Make sure the seq number matches the requested packet */
+       if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
+               mgmt_eth_data->ack = true;
+
+       if (cmd == MDIO_READ) {
+               mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;
+
+               /* Get the rest of the 12 bytes of data.
+                * The read/write function will extract the requested data.
+                */
+               if (len > QCA_HDR_MGMT_DATA1_LEN)
+                       memcpy(mgmt_eth_data->data + 1, skb->data,
+                              QCA_HDR_MGMT_DATA2_LEN);
+       }
+
+       complete(&mgmt_eth_data->rw_done);
+}
+
+static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
+                                              int priority, unsigned int len)
+{
+       struct qca_mgmt_ethhdr *mgmt_ethhdr;
+       unsigned int real_len;
+       struct sk_buff *skb;
+       u32 *data2;
+       u16 hdr;
+
+       skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
+       if (!skb)
+               return NULL;
+
+       /* Max value for the len reg is 15 (0xf) but the switch actually returns
+        * 16 bytes. For some reason the steps are:
+        * 0: nothing
+        * 1-4: first 4 bytes
+        * 5-6: first 12 bytes
+        * 7-15: all 16 bytes
+        */
+       if (len == 16)
+               real_len = 15;
+       else
+               real_len = len;
+
+       skb_reset_mac_header(skb);
+       skb_set_network_header(skb, skb->len);
+
+       mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
+
+       hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
+       hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
+       hdr |= QCA_HDR_XMIT_FROM_CPU;
+       hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
+       hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
+
+       mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
+       mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
+       mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
+       mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
+                                          QCA_HDR_MGMT_CHECK_CODE_VAL);
+
+       if (cmd == MDIO_WRITE)
+               mgmt_ethhdr->mdio_data = *val;
+
+       mgmt_ethhdr->hdr = htons(hdr);
+
+       data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
+       if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
+               memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);
+
+       return skb;
+}
+
+static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
+{
+       struct qca_mgmt_ethhdr *mgmt_ethhdr;
+
+       mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
+       mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
+}
+
+static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+       struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
+       struct sk_buff *skb;
+       bool ack;
+       int ret;
+
+       skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
+                                     QCA8K_ETHERNET_MDIO_PRIORITY, len);
+       if (!skb)
+               return -ENOMEM;
+
+       mutex_lock(&mgmt_eth_data->mutex);
+
+       /* Check if mgmt_master is operational */
+       if (!priv->mgmt_master) {
+               kfree_skb(skb);
+               mutex_unlock(&mgmt_eth_data->mutex);
+               return -EINVAL;
+       }
+
+       skb->dev = priv->mgmt_master;
+
+       reinit_completion(&mgmt_eth_data->rw_done);
+
+       /* Increment seq_num and set it in the mdio pkt */
+       mgmt_eth_data->seq++;
+       qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+       mgmt_eth_data->ack = false;
+
+       dev_queue_xmit(skb);
+
+       ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+                                         msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+
+       *val = mgmt_eth_data->data[0];
+       if (len > QCA_HDR_MGMT_DATA1_LEN)
+               memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
+
+       ack = mgmt_eth_data->ack;
+
+       mutex_unlock(&mgmt_eth_data->mutex);
+
+       if (ret <= 0)
+               return -ETIMEDOUT;
+
+       if (!ack)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+       struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
+       struct sk_buff *skb;
+       bool ack;
+       int ret;
+
+       skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
+                                     QCA8K_ETHERNET_MDIO_PRIORITY, len);
+       if (!skb)
+               return -ENOMEM;
+
+       mutex_lock(&mgmt_eth_data->mutex);
+
+       /* Check if mgmt_master is operational */
+       if (!priv->mgmt_master) {
+               kfree_skb(skb);
+               mutex_unlock(&mgmt_eth_data->mutex);
+               return -EINVAL;
+       }
+
+       skb->dev = priv->mgmt_master;
+
+       reinit_completion(&mgmt_eth_data->rw_done);
+
+       /* Increment seq_num and set it in the mdio pkt */
+       mgmt_eth_data->seq++;
+       qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+       mgmt_eth_data->ack = false;
+
+       dev_queue_xmit(skb);
+
+       ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+                                         msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+
+       ack = mgmt_eth_data->ack;
+
+       mutex_unlock(&mgmt_eth_data->mutex);
+
+       if (ret <= 0)
+               return -ETIMEDOUT;
+
+       if (!ack)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int
+qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+{
+       u32 val = 0;
+       int ret;
+
+       ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
+       if (ret)
+               return ret;
+
+       val &= ~mask;
+       val |= write_val;
+
+       return qca8k_write_eth(priv, reg, &val, sizeof(val));
+}
+
+static int
+qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+       struct mii_bus *bus = priv->bus;
+       u16 r1, r2, page;
+       int ret;
+
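+       /* Prefer the Ethernet management protocol; fall back to the MDIO
+        * access below if it is unavailable or fails.
+        */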
+       if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
+               return 0;
+
+       qca8k_split_addr(reg, &r1, &r2, &page);
+
+       mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+       ret = qca8k_set_page(priv, page);
+       if (ret < 0)
+               goto exit;
+
+       ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
+
+exit:
+       mutex_unlock(&bus->mdio_lock);
+       return ret;
+}
+
+static int
+qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+       struct mii_bus *bus = priv->bus;
+       u16 r1, r2, page;
+       int ret;
+
+       if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
+               return 0;
+
+       qca8k_split_addr(reg, &r1, &r2, &page);
+
+       mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+       ret = qca8k_set_page(priv, page);
+       if (ret < 0)
+               goto exit;
+
+       qca8k_mii_write32(priv, 0x10 | r2, r1, val);
+
+exit:
+       mutex_unlock(&bus->mdio_lock);
+       return ret;
+}
+
+static int
+qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+       struct mii_bus *bus = priv->bus;
+       u16 r1, r2, page;
+       u32 val;
+       int ret;
+
+       if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
+               return 0;
+
+       qca8k_split_addr(reg, &r1, &r2, &page);
+
+       mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+       ret = qca8k_set_page(priv, page);
+       if (ret < 0)
+               goto exit;
+
+       ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
+       if (ret < 0)
+               goto exit;
+
+       val &= ~mask;
+       val |= write_val;
+       qca8k_mii_write32(priv, 0x10 | r2, r1, val);
+
+exit:
+       mutex_unlock(&bus->mdio_lock);
+
+       return ret;
+}
+
+static struct regmap_config qca8k_regmap_config = {
+       .reg_bits = 16,
+       .val_bits = 32,
+       .reg_stride = 4,
+       .max_register = 0x16ac, /* end MIB - Port6 range */
+       .reg_read = qca8k_regmap_read,
+       .reg_write = qca8k_regmap_write,
+       .reg_update_bits = qca8k_regmap_update_bits,
+       .rd_table = &qca8k_readable_table,
+       .disable_locking = true, /* Locking is handled by qca8k read/write */
+       .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
+};
+
+static int
+qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
+                       struct sk_buff *read_skb, u32 *val)
+{
+       struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
+       bool ack;
+       int ret;
+
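+       /* Re-send a copy of the read packet with a fresh seq number; the
+        * caller polls this helper until the MDIO master clears its busy
+        * flag.
+        */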
+       reinit_completion(&mgmt_eth_data->rw_done);
+
+       /* Increment seq_num and set it in the copy pkt */
+       mgmt_eth_data->seq++;
+       qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+       mgmt_eth_data->ack = false;
+
+       dev_queue_xmit(skb);
+
+       ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+                                         QCA8K_ETHERNET_TIMEOUT);
+
+       ack = mgmt_eth_data->ack;
+
+       if (ret <= 0)
+               return -ETIMEDOUT;
+
+       if (!ack)
+               return -EINVAL;
+
+       *val = mgmt_eth_data->data[0];
+
+       return 0;
+}
+
+static int
+qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
+                     int regnum, u16 data)
+{
+       struct sk_buff *write_skb, *clear_skb, *read_skb;
+       struct qca8k_mgmt_eth_data *mgmt_eth_data;
+       u32 write_val, clear_val = 0, val;
+       struct net_device *mgmt_master;
+       int ret, ret1;
+       bool ack;
+
+       if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+               return -EINVAL;
+
+       mgmt_eth_data = &priv->mgmt_eth_data;
+
+       write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+                   QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+                   QCA8K_MDIO_MASTER_REG_ADDR(regnum);
+
+       if (read) {
+               write_val |= QCA8K_MDIO_MASTER_READ;
+       } else {
+               write_val |= QCA8K_MDIO_MASTER_WRITE;
+               write_val |= QCA8K_MDIO_MASTER_DATA(data);
+       }
+
+       /* Prealloc all the needed skbs before taking the lock */
+       write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
+                                           QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
+       if (!write_skb)
+               return -ENOMEM;
+
+       clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
+                                           QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
+       if (!clear_skb) {
+               ret = -ENOMEM;
+               goto err_clear_skb;
+       }
+
+       read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
+                                          QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
+       if (!read_skb) {
+               ret = -ENOMEM;
+               goto err_read_skb;
+       }
+
+       /* Actually start the request:
+        * 1. Send the mdio master packet
+        * 2. Busy-wait for the mdio master command to complete
+        * 3. Get the data if we are reading
+        * 4. Reset the mdio master (even on error)
+        */
+       mutex_lock(&mgmt_eth_data->mutex);
+
+       /* Check if mgmt_master is operational */
+       mgmt_master = priv->mgmt_master;
+       if (!mgmt_master) {
+               mutex_unlock(&mgmt_eth_data->mutex);
+               ret = -EINVAL;
+               goto err_mgmt_master;
+       }
+
+       read_skb->dev = mgmt_master;
+       clear_skb->dev = mgmt_master;
+       write_skb->dev = mgmt_master;
+
+       reinit_completion(&mgmt_eth_data->rw_done);
+
+       /* Increment seq_num and set it in the write pkt */
+       mgmt_eth_data->seq++;
+       qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
+       mgmt_eth_data->ack = false;
+
+       dev_queue_xmit(write_skb);
+
+       ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+                                         QCA8K_ETHERNET_TIMEOUT);
+
+       ack = mgmt_eth_data->ack;
+
+       if (ret <= 0) {
+               ret = -ETIMEDOUT;
+               kfree_skb(read_skb);
+               goto exit;
+       }
+
+       if (!ack) {
+               ret = -EINVAL;
+               kfree_skb(read_skb);
+               goto exit;
+       }
+
+       ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
+                               !(val & QCA8K_MDIO_MASTER_BUSY), 0,
+                               QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
+                               mgmt_eth_data, read_skb, &val);
+
+       if (ret < 0 && ret1 < 0) {
+               ret = ret1;
+               goto exit;
+       }
+
+       if (read) {
+               reinit_completion(&mgmt_eth_data->rw_done);
+
+               /* Increment seq_num and set it in the read pkt */
+               mgmt_eth_data->seq++;
+               qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
+               mgmt_eth_data->ack = false;
+
+               dev_queue_xmit(read_skb);
+
+               ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+                                                 QCA8K_ETHERNET_TIMEOUT);
+
+               ack = mgmt_eth_data->ack;
+
+               if (ret <= 0) {
+                       ret = -ETIMEDOUT;
+                       goto exit;
+               }
+
+               if (!ack) {
+                       ret = -EINVAL;
+                       goto exit;
+               }
+
+               ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
+       } else {
+               kfree_skb(read_skb);
+       }
+exit:
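+       /* Always send the clear packet so MDIO_MASTER_CTRL is reset, even if
+        * the transfer above failed or timed out.
+        */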
+       reinit_completion(&mgmt_eth_data->rw_done);
+
+       /* Increment seq_num and set it in the clear pkt */
+       mgmt_eth_data->seq++;
+       qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
+       mgmt_eth_data->ack = false;
+
+       dev_queue_xmit(clear_skb);
+
+       wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+                                   QCA8K_ETHERNET_TIMEOUT);
+
+       mutex_unlock(&mgmt_eth_data->mutex);
+
+       return ret;
+
+       /* Error handling before lock */
+err_mgmt_master:
+       kfree_skb(read_skb);
+err_read_skb:
+       kfree_skb(clear_skb);
+err_clear_skb:
+       kfree_skb(write_skb);
+
+       return ret;
+}
+
+static u32
+qca8k_port_to_phy(int port)
+{
+       /* From Andrew Lunn:
+        * Port 0 has no internal phy.
+        * Port 1 has an internal PHY at MDIO address 0.
+        * Port 2 has an internal PHY at MDIO address 1.
+        * ...
+        * Port 5 has an internal PHY at MDIO address 4.
+        * Port 6 has no internal PHY.
+        */
+
+       return port - 1;
+}
+
+static int
+qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
+{
+       u16 r1, r2, page;
+       u32 val;
+       int ret, ret1;
+
+       qca8k_split_addr(reg, &r1, &r2, &page);
+
+       ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
+                               QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
+                               bus, 0x10 | r2, r1, &val);
+
+       /* Check if qca8k_mii_read32 has failed for a different reason
+        * before returning -ETIMEDOUT
+        */
+       if (ret < 0 && ret1 < 0)
+               return ret1;
+
+       return ret;
+}
+
+static int
+qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
+{
+       struct mii_bus *bus = priv->bus;
+       u16 r1, r2, page;
+       u32 val;
+       int ret;
+
+       if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+               return -EINVAL;
+
+       val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+             QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+             QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
+             QCA8K_MDIO_MASTER_DATA(data);
+
+       qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
+
+       mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+       ret = qca8k_set_page(priv, page);
+       if (ret)
+               goto exit;
+
+       qca8k_mii_write32(priv, 0x10 | r2, r1, val);
+
+       ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
+                                  QCA8K_MDIO_MASTER_BUSY);
+
+exit:
+       /* Even if the busy wait times out, try to clear MASTER_EN */
+       qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
+
+       mutex_unlock(&bus->mdio_lock);
+
+       return ret;
+}
+
+static int
+qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
+{
+       struct mii_bus *bus = priv->bus;
+       u16 r1, r2, page;
+       u32 val;
+       int ret;
+
+       if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+               return -EINVAL;
+
+       val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+             QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+             QCA8K_MDIO_MASTER_REG_ADDR(regnum);
+
+       qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
+
+       mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+       ret = qca8k_set_page(priv, page);
+       if (ret)
+               goto exit;
+
+       qca8k_mii_write32(priv, 0x10 | r2, r1, val);
+
+       ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
+                                  QCA8K_MDIO_MASTER_BUSY);
+       if (ret)
+               goto exit;
+
+       ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
+
+exit:
+       /* Even if the busy wait times out, try to clear MASTER_EN */
+       qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
+
+       mutex_unlock(&bus->mdio_lock);
+
+       if (ret >= 0)
+               ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
+
+       return ret;
+}
+
+static int
+qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
+{
+       struct qca8k_priv *priv = slave_bus->priv;
+       int ret;
+
+       /* Use MDIO over Ethernet when available; fall back to the legacy MDIO access on error */
+       ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
+       if (!ret)
+               return 0;
+
+       return qca8k_mdio_write(priv, phy, regnum, data);
+}
+
+static int
+qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
+{
+       struct qca8k_priv *priv = slave_bus->priv;
+       int ret;
+
+       /* Use MDIO over Ethernet when available; fall back to the legacy MDIO access on error */
+       ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
+       if (ret >= 0)
+               return ret;
+
+       ret = qca8k_mdio_read(priv, phy, regnum);
+
+       if (ret < 0)
+               return 0xffff;
+
+       return ret;
+}
+
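+/* Legacy mapping: when the devicetree does not provide an mdio node, user
+ * ports are addressed by port number, so translate the port to its internal
+ * PHY address first.
+ */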
+static int
+qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
+{
+       port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+
+       return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
+}
+
+static int
+qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
+{
+       port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+
+       return qca8k_internal_mdio_read(slave_bus, port, regnum);
+}
+
+static int
+qca8k_mdio_register(struct qca8k_priv *priv)
+{
+       struct dsa_switch *ds = priv->ds;
+       struct device_node *mdio;
+       struct mii_bus *bus;
+
+       bus = devm_mdiobus_alloc(ds->dev);
+       if (!bus)
+               return -ENOMEM;
+
+       bus->priv = (void *)priv;
+       snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
+                ds->dst->index, ds->index);
+       bus->parent = ds->dev;
+       bus->phy_mask = ~ds->phys_mii_mask;
+       ds->slave_mii_bus = bus;
+
+       /* Check if the devicetree declares the port:phy mapping */
+       mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
+       if (of_device_is_available(mdio)) {
+               bus->name = "qca8k slave mii";
+               bus->read = qca8k_internal_mdio_read;
+               bus->write = qca8k_internal_mdio_write;
+               return devm_of_mdiobus_register(priv->dev, bus, mdio);
+       }
+
+       /* If no mapping is found, the legacy mapping is used via the
+        * qca8k_port_to_phy function.
+        */
+       bus->name = "qca8k-legacy slave mii";
+       bus->read = qca8k_legacy_mdio_read;
+       bus->write = qca8k_legacy_mdio_write;
+       return devm_mdiobus_register(priv->dev, bus);
+}
+
+static int
+qca8k_setup_mdio_bus(struct qca8k_priv *priv)
+{
+       u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
+       struct device_node *ports, *port;
+       phy_interface_t mode;
+       int err;
+
+       ports = of_get_child_by_name(priv->dev->of_node, "ports");
+       if (!ports)
+               ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
+
+       if (!ports)
+               return -EINVAL;
+
+       for_each_available_child_of_node(ports, port) {
+               err = of_property_read_u32(port, "reg", &reg);
+               if (err) {
+                       of_node_put(port);
+                       of_node_put(ports);
+                       return err;
+               }
+
+               if (!dsa_is_user_port(priv->ds, reg))
+                       continue;
+
+               of_get_phy_mode(port, &mode);
+
+               if (of_property_read_bool(port, "phy-handle") &&
+                   mode != PHY_INTERFACE_MODE_INTERNAL)
+                       external_mdio_mask |= BIT(reg);
+               else
+                       internal_mdio_mask |= BIT(reg);
+       }
+
+       of_node_put(ports);
+       if (!external_mdio_mask && !internal_mdio_mask) {
+               dev_err(priv->dev, "no PHYs are defined.\n");
+               return -EINVAL;
+       }
+
+       /* The QCA8K_MDIO_MASTER_EN bit, which grants access to PHYs through
+        * the MDIO_MASTER register, also _disconnects_ the external MDC
+        * passthrough to the internal PHYs. It's not possible to use both
+        * configurations at the same time!
+        *
+        * Because this came up during the review process:
+        * if the external mdio-bus driver were capable of magically disabling
+        * QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k
+        * accessors for the time being, it would be possible to pull this off.
+        */
+       if (!!external_mdio_mask && !!internal_mdio_mask) {
+               dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
+               return -EINVAL;
+       }
+
+       if (external_mdio_mask) {
+               /* Make sure to disable the internal mdio bus in case a
+                * dt-overlay or driver reload changed the configuration.
+                */
+
+               return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
+                                        QCA8K_MDIO_MASTER_EN);
+       }
+
+       return qca8k_mdio_register(priv);
+}
+
+static int
+qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
+{
+       u32 mask = 0;
+       int ret = 0;
+
+       /* SoC specific settings for ipq8064.
+        * If more devices require this, consider adding
+        * a dedicated binding.
+        */
+       if (of_machine_is_compatible("qcom,ipq8064"))
+               mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
+
+       /* SoC specific settings for ipq8065 */
+       if (of_machine_is_compatible("qcom,ipq8065"))
+               mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
+
+       if (mask) {
+               ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
+                               QCA8K_MAC_PWR_RGMII0_1_8V |
+                               QCA8K_MAC_PWR_RGMII1_1_8V,
+                               mask);
+       }
+
+       return ret;
+}
+
+static int qca8k_find_cpu_port(struct dsa_switch *ds)
+{
+       struct qca8k_priv *priv = ds->priv;
+
+       /* Find the connected CPU port. Valid ports are 0 and 6 */
+       if (dsa_is_cpu_port(ds, 0))
+               return 0;
+
+       dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
+
+       if (dsa_is_cpu_port(ds, 6))
+               return 6;
+
+       return -EINVAL;
+}
+
+static int
+qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
+{
+       const struct qca8k_match_data *data = priv->info;
+       struct device_node *node = priv->dev->of_node;
+       u32 val = 0;
+       int ret;
+
+       /* The QCA8327 requires the correct package mode to be set.
+        * Its bigger brother, the QCA8328, has the 172-pin layout.
+        * This should be applied by default, but set it just to make sure.
+        */
+       if (priv->switch_id == QCA8K_ID_QCA8327) {
+               /* Set the correct package of 148 pin for QCA8327 */
+               if (data->reduced_package)
+                       val |= QCA8327_PWS_PACKAGE148_EN;
+
+               ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
+                               val);
+               if (ret)
+                       return ret;
+       }
+
+       if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
+               val |= QCA8K_PWS_POWER_ON_SEL;
+
+       if (of_property_read_bool(node, "qca,led-open-drain")) {
+               if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
+                       dev_err(priv->dev, "qca,led-open-drain requires qca,ignore-power-on-sel to be set.");
+                       return -EINVAL;
+               }
+
+               val |= QCA8K_PWS_LED_OPEN_EN_CSR;
+       }
+
+       return qca8k_rmw(priv, QCA8K_REG_PWS,
+                       QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
+                       val);
+}
+
+static int
+qca8k_parse_port_config(struct qca8k_priv *priv)
+{
+       int port, cpu_port_index = -1, ret;
+       struct device_node *port_dn;
+       phy_interface_t mode;
+       struct dsa_port *dp;
+       u32 delay;
+
+       /* We have 2 CPU ports. Check them */
+       for (port = 0; port < QCA8K_NUM_PORTS; port++) {
+               /* Skip all ports except the two CPU ports (0 and 6) */
+               if (port != 0 && port != 6)
+                       continue;
+
+               dp = dsa_to_port(priv->ds, port);
+               port_dn = dp->dn;
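+               /* Advance the CPU port slot even for unavailable ports so
+                * that slot 0 always maps to port 0 and slot 1 to port 6.
+                */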
+               cpu_port_index++;
+
+               if (!of_device_is_available(port_dn))
+                       continue;
+
+               ret = of_get_phy_mode(port_dn, &mode);
+               if (ret)
+                       continue;
+
+               switch (mode) {
+               case PHY_INTERFACE_MODE_RGMII:
+               case PHY_INTERFACE_MODE_RGMII_ID:
+               case PHY_INTERFACE_MODE_RGMII_TXID:
+               case PHY_INTERFACE_MODE_RGMII_RXID:
+               case PHY_INTERFACE_MODE_SGMII:
+                       delay = 0;
+
+                       if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
+                               /* Switch regs accept value in ns, convert ps to ns */
+                               delay = delay / 1000;
+                       else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
+                                mode == PHY_INTERFACE_MODE_RGMII_TXID)
+                               delay = 1;
+
+                       if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
+                               dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
+                               delay = 3;
+                       }
+
+                       priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
+
+                       delay = 0;
+
+                       if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
+                               /* Switch regs accept value in ns, convert ps to ns */
+                               delay = delay / 1000;
+                       else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
+                                mode == PHY_INTERFACE_MODE_RGMII_RXID)
+                               delay = 2;
+
+                       if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
+                               dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
+                               delay = 3;
+                       }
+
+                       priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
+
+                       /* Skip sgmii parsing for rgmii* mode */
+                       if (mode == PHY_INTERFACE_MODE_RGMII ||
+                           mode == PHY_INTERFACE_MODE_RGMII_ID ||
+                           mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+                           mode == PHY_INTERFACE_MODE_RGMII_RXID)
+                               break;
+
+                       if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
+                               priv->ports_config.sgmii_tx_clk_falling_edge = true;
+
+                       if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
+                               priv->ports_config.sgmii_rx_clk_falling_edge = true;
+
+                       if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
+                               priv->ports_config.sgmii_enable_pll = true;
+
+                               if (priv->switch_id == QCA8K_ID_QCA8327) {
+                                       dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
+                                       priv->ports_config.sgmii_enable_pll = false;
+                               }
+
+                               if (priv->switch_revision < 2)
+                                       dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
+                       }
+
+                       break;
+               default:
+                       continue;
+               }
+       }
+
+       return 0;
+}
+
+static void
+qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
+                                     u32 reg)
+{
+       u32 delay, val = 0;
+       int ret;
+
+       /* A delay can be declared in 3 different ways:
+        * an rgmii mode with the standard internal-delay binding, or the
+        * rgmii-id / rgmii-txid / rgmii-rxid phy modes.
+        * The parse logic sets a delay different from 0 only when one of
+        * these 3 ways is used. In all other cases the delay is not enabled.
+        * With ID or TX/RXID the delay is enabled and set to the default,
+        * recommended value.
+        */
+       if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
+               delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
+
+               val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
+                       QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
+       }
+
+       if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
+               delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
+
+               val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
+                       QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
+       }
+
+       /* Set RGMII delay based on the selected values */
+       ret = qca8k_rmw(priv, reg,
+                       QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
+                       QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
+                       QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
+                       QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
+                       val);
+       if (ret)
+               dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
+                       cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
+}
+
+static struct phylink_pcs *
+qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+                            phy_interface_t interface)
+{
+       struct qca8k_priv *priv = ds->priv;
+       struct phylink_pcs *pcs = NULL;
+
+       switch (interface) {
+       case PHY_INTERFACE_MODE_SGMII:
+       case PHY_INTERFACE_MODE_1000BASEX:
+               switch (port) {
+               case 0:
+                       pcs = &priv->pcs_port_0.pcs;
+                       break;
+
+               case 6:
+                       pcs = &priv->pcs_port_6.pcs;
+                       break;
+               }
+               break;
+
+       default:
+               break;
+       }
+
+       return pcs;
+}
+
+static void
+qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+                        const struct phylink_link_state *state)
+{
+       struct qca8k_priv *priv = ds->priv;
+       int cpu_port_index;
+       u32 reg;
+
+       switch (port) {
+       case 0: /* 1st CPU port */
+               if (state->interface != PHY_INTERFACE_MODE_RGMII &&
+                   state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
+                   state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
+                   state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
+                   state->interface != PHY_INTERFACE_MODE_SGMII)
+                       return;
+
+               reg = QCA8K_REG_PORT0_PAD_CTRL;
+               cpu_port_index = QCA8K_CPU_PORT0;
+               break;
+       case 1:
+       case 2:
+       case 3:
+       case 4:
+       case 5:
+               /* Internal PHY, nothing to do */
+               return;
+       case 6: /* 2nd CPU port / external PHY */
+               if (state->interface != PHY_INTERFACE_MODE_RGMII &&
+                   state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
+                   state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
+                   state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
+                   state->interface != PHY_INTERFACE_MODE_SGMII &&
+                   state->interface != PHY_INTERFACE_MODE_1000BASEX)
+                       return;
+
+               reg = QCA8K_REG_PORT6_PAD_CTRL;
+               cpu_port_index = QCA8K_CPU_PORT6;
+               break;
+       default:
+               dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
+               return;
+       }
+
+       if (port != 6 && phylink_autoneg_inband(mode)) {
+               dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
+                       __func__);
+               return;
+       }
+
+       switch (state->interface) {
+       case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+               qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
+
+               /* Configure rgmii delay */
+               qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+
+               /* The QCA8337 requires the rgmii rx delay to be set for all
+                * ports. This is enabled through PORT5_PAD_CTRL for all
+                * ports, rather than individual port registers.
+                */
+               if (priv->switch_id == QCA8K_ID_QCA8337)
+                       qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
+                                   QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
+               break;
+       case PHY_INTERFACE_MODE_SGMII:
+       case PHY_INTERFACE_MODE_1000BASEX:
+               /* Enable SGMII on the port */
+               qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
+               break;
+       default:
+               dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
+                       phy_modes(state->interface), port);
+               return;
+       }
+}
+
+static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
+                                  struct phylink_config *config)
+{
+       switch (port) {
+       case 0: /* 1st CPU port */
+               phy_interface_set_rgmii(config->supported_interfaces);
+               __set_bit(PHY_INTERFACE_MODE_SGMII,
+                         config->supported_interfaces);
+               break;
+
+       case 1:
+       case 2:
+       case 3:
+       case 4:
+       case 5:
+               /* Internal PHY */
+               __set_bit(PHY_INTERFACE_MODE_GMII,
+                         config->supported_interfaces);
+               __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+                         config->supported_interfaces);
+               break;
+
+       case 6: /* 2nd CPU port / external PHY */
+               phy_interface_set_rgmii(config->supported_interfaces);
+               __set_bit(PHY_INTERFACE_MODE_SGMII,
+                         config->supported_interfaces);
+               __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+                         config->supported_interfaces);
+               break;
+       }
+
+       config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+               MAC_10 | MAC_100 | MAC_1000FD;
+
+       config->legacy_pre_march2020 = false;
+}
+
+static void
+qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+                           phy_interface_t interface)
+{
+       struct qca8k_priv *priv = ds->priv;
+
+       qca8k_port_set_status(priv, port, 0);
+}
+
+static void
+qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
+                         phy_interface_t interface, struct phy_device *phydev,
+                         int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+       struct qca8k_priv *priv = ds->priv;
+       u32 reg;
+
+       if (phylink_autoneg_inband(mode)) {
+               reg = QCA8K_PORT_STATUS_LINK_AUTO;
+       } else {
+               switch (speed) {
+               case SPEED_10:
+                       reg = QCA8K_PORT_STATUS_SPEED_10;
+                       break;
+               case SPEED_100:
+                       reg = QCA8K_PORT_STATUS_SPEED_100;
+                       break;
+               case SPEED_1000:
+                       reg = QCA8K_PORT_STATUS_SPEED_1000;
+                       break;
+               default:
+                       reg = QCA8K_PORT_STATUS_LINK_AUTO;
+                       break;
+               }
+
+               if (duplex == DUPLEX_FULL)
+                       reg |= QCA8K_PORT_STATUS_DUPLEX;
+
+               if (rx_pause || dsa_is_cpu_port(ds, port))
+                       reg |= QCA8K_PORT_STATUS_RXFLOW;
+
+               if (tx_pause || dsa_is_cpu_port(ds, port))
+                       reg |= QCA8K_PORT_STATUS_TXFLOW;
+       }
+
+       reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
+
+       qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
+}
+
+static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
+{
+       return container_of(pcs, struct qca8k_pcs, pcs);
+}
+
+static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
+                               struct phylink_link_state *state)
+{
+       struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
+       int port = pcs_to_qca8k_pcs(pcs)->port;
+       u32 reg;
+       int ret;
+
+       ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
+       if (ret < 0) {
+               state->link = false;
+               return;
+       }
+
+       state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
+       state->an_complete = state->link;
+       state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
+       state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
+                                                          DUPLEX_HALF;
+
+       switch (reg & QCA8K_PORT_STATUS_SPEED) {
+       case QCA8K_PORT_STATUS_SPEED_10:
+               state->speed = SPEED_10;
+               break;
+       case QCA8K_PORT_STATUS_SPEED_100:
+               state->speed = SPEED_100;
+               break;
+       case QCA8K_PORT_STATUS_SPEED_1000:
+               state->speed = SPEED_1000;
+               break;
+       default:
+               state->speed = SPEED_UNKNOWN;
+               break;
+       }
+
+       if (reg & QCA8K_PORT_STATUS_RXFLOW)
+               state->pause |= MLO_PAUSE_RX;
+       if (reg & QCA8K_PORT_STATUS_TXFLOW)
+               state->pause |= MLO_PAUSE_TX;
+}
+
+static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+                           phy_interface_t interface,
+                           const unsigned long *advertising,
+                           bool permit_pause_to_mac)
+{
+       struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
+       int cpu_port_index, ret, port;
+       u32 reg, val;
+
+       port = pcs_to_qca8k_pcs(pcs)->port;
+       switch (port) {
+       case 0:
+               reg = QCA8K_REG_PORT0_PAD_CTRL;
+               cpu_port_index = QCA8K_CPU_PORT0;
+               break;
+
+       case 6:
+               reg = QCA8K_REG_PORT6_PAD_CTRL;
+               cpu_port_index = QCA8K_CPU_PORT6;
+               break;
+
+       default:
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
+       /* Enable/disable SerDes auto-negotiation as necessary */
+       ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
+       if (ret)
+               return ret;
+       if (phylink_autoneg_inband(mode))
+               val &= ~QCA8K_PWS_SERDES_AEN_DIS;
+       else
+               val |= QCA8K_PWS_SERDES_AEN_DIS;
+       qca8k_write(priv, QCA8K_REG_PWS, val);
+
+       /* Configure the SGMII parameters */
+       ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
+       if (ret)
+               return ret;
+
+       val |= QCA8K_SGMII_EN_SD;
+
+       if (priv->ports_config.sgmii_enable_pll)
+               val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
+                      QCA8K_SGMII_EN_TX;
+
+       if (dsa_is_cpu_port(priv->ds, port)) {
+               /* CPU port, we're talking to the CPU MAC, be a PHY */
+               val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+               val |= QCA8K_SGMII_MODE_CTRL_PHY;
+       } else if (interface == PHY_INTERFACE_MODE_SGMII) {
+               val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+               val |= QCA8K_SGMII_MODE_CTRL_MAC;
+       } else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
+               val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+               val |= QCA8K_SGMII_MODE_CTRL_BASEX;
+       }
+
+       qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
+
+       /* The original code reports port instability when SGMII is used
+        * without the delay set. Apply the advised values here or take them
+        * from the DT.
+        */
+       if (interface == PHY_INTERFACE_MODE_SGMII)
+               qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+       /* For qca8327/qca8328/qca8334/qca8338 the SGMII is unique and the
+        * falling edge is set by writing to the PORT0 PAD reg.
+        */
+       if (priv->switch_id == QCA8K_ID_QCA8327 ||
+           priv->switch_id == QCA8K_ID_QCA8337)
+               reg = QCA8K_REG_PORT0_PAD_CTRL;
+
+       val = 0;
+
+       /* SGMII Clock phase configuration */
+       if (priv->ports_config.sgmii_rx_clk_falling_edge)
+               val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
+
+       if (priv->ports_config.sgmii_tx_clk_falling_edge)
+               val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
+
+       if (val)
+               ret = qca8k_rmw(priv, reg,
+                               QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
+                               QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
+                               val);
+
+       return 0;
+}
+
+static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
+{
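+       /* Nothing to do: no manual AN restart is implemented for this PCS */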
+}
+
+static const struct phylink_pcs_ops qca8k_pcs_ops = {
+       .pcs_get_state = qca8k_pcs_get_state,
+       .pcs_config = qca8k_pcs_config,
+       .pcs_an_restart = qca8k_pcs_an_restart,
+};
+
+static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
+                           int port)
+{
+       qpcs->pcs.ops = &qca8k_pcs_ops;
+
+       /* We don't have interrupts for link changes, so we need to poll */
+       qpcs->pcs.poll = true;
+       qpcs->priv = priv;
+       qpcs->port = port;
+}
+
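+/* Invoked by the tagger for every MIB autocast frame received on the DSA
+ * master; the switch sends one such frame per port.
+ */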
+static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
+{
+       struct qca8k_mib_eth_data *mib_eth_data;
+       struct qca8k_priv *priv = ds->priv;
+       const struct qca8k_mib_desc *mib;
+       struct mib_ethhdr *mib_ethhdr;
+       int i, mib_len, offset = 0;
+       u64 *data;
+       u8 port;
+
+       mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
+       mib_eth_data = &priv->mib_eth_data;
+
+       /* The switch autocasts the MIB counters of every port. Ignore the
+        * other packets and parse only the requested one.
+        */
+       port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
+       if (port != mib_eth_data->req_port)
+               goto exit;
+
+       data = mib_eth_data->data;
+
+       for (i = 0; i < priv->info->mib_count; i++) {
+               mib = &ar8327_mib[i];
+
+               /* The first 3 MIB counters are carried in the mib_ethhdr */
+               if (i < 3) {
+                       data[i] = mib_ethhdr->data[i];
+                       continue;
+               }
+
+               mib_len = sizeof(uint32_t);
+
+               /* Some MIB counters are 64 bits wide */
+               if (mib->size == 2)
+                       mib_len = sizeof(uint64_t);
+
+               /* Copy the MIB value from the packet to the data buffer */
+               memcpy(data + i, skb->data + offset, mib_len);
+
+               /* Set the offset for the next mib */
+               offset += mib_len;
+       }
+
+exit:
+       /* Complete once all the MIB packets have been received */
+       if (refcount_dec_and_test(&mib_eth_data->port_parsed))
+               complete(&mib_eth_data->rw_done);
+}
+
+static int
+qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
+{
+       struct dsa_port *dp = dsa_to_port(ds, port);
+       struct qca8k_mib_eth_data *mib_eth_data;
+       struct qca8k_priv *priv = ds->priv;
+       int ret;
+
+       mib_eth_data = &priv->mib_eth_data;
+
+       mutex_lock(&mib_eth_data->mutex);
+
+       reinit_completion(&mib_eth_data->rw_done);
+
+       mib_eth_data->req_port = dp->index;
+       mib_eth_data->data = data;
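+       /* One MIB autocast packet is expected per switch port; the handler
+        * completes rw_done once all of them have been parsed.
+        */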
+       refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
+
+       mutex_lock(&priv->reg_mutex);
+
+       /* Send mib autocast request */
+       ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
+                                QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
+                                FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
+                                QCA8K_MIB_BUSY);
+
+       mutex_unlock(&priv->reg_mutex);
+
+       if (ret)
+               goto exit;
+
+       ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
+
+exit:
+       mutex_unlock(&mib_eth_data->mutex);
+
+       return ret;
+}
+
+static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
+{
+       struct qca8k_priv *priv = ds->priv;
+
+       /* Communicate the switch revision to the internal PHY driver.
+        * Based on the switch revision, different values need to be
+        * set in the dbg and mmd regs of the PHY.
+        * The first 2 bits are used to communicate the switch revision
+        * to the PHY driver.
+        */
+       if (port > 0 && port < 6)
+               return priv->switch_revision;
+
+       return 0;
+}
+
+static enum dsa_tag_protocol
+qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
+                      enum dsa_tag_protocol mp)
+{
+       return DSA_TAG_PROTO_QCA;
+}
+
+static void
+qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
+                   bool operational)
+{
+       struct dsa_port *dp = master->dsa_ptr;
+       struct qca8k_priv *priv = ds->priv;
+
+       /* Ethernet MIB/MDIO is only supported for CPU port 0 */
+       if (dp->index != 0)
+               return;
+
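+       /* Hold both the mgmt and MIB Ethernet mutexes so that in-flight
+        * operations observe a consistent mgmt_master pointer.
+        */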
+       mutex_lock(&priv->mgmt_eth_data.mutex);
+       mutex_lock(&priv->mib_eth_data.mutex);
+
+       priv->mgmt_master = operational ? (struct net_device *)master : NULL;
+
+       mutex_unlock(&priv->mib_eth_data.mutex);
+       mutex_unlock(&priv->mgmt_eth_data.mutex);
+}
+
+static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
+                                     enum dsa_tag_protocol proto)
+{
+       struct qca_tagger_data *tagger_data;
+
+       switch (proto) {
+       case DSA_TAG_PROTO_QCA:
+               tagger_data = ds->tagger_data;
+
+               tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
+               tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
+
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static int
+qca8k_setup(struct dsa_switch *ds)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+       int cpu_port, ret, i;
+       u32 mask;
+
+       cpu_port = qca8k_find_cpu_port(ds);
+       if (cpu_port < 0) {
+               dev_err(priv->dev, "No CPU port configured on either port 0 or port 6");
+               return cpu_port;
+       }
+
+       /* Parse the CPU port config to be used later in phylink mac_config */
+       ret = qca8k_parse_port_config(priv);
+       if (ret)
+               return ret;
+
+       ret = qca8k_setup_mdio_bus(priv);
+       if (ret)
+               return ret;
+
+       ret = qca8k_setup_of_pws_reg(priv);
+       if (ret)
+               return ret;
+
+       ret = qca8k_setup_mac_pwr_sel(priv);
+       if (ret)
+               return ret;
+
+       qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
+       qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);
+
+       /* Make sure MAC06 is disabled */
+       ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
+                               QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
+       if (ret) {
+               dev_err(priv->dev, "failed disabling MAC06 exchange");
+               return ret;
+       }
+
+       /* Enable CPU Port */
+       ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+                             QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
+       if (ret) {
+               dev_err(priv->dev, "failed enabling CPU port");
+               return ret;
+       }
+
+       /* Enable MIB counters */
+       ret = qca8k_mib_init(priv);
+       if (ret)
+               dev_warn(priv->dev, "mib init failed");
+
+       /* Initial setup of all ports */
+       for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+               /* Disable forwarding by default on all ports */
+               ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+                               QCA8K_PORT_LOOKUP_MEMBER, 0);
+               if (ret)
+                       return ret;
+
+               /* Enable QCA header mode on all cpu ports */
+               if (dsa_is_cpu_port(ds, i)) {
+                       ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
+                                         FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
+                                         FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
+                       if (ret) {
+                               dev_err(priv->dev, "failed enabling QCA header mode");
+                               return ret;
+                       }
+               }
+
+               /* Disable MAC by default on all user ports */
+               if (dsa_is_user_port(ds, i))
+                       qca8k_port_set_status(priv, i, 0);
+       }
+
+       /* Forward all unknown frames to the CPU port for Linux processing.
+        * Note that in a multi-CPU config only one port should be set
+        * for igmp, unknown, multicast and broadcast packets.
+        */
+       ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
+                         FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
+                         FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
+                         FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
+                         FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
+       if (ret)
+               return ret;
+
+       /* Set up the connection between the CPU port and the user ports and
+        * apply the port-specific switch configuration.
+        */
+       for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+               /* CPU port gets connected to all user ports of the switch */
+               if (dsa_is_cpu_port(ds, i)) {
+                       ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+                                       QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
+                       if (ret)
+                               return ret;
+               }
+
+               /* Individual user ports get connected to CPU port only */
+               if (dsa_is_user_port(ds, i)) {
+                       ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+                                       QCA8K_PORT_LOOKUP_MEMBER,
+                                       BIT(cpu_port));
+                       if (ret)
+                               return ret;
+
+                       /* Enable ARP Auto-learning by default */
+                       ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
+                                             QCA8K_PORT_LOOKUP_LEARN);
+                       if (ret)
+                               return ret;
+
+                       /* For port-based VLANs to work we need to set the
+                        * default egress vid
+                        */
+                       ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
+                                       QCA8K_EGREES_VLAN_PORT_MASK(i),
+                                       QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
+                       if (ret)
+                               return ret;
+
+                       ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
+                                         QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
+                                         QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
+                       if (ret)
+                               return ret;
+               }
+
+               /* Port 5 of the qca8337 has some problems under flood conditions. The
+                * original legacy driver had some specific buffer and priority settings
+                * for the different ports, suggested by the QCA switch team. Add these
+                * missing settings to improve switch stability under load conditions.
+                * This problem is limited to the qca8337; other qca8k switches are not affected.
+                */
+               if (priv->switch_id == QCA8K_ID_QCA8337) {
+                       switch (i) {
+                       /* The 2 CPU ports and port 5 require different
+                        * priorities than the other ports.
+                        */
+                       case 0:
+                       case 5:
+                       case 6:
+                               mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
+                                       QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
+                                       QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
+                                       QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
+                                       QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
+                                       QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
+                                       QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
+                               break;
+                       default:
+                               mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
+                                       QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
+                                       QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
+                                       QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
+                                       QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
+                       }
+                       qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
+
+                       mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
+                       QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
+                       QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
+                       QCA8K_PORT_HOL_CTRL1_WRED_EN;
+                       qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
+                                 QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
+                                 QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
+                                 QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
+                                 QCA8K_PORT_HOL_CTRL1_WRED_EN,
+                                 mask);
+               }
+       }
+
+       /* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
+       if (priv->switch_id == QCA8K_ID_QCA8327) {
+               mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
+                      QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
+               qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
+                         QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
+                         QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
+                         mask);
+       }
+
+       /* Setup our port MTUs to match power on defaults */
+       ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
+       if (ret)
+               dev_warn(priv->dev, "failed setting MTU settings");
+
+       /* Flush the FDB table */
+       qca8k_fdb_flush(priv);
+
+       /* Set the min and max ageing values supported */
+       ds->ageing_time_min = 7000;
+       ds->ageing_time_max = 458745000;
+
+       /* Set max number of LAGs supported */
+       ds->num_lag_ids = QCA8K_NUM_LAGS;
+
+       return 0;
+}
+
+static const struct dsa_switch_ops qca8k_switch_ops = {
+       .get_tag_protocol       = qca8k_get_tag_protocol,
+       .setup                  = qca8k_setup,
+       .get_strings            = qca8k_get_strings,
+       .get_ethtool_stats      = qca8k_get_ethtool_stats,
+       .get_sset_count         = qca8k_get_sset_count,
+       .set_ageing_time        = qca8k_set_ageing_time,
+       .get_mac_eee            = qca8k_get_mac_eee,
+       .set_mac_eee            = qca8k_set_mac_eee,
+       .port_enable            = qca8k_port_enable,
+       .port_disable           = qca8k_port_disable,
+       .port_change_mtu        = qca8k_port_change_mtu,
+       .port_max_mtu           = qca8k_port_max_mtu,
+       .port_stp_state_set     = qca8k_port_stp_state_set,
+       .port_bridge_join       = qca8k_port_bridge_join,
+       .port_bridge_leave      = qca8k_port_bridge_leave,
+       .port_fast_age          = qca8k_port_fast_age,
+       .port_fdb_add           = qca8k_port_fdb_add,
+       .port_fdb_del           = qca8k_port_fdb_del,
+       .port_fdb_dump          = qca8k_port_fdb_dump,
+       .port_mdb_add           = qca8k_port_mdb_add,
+       .port_mdb_del           = qca8k_port_mdb_del,
+       .port_mirror_add        = qca8k_port_mirror_add,
+       .port_mirror_del        = qca8k_port_mirror_del,
+       .port_vlan_filtering    = qca8k_port_vlan_filtering,
+       .port_vlan_add          = qca8k_port_vlan_add,
+       .port_vlan_del          = qca8k_port_vlan_del,
+       .phylink_get_caps       = qca8k_phylink_get_caps,
+       .phylink_mac_select_pcs = qca8k_phylink_mac_select_pcs,
+       .phylink_mac_config     = qca8k_phylink_mac_config,
+       .phylink_mac_link_down  = qca8k_phylink_mac_link_down,
+       .phylink_mac_link_up    = qca8k_phylink_mac_link_up,
+       .get_phy_flags          = qca8k_get_phy_flags,
+       .port_lag_join          = qca8k_port_lag_join,
+       .port_lag_leave         = qca8k_port_lag_leave,
+       .master_state_change    = qca8k_master_change,
+       .connect_tag_protocol   = qca8k_connect_tag_protocol,
+};
+
+static int
+qca8k_sw_probe(struct mdio_device *mdiodev)
+{
+       struct qca8k_priv *priv;
+       int ret;
+
+       /* Allocate the private data struct so that we can probe the switch's
+        * ID register
+        */
+       priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
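+       /* priv->dev must be initialized before it is used to look up the
+        * match data below.
+        */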
+       priv->bus = mdiodev->bus;
+       priv->dev = &mdiodev->dev;
+       priv->info = of_device_get_match_data(priv->dev);
+
+       priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
+                                                  GPIOD_ASIS);
+       if (IS_ERR(priv->reset_gpio))
+               return PTR_ERR(priv->reset_gpio);
+
+       if (priv->reset_gpio) {
+               gpiod_set_value_cansleep(priv->reset_gpio, 1);
+               /* The active low duration must be greater than 10 ms
+                * and checkpatch.pl wants 20 ms.
+                */
+               msleep(20);
+               gpiod_set_value_cansleep(priv->reset_gpio, 0);
+       }
+
+       /* Start by setting up the register mapping */
+       priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
+                                       &qca8k_regmap_config);
+       if (IS_ERR(priv->regmap)) {
+               dev_err(priv->dev, "regmap initialization failed");
+               return PTR_ERR(priv->regmap);
+       }
+
+       priv->mdio_cache.page = 0xffff;
+       priv->mdio_cache.lo = 0xffff;
+       priv->mdio_cache.hi = 0xffff;
+
+       /* Check the detected switch id */
+       ret = qca8k_read_switch_id(priv);
+       if (ret)
+               return ret;
+
+       priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
+       if (!priv->ds)
+               return -ENOMEM;
+
+       mutex_init(&priv->mgmt_eth_data.mutex);
+       init_completion(&priv->mgmt_eth_data.rw_done);
+
+       mutex_init(&priv->mib_eth_data.mutex);
+       init_completion(&priv->mib_eth_data.rw_done);
+
+       priv->ds->dev = &mdiodev->dev;
+       priv->ds->num_ports = QCA8K_NUM_PORTS;
+       priv->ds->priv = priv;
+       priv->ds->ops = &qca8k_switch_ops;
+       mutex_init(&priv->reg_mutex);
+       dev_set_drvdata(&mdiodev->dev, priv);
+
+       return dsa_register_switch(priv->ds);
+}
+
+static void
+qca8k_sw_remove(struct mdio_device *mdiodev)
+{
+       struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
+       int i;
+
+       if (!priv)
+               return;
+
+       for (i = 0; i < QCA8K_NUM_PORTS; i++)
+               qca8k_port_set_status(priv, i, 0);
+
+       dsa_unregister_switch(priv->ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
+{
+       struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+       if (!priv)
+               return;
+
+       dsa_switch_shutdown(priv->ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void
+qca8k_set_pm(struct qca8k_priv *priv, int enable)
+{
+       int port;
+
+       for (port = 0; port < QCA8K_NUM_PORTS; port++) {
+               /* Do not enable on resume if the port was
+                * disabled before.
+                */
+               if (!(priv->port_enabled_map & BIT(port)))
+                       continue;
+
+               qca8k_port_set_status(priv, port, enable);
+       }
+}
+
+static int qca8k_suspend(struct device *dev)
+{
+       struct qca8k_priv *priv = dev_get_drvdata(dev);
+
+       qca8k_set_pm(priv, 0);
+
+       return dsa_switch_suspend(priv->ds);
+}
+
+static int qca8k_resume(struct device *dev)
+{
+       struct qca8k_priv *priv = dev_get_drvdata(dev);
+
+       qca8k_set_pm(priv, 1);
+
+       return dsa_switch_resume(priv->ds);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
+                        qca8k_suspend, qca8k_resume);
+
+static const struct qca8k_info_ops qca8xxx_ops = {
+       .autocast_mib = qca8k_get_ethtool_stats_eth,
+       .read_eth = qca8k_read_eth,
+       .write_eth = qca8k_write_eth,
+};
+
+static const struct qca8k_match_data qca8327 = {
+       .id = QCA8K_ID_QCA8327,
+       .reduced_package = true,
+       .mib_count = QCA8K_QCA832X_MIB_COUNT,
+       .ops = &qca8xxx_ops,
+};
+
+static const struct qca8k_match_data qca8328 = {
+       .id = QCA8K_ID_QCA8327,
+       .mib_count = QCA8K_QCA832X_MIB_COUNT,
+       .ops = &qca8xxx_ops,
+};
+
+static const struct qca8k_match_data qca833x = {
+       .id = QCA8K_ID_QCA8337,
+       .mib_count = QCA8K_QCA833X_MIB_COUNT,
+       .ops = &qca8xxx_ops,
+};
+
+static const struct of_device_id qca8k_of_match[] = {
+       { .compatible = "qca,qca8327", .data = &qca8327 },
+       { .compatible = "qca,qca8328", .data = &qca8328 },
+       { .compatible = "qca,qca8334", .data = &qca833x },
+       { .compatible = "qca,qca8337", .data = &qca833x },
+       { /* sentinel */ },
+};
+
+static struct mdio_driver qca8kmdio_driver = {
+       .probe  = qca8k_sw_probe,
+       .remove = qca8k_sw_remove,
+       .shutdown = qca8k_sw_shutdown,
+       .mdiodrv.driver = {
+               .name = "qca8k",
+               .of_match_table = qca8k_of_match,
+               .pm = &qca8k_pm_ops,
+       },
+};
+
+mdio_module_driver(qca8kmdio_driver);
+
+MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
+MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qca8k");
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
new file mode 100644 (file)
index 0000000..bba9561
--- /dev/null
@@ -0,0 +1,1210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/netdevice.h>
+#include <net/dsa.h>
+#include <linux/if_bridge.h>
+
+#include "qca8k.h"
+
+#define MIB_DESC(_s, _o, _n)   \
+       {                       \
+               .size = (_s),   \
+               .offset = (_o), \
+               .name = (_n),   \
+       }
+
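+/* Per-port MIB counter layout: size is in 32-bit words, offset is relative
+ * to the start of the port's MIB register block, name is the ethtool string.
+ */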
+const struct qca8k_mib_desc ar8327_mib[] = {
+       MIB_DESC(1, 0x00, "RxBroad"),
+       MIB_DESC(1, 0x04, "RxPause"),
+       MIB_DESC(1, 0x08, "RxMulti"),
+       MIB_DESC(1, 0x0c, "RxFcsErr"),
+       MIB_DESC(1, 0x10, "RxAlignErr"),
+       MIB_DESC(1, 0x14, "RxRunt"),
+       MIB_DESC(1, 0x18, "RxFragment"),
+       MIB_DESC(1, 0x1c, "Rx64Byte"),
+       MIB_DESC(1, 0x20, "Rx128Byte"),
+       MIB_DESC(1, 0x24, "Rx256Byte"),
+       MIB_DESC(1, 0x28, "Rx512Byte"),
+       MIB_DESC(1, 0x2c, "Rx1024Byte"),
+       MIB_DESC(1, 0x30, "Rx1518Byte"),
+       MIB_DESC(1, 0x34, "RxMaxByte"),
+       MIB_DESC(1, 0x38, "RxTooLong"),
+       MIB_DESC(2, 0x3c, "RxGoodByte"),
+       MIB_DESC(2, 0x44, "RxBadByte"),
+       MIB_DESC(1, 0x4c, "RxOverFlow"),
+       MIB_DESC(1, 0x50, "Filtered"),
+       MIB_DESC(1, 0x54, "TxBroad"),
+       MIB_DESC(1, 0x58, "TxPause"),
+       MIB_DESC(1, 0x5c, "TxMulti"),
+       MIB_DESC(1, 0x60, "TxUnderRun"),
+       MIB_DESC(1, 0x64, "Tx64Byte"),
+       MIB_DESC(1, 0x68, "Tx128Byte"),
+       MIB_DESC(1, 0x6c, "Tx256Byte"),
+       MIB_DESC(1, 0x70, "Tx512Byte"),
+       MIB_DESC(1, 0x74, "Tx1024Byte"),
+       MIB_DESC(1, 0x78, "Tx1518Byte"),
+       MIB_DESC(1, 0x7c, "TxMaxByte"),
+       MIB_DESC(1, 0x80, "TxOverSize"),
+       MIB_DESC(2, 0x84, "TxByte"),
+       MIB_DESC(1, 0x8c, "TxCollision"),
+       MIB_DESC(1, 0x90, "TxAbortCol"),
+       MIB_DESC(1, 0x94, "TxMultiCol"),
+       MIB_DESC(1, 0x98, "TxSingleCol"),
+       MIB_DESC(1, 0x9c, "TxExcDefer"),
+       MIB_DESC(1, 0xa0, "TxDefer"),
+       MIB_DESC(1, 0xa4, "TxLateCol"),
+       MIB_DESC(1, 0xa8, "RXUnicast"),
+       MIB_DESC(1, 0xac, "TXUnicast"),
+};
+
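+/* Thin wrappers around the regmap set up by the bus-specific driver, so the
+ * shared code below does not care how registers are actually accessed.
+ */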
+int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
+{
+       return regmap_read(priv->regmap, reg, val);
+}
+
+int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+       return regmap_write(priv->regmap, reg, val);
+}
+
+int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+{
+       return regmap_update_bits(priv->regmap, reg, mask, write_val);
+}
+
+static const struct regmap_range qca8k_readable_ranges[] = {
+       regmap_reg_range(0x0000, 0x00e4), /* Global control */
+       regmap_reg_range(0x0100, 0x0168), /* EEE control */
+       regmap_reg_range(0x0200, 0x0270), /* Parser control */
+       regmap_reg_range(0x0400, 0x0454), /* ACL */
+       regmap_reg_range(0x0600, 0x0718), /* Lookup */
+       regmap_reg_range(0x0800, 0x0b70), /* QM */
+       regmap_reg_range(0x0c00, 0x0c80), /* PKT */
+       regmap_reg_range(0x0e00, 0x0e98), /* L3 */
+       regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
+       regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
+       regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
+       regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
+       regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
+       regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
+       regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
+};
+
+const struct regmap_access_table qca8k_readable_table = {
+       .yes_ranges = qca8k_readable_ranges,
+       .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
+};
+
+/* TODO: remove these extra ops when we can support regmap bulk read/write */
+static int qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+       int i, count = len / sizeof(u32), ret;
+
+       if (priv->mgmt_master && priv->info->ops->read_eth &&
+           !priv->info->ops->read_eth(priv, reg, val, len))
+               return 0;
+
+       for (i = 0; i < count; i++) {
+               ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* TODO: remove these extra ops when we can support regmap bulk read/write */
+static int qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+       int i, count = len / sizeof(u32), ret;
+       u32 tmp;
+
+       if (priv->mgmt_master && priv->info->ops->write_eth &&
+           !priv->info->ops->write_eth(priv, reg, val, len))
+               return 0;
+
+       for (i = 0; i < count; i++) {
+               tmp = val[i];
+
+               ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
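+/* Poll reg until the bits in mask are cleared by the hardware, giving up
+ * after QCA8K_BUSY_WAIT_TIMEOUT ms.
+ */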
+static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
+{
+       u32 val;
+
+       return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
+                                      QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
+}
+
+static int qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
+{
+       u32 reg[3];
+       int ret;
+
+       /* load the ARL table into an array */
+       ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
+       if (ret)
+               return ret;
+
+       /* vid - 83:72 */
+       fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
+       /* aging - 67:64 */
+       fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
+       /* portmask - 54:48 */
+       fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
+       /* mac - 47:0 */
+       fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
+       fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
+       fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
+       fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
+       fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
+       fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
+
+       return 0;
+}
+
+static void qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask,
+                           const u8 *mac, u8 aging)
+{
+       u32 reg[3] = { 0 };
+
+       /* vid - 83:72 */
+       reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
+       /* aging - 67:64 */
+       reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
+       /* portmask - 54:48 */
+       reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
+       /* mac - 47:0 */
+       reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
+       reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
+       reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
+       reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
+       reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
+       reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
+
+       /* load the array into the ARL table */
+       qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
+}
+
+static int qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd,
+                           int port)
+{
+       u32 reg;
+       int ret;
+
+       /* Set the command and FDB index */
+       reg = QCA8K_ATU_FUNC_BUSY;
+       reg |= cmd;
+       if (port >= 0) {
+               reg |= QCA8K_ATU_FUNC_PORT_EN;
+               reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
+       }
+
+       /* Write the function register triggering the table access */
+       ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
+       if (ret)
+               return ret;
+
+       /* wait for completion */
+       ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
+       if (ret)
+               return ret;
+
+       /* Check for table full violation when adding an entry */
+       if (cmd == QCA8K_FDB_LOAD) {
+               ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
+               if (ret < 0)
+                       return ret;
+               if (reg & QCA8K_ATU_FUNC_FULL)
+                       return -1;
+       }
+
+       return 0;
+}
+
+static int qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb,
+                         int port)
+{
+       int ret;
+
+       qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
+       ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
+       if (ret < 0)
+               return ret;
+
+       return qca8k_fdb_read(priv, fdb);
+}
+
+static int qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac,
+                        u16 port_mask, u16 vid, u8 aging)
+{
+       int ret;
+
+       mutex_lock(&priv->reg_mutex);
+       qca8k_fdb_write(priv, vid, port_mask, mac, aging);
+       ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+       mutex_unlock(&priv->reg_mutex);
+
+       return ret;
+}
+
+static int qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac,
+                        u16 port_mask, u16 vid)
+{
+       int ret;
+
+       mutex_lock(&priv->reg_mutex);
+       qca8k_fdb_write(priv, vid, port_mask, mac, 0);
+       ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+       mutex_unlock(&priv->reg_mutex);
+
+       return ret;
+}
+
+void qca8k_fdb_flush(struct qca8k_priv *priv)
+{
+       mutex_lock(&priv->reg_mutex);
+       qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
+       mutex_unlock(&priv->reg_mutex);
+}
+
+static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
+                                      const u8 *mac, u16 vid)
+{
+       struct qca8k_fdb fdb = { 0 };
+       int ret;
+
+       mutex_lock(&priv->reg_mutex);
+
+       qca8k_fdb_write(priv, vid, 0, mac, 0);
+       ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+       if (ret < 0)
+               goto exit;
+
+       ret = qca8k_fdb_read(priv, &fdb);
+       if (ret < 0)
+               goto exit;
+
+       /* Rule exists. Delete first */
+       if (!fdb.aging) {
+               ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+               if (ret)
+                       goto exit;
+       }
+
+       /* Add port to fdb portmask */
+       fdb.port_mask |= port_mask;
+
+       qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+       ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+       mutex_unlock(&priv->reg_mutex);
+       return ret;
+}
+
+static int qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
+                                   const u8 *mac, u16 vid)
+{
+       struct qca8k_fdb fdb = { 0 };
+       int ret;
+
+       mutex_lock(&priv->reg_mutex);
+
+       qca8k_fdb_write(priv, vid, 0, mac, 0);
+       ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+       if (ret < 0)
+               goto exit;
+
+       /* Rule doesn't exist. Why delete? */
+       if (!fdb.aging) {
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+       if (ret)
+               goto exit;
+
+       /* The only port in the rule is this port. Don't re-insert */
+       if (fdb.port_mask == port_mask)
+               goto exit;
+
+       /* Remove port from port mask */
+       fdb.port_mask &= ~port_mask;
+
+       qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+       ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+       mutex_unlock(&priv->reg_mutex);
+       return ret;
+}
+
+static int qca8k_vlan_access(struct qca8k_priv *priv,
+                            enum qca8k_vlan_cmd cmd, u16 vid)
+{
+       u32 reg;
+       int ret;
+
+       /* Set the command and VLAN index */
+       reg = QCA8K_VTU_FUNC1_BUSY;
+       reg |= cmd;
+       reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
+
+       /* Write the function register triggering the table access */
+       ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
+       if (ret)
+               return ret;
+
+       /* wait for completion */
+       ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
+       if (ret)
+               return ret;
+
+       /* Check for table full violation when adding an entry */
+       if (cmd == QCA8K_VLAN_LOAD) {
+               ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
+               if (ret < 0)
+                       return ret;
+               if (reg & QCA8K_VTU_FUNC1_FULL)
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid,
+                         bool untagged)
+{
+       u32 reg;
+       int ret;
+
+       /* We do the right thing with VLAN 0 and treat it as untagged while
+        * preserving the tag on egress.
+        */
+       if (vid == 0)
+               return 0;
+
+       mutex_lock(&priv->reg_mutex);
+       ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
+       if (ret < 0)
+               goto out;
+
+       ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+       if (ret < 0)
+               goto out;
+       reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
+       reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
+       if (untagged)
+               reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
+       else
+               reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
+
+       ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+       if (ret)
+               goto out;
+       ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+
+out:
+       mutex_unlock(&priv->reg_mutex);
+
+       return ret;
+}
+
+static int qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
+{
+       u32 reg, mask;
+       int ret, i;
+       bool del;
+
+       mutex_lock(&priv->reg_mutex);
+       ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
+       if (ret < 0)
+               goto out;
+
+       ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+       if (ret < 0)
+               goto out;
+       reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
+       reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
+
+       /* Check if we're the last member to be removed */
+       del = true;
+       for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+               mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
+
+               if ((reg & mask) != mask) {
+                       del = false;
+                       break;
+               }
+       }
+
+       if (del) {
+               ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
+       } else {
+               ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+               if (ret)
+                       goto out;
+               ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+       }
+
+out:
+       mutex_unlock(&priv->reg_mutex);
+
+       return ret;
+}
+
+int qca8k_mib_init(struct qca8k_priv *priv)
+{
+       int ret;
+
+       mutex_lock(&priv->reg_mutex);
+       ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
+                                QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
+                                FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
+                                QCA8K_MIB_BUSY);
+       if (ret)
+               goto exit;
+
+       ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
+       if (ret)
+               goto exit;
+
+       ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
+       if (ret)
+               goto exit;
+
+       ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
+
+exit:
+       mutex_unlock(&priv->reg_mutex);
+       return ret;
+}
+
+void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
+{
+       u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
+
+       /* Ports 0 and 6 have no internal PHY */
+       if (port > 0 && port < 6)
+               mask |= QCA8K_PORT_STATUS_LINK_AUTO;
+
+       if (enable)
+               regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
+       else
+               regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
+}
+
+void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+                      uint8_t *data)
+{
+       struct qca8k_priv *priv = ds->priv;
+       int i;
+
+       if (stringset != ETH_SS_STATS)
+               return;
+
+       for (i = 0; i < priv->info->mib_count; i++)
+               strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
+                       ETH_GSTRING_LEN);
+}
+
+void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
+                            uint64_t *data)
+{
+       struct qca8k_priv *priv = ds->priv;
+       const struct qca8k_mib_desc *mib;
+       u32 reg, i, val;
+       u32 hi = 0;
+       int ret;
+
+       if (priv->mgmt_master && priv->info->ops->autocast_mib &&
+           priv->info->ops->autocast_mib(ds, port, data) > 0)
+               return;
+
+       for (i = 0; i < priv->info->mib_count; i++) {
+               mib = &ar8327_mib[i];
+               reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
+
+               ret = qca8k_read(priv, reg, &val);
+               if (ret < 0)
+                       continue;
+
+               if (mib->size == 2) {
+                       ret = qca8k_read(priv, reg + 4, &hi);
+                       if (ret < 0)
+                               continue;
+               }
+
+               data[i] = val;
+               if (mib->size == 2)
+                       data[i] |= (u64)hi << 32;
+       }
+}
+
+int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+       struct qca8k_priv *priv = ds->priv;
+
+       if (sset != ETH_SS_STATS)
+               return 0;
+
+       return priv->info->mib_count;
+}
+
+int qca8k_set_mac_eee(struct dsa_switch *ds, int port,
+                     struct ethtool_eee *eee)
+{
+       u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
+       struct qca8k_priv *priv = ds->priv;
+       u32 reg;
+       int ret;
+
+       mutex_lock(&priv->reg_mutex);
+       ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
+       if (ret < 0)
+               goto exit;
+
+       if (eee->eee_enabled)
+               reg |= lpi_en;
+       else
+               reg &= ~lpi_en;
+       ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
+
+exit:
+       mutex_unlock(&priv->reg_mutex);
+       return ret;
+}
+
+int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
+                     struct ethtool_eee *e)
+{
+       /* Nothing to do on the port's MAC */
+       return 0;
+}
+
+void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+       struct qca8k_priv *priv = ds->priv;
+       u32 stp_state;
+
+       switch (state) {
+       case BR_STATE_DISABLED:
+               stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
+               break;
+       case BR_STATE_BLOCKING:
+               stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
+               break;
+       case BR_STATE_LISTENING:
+               stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
+               break;
+       case BR_STATE_LEARNING:
+               stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
+               break;
+       case BR_STATE_FORWARDING:
+       default:
+               stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
+               break;
+       }
+
+       qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+                 QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
+}
+
+int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
+                          struct dsa_bridge bridge,
+                          bool *tx_fwd_offload,
+                          struct netlink_ext_ack *extack)
+{
+       struct qca8k_priv *priv = ds->priv;
+       int port_mask, cpu_port;
+       int i, ret;
+
+       cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+       port_mask = BIT(cpu_port);
+
+       for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+               if (dsa_is_cpu_port(ds, i))
+                       continue;
+               if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+                       continue;
+               /* Add this port to the portvlan mask of the other ports
+                * in the bridge
+                */
+               ret = regmap_set_bits(priv->regmap,
+                                     QCA8K_PORT_LOOKUP_CTRL(i),
+                                     BIT(port));
+               if (ret)
+                       return ret;
+               if (i != port)
+                       port_mask |= BIT(i);
+       }
+
+       /* Add all other ports to this port's portvlan mask */
+       ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+                       QCA8K_PORT_LOOKUP_MEMBER, port_mask);
+
+       return ret;
+}
+
+void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
+                            struct dsa_bridge bridge)
+{
+       struct qca8k_priv *priv = ds->priv;
+       int cpu_port, i;
+
+       cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+
+       for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+               if (dsa_is_cpu_port(ds, i))
+                       continue;
+               if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+                       continue;
+               /* Remove this port from the portvlan mask of the other ports
+                * in the bridge
+                */
+               regmap_clear_bits(priv->regmap,
+                                 QCA8K_PORT_LOOKUP_CTRL(i),
+                                 BIT(port));
+       }
+
+       /* Set the cpu port to be the only one in the portvlan mask of
+        * this port
+        */
+       qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+                 QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
+}
+
+void qca8k_port_fast_age(struct dsa_switch *ds, int port)
+{
+       struct qca8k_priv *priv = ds->priv;
+
+       mutex_lock(&priv->reg_mutex);
+       qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
+       mutex_unlock(&priv->reg_mutex);
+}
+
+int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+       struct qca8k_priv *priv = ds->priv;
+       unsigned int secs = msecs / 1000;
+       u32 val;
+
+       /* The AGE_TIME reg is set in 7s steps */
+       val = secs / 7;
+
+       /* Handle the case of val being 0 so we do NOT disable
+        * learning
+        */
+       if (!val)
+               val = 1;
+
+       return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL,
+                                 QCA8K_ATU_AGE_TIME_MASK,
+                                 QCA8K_ATU_AGE_TIME(val));
+}
+
+int qca8k_port_enable(struct dsa_switch *ds, int port,
+                     struct phy_device *phy)
+{
+       struct qca8k_priv *priv = ds->priv;
+
+       qca8k_port_set_status(priv, port, 1);
+       priv->port_enabled_map |= BIT(port);
+
+       if (dsa_is_user_port(ds, port))
+               phy_support_asym_pause(phy);
+
+       return 0;
+}
+
+void qca8k_port_disable(struct dsa_switch *ds, int port)
+{
+       struct qca8k_priv *priv = ds->priv;
+
+       qca8k_port_set_status(priv, port, 0);
+       priv->port_enabled_map &= ~BIT(port);
+}
+
+int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+       struct qca8k_priv *priv = ds->priv;
+       int ret;
+
+       /* We only have a general MTU setting.
+        * DSA always sets the CPU port's MTU to the largest MTU of the slave
+        * ports.
+        * Setting MTU just for the CPU port is sufficient to correctly set a
+        * value for every port.
+        */
+       if (!dsa_is_cpu_port(ds, port))
+               return 0;
+
+       /* To change the MAX_FRAME_SIZE the cpu ports must be off or
+        * the switch panics.
+        * Turn off both cpu ports before applying the new value to prevent
+        * this.
+        */
+       if (priv->port_enabled_map & BIT(0))
+               qca8k_port_set_status(priv, 0, 0);
+
+       if (priv->port_enabled_map & BIT(6))
+               qca8k_port_set_status(priv, 6, 0);
+
+       /* Include L2 header / FCS length */
+       ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu +
+                         ETH_HLEN + ETH_FCS_LEN);
+
+       if (priv->port_enabled_map & BIT(0))
+               qca8k_port_set_status(priv, 0, 1);
+
+       if (priv->port_enabled_map & BIT(6))
+               qca8k_port_set_status(priv, 6, 1);
+
+       return ret;
+}
+
+int qca8k_port_max_mtu(struct dsa_switch *ds, int port)
+{
+       return QCA8K_MAX_MTU;
+}
+
+int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
+                         u16 port_mask, u16 vid)
+{
+       /* Set the vid to the port vlan id if no vid is set */
+       if (!vid)
+               vid = QCA8K_PORT_VID_DEF;
+
+       return qca8k_fdb_add(priv, addr, port_mask, vid,
+                            QCA8K_ATU_STATUS_STATIC);
+}
+
+int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
+                      const unsigned char *addr, u16 vid,
+                      struct dsa_db db)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+       u16 port_mask = BIT(port);
+
+       return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
+}
+
+int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
+                      const unsigned char *addr, u16 vid,
+                      struct dsa_db db)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+       u16 port_mask = BIT(port);
+
+       if (!vid)
+               vid = QCA8K_PORT_VID_DEF;
+
+       return qca8k_fdb_del(priv, addr, port_mask, vid);
+}
+
+int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
+                       dsa_fdb_dump_cb_t *cb, void *data)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+       struct qca8k_fdb _fdb = { 0 };
+       int cnt = QCA8K_NUM_FDB_RECORDS;
+       bool is_static;
+       int ret = 0;
+
+       mutex_lock(&priv->reg_mutex);
+       while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
+               if (!_fdb.aging)
+                       break;
+               is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
+               ret = cb(_fdb.mac, _fdb.vid, is_static, data);
+               if (ret)
+                       break;
+       }
+       mutex_unlock(&priv->reg_mutex);
+
+       return 0;
+}
+
+int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
+                      const struct switchdev_obj_port_mdb *mdb,
+                      struct dsa_db db)
+{
+       struct qca8k_priv *priv = ds->priv;
+       const u8 *addr = mdb->addr;
+       u16 vid = mdb->vid;
+
+       return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
+}
+
+int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
+                      const struct switchdev_obj_port_mdb *mdb,
+                      struct dsa_db db)
+{
+       struct qca8k_priv *priv = ds->priv;
+       const u8 *addr = mdb->addr;
+       u16 vid = mdb->vid;
+
+       return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
+}
+
+int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
+                         struct dsa_mall_mirror_tc_entry *mirror,
+                         bool ingress, struct netlink_ext_ack *extack)
+{
+       struct qca8k_priv *priv = ds->priv;
+       int monitor_port, ret;
+       u32 reg, val;
+
+       /* Check for an existing entry */
+       if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
+               return -EEXIST;
+
+       ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
+       if (ret)
+               return ret;
+
+       /* QCA83xx can have only one port set to mirror mode.
+        * Check that the correct port is requested and return error otherwise.
+        * When no mirror port is set, the value is set to 0xF
+        */
+       monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+       if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
+               return -EEXIST;
+
+       /* Set the monitor port */
+       val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
+                        mirror->to_local_port);
+       ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+                                QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+       if (ret)
+               return ret;
+
+       if (ingress) {
+               reg = QCA8K_PORT_LOOKUP_CTRL(port);
+               val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
+       } else {
+               reg = QCA8K_REG_PORT_HOL_CTRL1(port);
+               val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
+       }
+
+       ret = regmap_update_bits(priv->regmap, reg, val, val);
+       if (ret)
+               return ret;
+
+       /* Track mirror port for tx and rx to decide when the
+        * mirror port has to be disabled.
+        */
+       if (ingress)
+               priv->mirror_rx |= BIT(port);
+       else
+               priv->mirror_tx |= BIT(port);
+
+       return 0;
+}
+
+void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
+                          struct dsa_mall_mirror_tc_entry *mirror)
+{
+       struct qca8k_priv *priv = ds->priv;
+       u32 reg, val;
+       int ret;
+
+       if (mirror->ingress) {
+               reg = QCA8K_PORT_LOOKUP_CTRL(port);
+               val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
+       } else {
+               reg = QCA8K_REG_PORT_HOL_CTRL1(port);
+               val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
+       }
+
+       ret = regmap_clear_bits(priv->regmap, reg, val);
+       if (ret)
+               goto err;
+
+       if (mirror->ingress)
+               priv->mirror_rx &= ~BIT(port);
+       else
+               priv->mirror_tx &= ~BIT(port);
+
+       /* No ports are set to send packets to the mirror port. Disable it */
+       if (!priv->mirror_rx && !priv->mirror_tx) {
+               val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
+               ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+                                        QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+               if (ret)
+                       goto err;
+       }
+       return;
+err:
+       dev_err(priv->dev, "Failed to del mirror port from %d", port);
+}
+
+int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port,
+                             bool vlan_filtering,
+                             struct netlink_ext_ack *extack)
+{
+       struct qca8k_priv *priv = ds->priv;
+       int ret;
+
+       if (vlan_filtering) {
+               ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+                               QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
+                               QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
+       } else {
+               ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+                               QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
+                               QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
+       }
+
+       return ret;
+}
+
+int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
+                       const struct switchdev_obj_port_vlan *vlan,
+                       struct netlink_ext_ack *extack)
+{
+       bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+       bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+       struct qca8k_priv *priv = ds->priv;
+       int ret;
+
+       ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
+       if (ret) {
+               dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
+               return ret;
+       }
+
+       if (pvid) {
+               ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
+                               QCA8K_EGREES_VLAN_PORT_MASK(port),
+                               QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
+               if (ret)
+                       return ret;
+
+               ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
+                                 QCA8K_PORT_VLAN_CVID(vlan->vid) |
+                                 QCA8K_PORT_VLAN_SVID(vlan->vid));
+       }
+
+       return ret;
+}
+
+int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
+                       const struct switchdev_obj_port_vlan *vlan)
+{
+       struct qca8k_priv *priv = ds->priv;
+       int ret;
+
+       ret = qca8k_vlan_del(priv, port, vlan->vid);
+       if (ret)
+               dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
+
+       return ret;
+}
+
+static bool qca8k_lag_can_offload(struct dsa_switch *ds,
+                                 struct dsa_lag lag,
+                                 struct netdev_lag_upper_info *info)
+{
+       struct dsa_port *dp;
+       int members = 0;
+
+       if (!lag.id)
+               return false;
+
+       dsa_lag_foreach_port(dp, ds->dst, &lag)
+               /* Includes the port joining the LAG */
+               members++;
+
+       if (members > QCA8K_NUM_PORTS_FOR_LAG)
+               return false;
+
+       if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+               return false;
+
+       if (info->hash_type != NETDEV_LAG_HASH_L2 &&
+           info->hash_type != NETDEV_LAG_HASH_L23)
+               return false;
+
+       return true;
+}
+
+static int qca8k_lag_setup_hash(struct dsa_switch *ds,
+                               struct dsa_lag lag,
+                               struct netdev_lag_upper_info *info)
+{
+       struct net_device *lag_dev = lag.dev;
+       struct qca8k_priv *priv = ds->priv;
+       bool unique_lag = true;
+       unsigned int i;
+       u32 hash = 0;
+
+       switch (info->hash_type) {
+       case NETDEV_LAG_HASH_L23:
+               hash |= QCA8K_TRUNK_HASH_SIP_EN;
+               hash |= QCA8K_TRUNK_HASH_DIP_EN;
+               fallthrough;
+       case NETDEV_LAG_HASH_L2:
+               hash |= QCA8K_TRUNK_HASH_SA_EN;
+               hash |= QCA8K_TRUNK_HASH_DA_EN;
+               break;
+       default: /* We should NEVER reach this */
+               return -EOPNOTSUPP;
+       }
+
+       /* Check if we are the unique configured LAG */
+       dsa_lags_foreach_id(i, ds->dst)
+               if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
+                       unique_lag = false;
+                       break;
+               }
+
+       /* The hash mode is global. Make sure the same hash mode
+        * is set for all 4 possible LAGs.
+        * If we are the only configured LAG we can set whatever
+        * hash mode we want.
+        * To change the hash mode, all existing LAGs must be removed
+        * and the new mode set when a LAG is added again.
+        */
+       if (unique_lag) {
+               priv->lag_hash_mode = hash;
+       } else if (priv->lag_hash_mode != hash) {
+               netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n");
+               return -EOPNOTSUPP;
+       }
+
+       return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
+                                 QCA8K_TRUNK_HASH_MASK, hash);
+}
+
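+/* Add or remove a port from the given trunk: update the global trunk member
+ * bitmap and claim or release one of the per-trunk member ID slots.
+ */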
+static int qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
+                                    struct dsa_lag lag, bool delete)
+{
+       struct qca8k_priv *priv = ds->priv;
+       int ret, id, i;
+       u32 val;
+
+       /* DSA LAG IDs are one-based, hardware is zero-based */
+       id = lag.id - 1;
+
+       /* Read current port member */
+       ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
+       if (ret)
+               return ret;
+
+       /* Shift val to the correct trunk */
+       val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
+       val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
+       if (delete)
+               val &= ~BIT(port);
+       else
+               val |= BIT(port);
+
+       /* Update the port members. With an empty portmap, disable the trunk */
+       ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
+                                QCA8K_REG_GOL_TRUNK_MEMBER(id) |
+                                QCA8K_REG_GOL_TRUNK_EN(id),
+                                !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
+                                val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
+
+       /* Search for a free member when adding, or for the port when deleting */
+       for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
+               ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
+               if (ret)
+                       return ret;
+
+               val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
+               val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
+
+               if (delete) {
+                       /* If the port is flagged as disabled, assume this
+                        * member is empty
+                        */
+                       if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
+                               continue;
+
+                       val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
+                       if (val != port)
+                               continue;
+               } else {
+                       /* If the port is flagged as enabled, assume this
+                        * member is already set
+                        */
+                       if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
+                               continue;
+               }
+
+               /* We have found the member to add/remove */
+               break;
+       }
+
+       /* Set port in the correct port mask or disable port if in delete mode */
+       return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
+                                 QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
+                                 QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
+                                 !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
+                                 port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
+}
+
+int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
+                       struct netdev_lag_upper_info *info)
+{
+       int ret;
+
+       if (!qca8k_lag_can_offload(ds, lag, info))
+               return -EOPNOTSUPP;
+
+       ret = qca8k_lag_setup_hash(ds, lag, info);
+       if (ret)
+               return ret;
+
+       return qca8k_lag_refresh_portmap(ds, port, lag, false);
+}
+
+int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
+                        struct dsa_lag lag)
+{
+       return qca8k_lag_refresh_portmap(ds, port, lag, true);
+}
+
+int qca8k_read_switch_id(struct qca8k_priv *priv)
+{
+       u32 val;
+       u8 id;
+       int ret;
+
+       if (!priv->info)
+               return -ENODEV;
+
+       ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
+       if (ret < 0)
+               return -ENODEV;
+
+       id = QCA8K_MASK_CTRL_DEVICE_ID(val);
+       if (id != priv->info->id) {
+               dev_err(priv->dev,
+                       "Switch id detected %x but expected %x",
+                       id, priv->info->id);
+               return -ENODEV;
+       }
+
+       priv->switch_id = id;
+
+       /* Save revision to communicate to the internal PHY driver */
+       priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
+
+       return 0;
+}
diff --git a/drivers/net/dsa/qca/qca8k.c b/drivers/net/dsa/qca/qca8k.c
deleted file mode 100644 (file)
index 1cbb05b..0000000
+++ /dev/null
@@ -1,3299 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
- * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2016 John Crispin <john@phrozen.org>
- */
-
-#include <linux/module.h>
-#include <linux/phy.h>
-#include <linux/netdevice.h>
-#include <linux/bitfield.h>
-#include <linux/regmap.h>
-#include <net/dsa.h>
-#include <linux/of_net.h>
-#include <linux/of_mdio.h>
-#include <linux/of_platform.h>
-#include <linux/if_bridge.h>
-#include <linux/mdio.h>
-#include <linux/phylink.h>
-#include <linux/gpio/consumer.h>
-#include <linux/etherdevice.h>
-#include <linux/dsa/tag_qca.h>
-
-#include "qca8k.h"
-
-#define MIB_DESC(_s, _o, _n)   \
-       {                       \
-               .size = (_s),   \
-               .offset = (_o), \
-               .name = (_n),   \
-       }
-
-static const struct qca8k_mib_desc ar8327_mib[] = {
-       MIB_DESC(1, 0x00, "RxBroad"),
-       MIB_DESC(1, 0x04, "RxPause"),
-       MIB_DESC(1, 0x08, "RxMulti"),
-       MIB_DESC(1, 0x0c, "RxFcsErr"),
-       MIB_DESC(1, 0x10, "RxAlignErr"),
-       MIB_DESC(1, 0x14, "RxRunt"),
-       MIB_DESC(1, 0x18, "RxFragment"),
-       MIB_DESC(1, 0x1c, "Rx64Byte"),
-       MIB_DESC(1, 0x20, "Rx128Byte"),
-       MIB_DESC(1, 0x24, "Rx256Byte"),
-       MIB_DESC(1, 0x28, "Rx512Byte"),
-       MIB_DESC(1, 0x2c, "Rx1024Byte"),
-       MIB_DESC(1, 0x30, "Rx1518Byte"),
-       MIB_DESC(1, 0x34, "RxMaxByte"),
-       MIB_DESC(1, 0x38, "RxTooLong"),
-       MIB_DESC(2, 0x3c, "RxGoodByte"),
-       MIB_DESC(2, 0x44, "RxBadByte"),
-       MIB_DESC(1, 0x4c, "RxOverFlow"),
-       MIB_DESC(1, 0x50, "Filtered"),
-       MIB_DESC(1, 0x54, "TxBroad"),
-       MIB_DESC(1, 0x58, "TxPause"),
-       MIB_DESC(1, 0x5c, "TxMulti"),
-       MIB_DESC(1, 0x60, "TxUnderRun"),
-       MIB_DESC(1, 0x64, "Tx64Byte"),
-       MIB_DESC(1, 0x68, "Tx128Byte"),
-       MIB_DESC(1, 0x6c, "Tx256Byte"),
-       MIB_DESC(1, 0x70, "Tx512Byte"),
-       MIB_DESC(1, 0x74, "Tx1024Byte"),
-       MIB_DESC(1, 0x78, "Tx1518Byte"),
-       MIB_DESC(1, 0x7c, "TxMaxByte"),
-       MIB_DESC(1, 0x80, "TxOverSize"),
-       MIB_DESC(2, 0x84, "TxByte"),
-       MIB_DESC(1, 0x8c, "TxCollision"),
-       MIB_DESC(1, 0x90, "TxAbortCol"),
-       MIB_DESC(1, 0x94, "TxMultiCol"),
-       MIB_DESC(1, 0x98, "TxSingleCol"),
-       MIB_DESC(1, 0x9c, "TxExcDefer"),
-       MIB_DESC(1, 0xa0, "TxDefer"),
-       MIB_DESC(1, 0xa4, "TxLateCol"),
-       MIB_DESC(1, 0xa8, "RXUnicast"),
-       MIB_DESC(1, 0xac, "TXUnicast"),
-};
-
-static void
-qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
-{
-       regaddr >>= 1;
-       *r1 = regaddr & 0x1e;
-
-       regaddr >>= 5;
-       *r2 = regaddr & 0x7;
-
-       regaddr >>= 3;
-       *page = regaddr & 0x3ff;
-}
-
-static int
-qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
-{
-       u16 *cached_lo = &priv->mdio_cache.lo;
-       struct mii_bus *bus = priv->bus;
-       int ret;
-
-       if (lo == *cached_lo)
-               return 0;
-
-       ret = bus->write(bus, phy_id, regnum, lo);
-       if (ret < 0)
-               dev_err_ratelimited(&bus->dev,
-                                   "failed to write qca8k 32bit lo register\n");
-
-       *cached_lo = lo;
-       return 0;
-}
-
-static int
-qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
-{
-       u16 *cached_hi = &priv->mdio_cache.hi;
-       struct mii_bus *bus = priv->bus;
-       int ret;
-
-       if (hi == *cached_hi)
-               return 0;
-
-       ret = bus->write(bus, phy_id, regnum, hi);
-       if (ret < 0)
-               dev_err_ratelimited(&bus->dev,
-                                   "failed to write qca8k 32bit hi register\n");
-
-       *cached_hi = hi;
-       return 0;
-}
-
-static int
-qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
-{
-       int ret;
-
-       ret = bus->read(bus, phy_id, regnum);
-       if (ret >= 0) {
-               *val = ret;
-               ret = bus->read(bus, phy_id, regnum + 1);
-               *val |= ret << 16;
-       }
-
-       if (ret < 0) {
-               dev_err_ratelimited(&bus->dev,
-                                   "failed to read qca8k 32bit register\n");
-               *val = 0;
-               return ret;
-       }
-
-       return 0;
-}
-
-static void
-qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
-{
-       u16 lo, hi;
-       int ret;
-
-       lo = val & 0xffff;
-       hi = (u16)(val >> 16);
-
-       ret = qca8k_set_lo(priv, phy_id, regnum, lo);
-       if (ret >= 0)
-               ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
-}
-
-static int
-qca8k_set_page(struct qca8k_priv *priv, u16 page)
-{
-       u16 *cached_page = &priv->mdio_cache.page;
-       struct mii_bus *bus = priv->bus;
-       int ret;
-
-       if (page == *cached_page)
-               return 0;
-
-       ret = bus->write(bus, 0x18, 0, page);
-       if (ret < 0) {
-               dev_err_ratelimited(&bus->dev,
-                                   "failed to set qca8k page\n");
-               return ret;
-       }
-
-       *cached_page = page;
-       usleep_range(1000, 2000);
-       return 0;
-}
-
-static int
-qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
-{
-       return regmap_read(priv->regmap, reg, val);
-}
-
-static int
-qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
-{
-       return regmap_write(priv->regmap, reg, val);
-}
-
-static int
-qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
-{
-       return regmap_update_bits(priv->regmap, reg, mask, write_val);
-}
-
-static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
-{
-       struct qca8k_mgmt_eth_data *mgmt_eth_data;
-       struct qca8k_priv *priv = ds->priv;
-       struct qca_mgmt_ethhdr *mgmt_ethhdr;
-       u8 len, cmd;
-
-       mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
-       mgmt_eth_data = &priv->mgmt_eth_data;
-
-       cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
-       len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);
-
-       /* Make sure the seq match the requested packet */
-       if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
-               mgmt_eth_data->ack = true;
-
-       if (cmd == MDIO_READ) {
-               mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;
-
-               /* Get the rest of the 12 byte of data.
-                * The read/write function will extract the requested data.
-                */
-               if (len > QCA_HDR_MGMT_DATA1_LEN)
-                       memcpy(mgmt_eth_data->data + 1, skb->data,
-                              QCA_HDR_MGMT_DATA2_LEN);
-       }
-
-       complete(&mgmt_eth_data->rw_done);
-}
-
-static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
-                                              int priority, unsigned int len)
-{
-       struct qca_mgmt_ethhdr *mgmt_ethhdr;
-       unsigned int real_len;
-       struct sk_buff *skb;
-       u32 *data2;
-       u16 hdr;
-
-       skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
-       if (!skb)
-               return NULL;
-
-       /* Max value for len reg is 15 (0xf) but the switch actually return 16 byte
-        * Actually for some reason the steps are:
-        * 0: nothing
-        * 1-4: first 4 byte
-        * 5-6: first 12 byte
-        * 7-15: all 16 byte
-        */
-       if (len == 16)
-               real_len = 15;
-       else
-               real_len = len;
-
-       skb_reset_mac_header(skb);
-       skb_set_network_header(skb, skb->len);
-
-       mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
-
-       hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
-       hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
-       hdr |= QCA_HDR_XMIT_FROM_CPU;
-       hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
-       hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
-
-       mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
-       mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
-       mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
-       mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
-                                          QCA_HDR_MGMT_CHECK_CODE_VAL);
-
-       if (cmd == MDIO_WRITE)
-               mgmt_ethhdr->mdio_data = *val;
-
-       mgmt_ethhdr->hdr = htons(hdr);
-
-       data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
-       if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
-               memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);
-
-       return skb;
-}
-
-static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
-{
-       struct qca_mgmt_ethhdr *mgmt_ethhdr;
-
-       mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
-       mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
-}
-
-static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
-{
-       struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
-       struct sk_buff *skb;
-       bool ack;
-       int ret;
-
-       skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
-                                     QCA8K_ETHERNET_MDIO_PRIORITY, len);
-       if (!skb)
-               return -ENOMEM;
-
-       mutex_lock(&mgmt_eth_data->mutex);
-
-       /* Check mgmt_master if is operational */
-       if (!priv->mgmt_master) {
-               kfree_skb(skb);
-               mutex_unlock(&mgmt_eth_data->mutex);
-               return -EINVAL;
-       }
-
-       skb->dev = priv->mgmt_master;
-
-       reinit_completion(&mgmt_eth_data->rw_done);
-
-       /* Increment seq_num and set it in the mdio pkt */
-       mgmt_eth_data->seq++;
-       qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
-       mgmt_eth_data->ack = false;
-
-       dev_queue_xmit(skb);
-
-       ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
-                                         msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
-
-       *val = mgmt_eth_data->data[0];
-       if (len > QCA_HDR_MGMT_DATA1_LEN)
-               memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
-
-       ack = mgmt_eth_data->ack;
-
-       mutex_unlock(&mgmt_eth_data->mutex);
-
-       if (ret <= 0)
-               return -ETIMEDOUT;
-
-       if (!ack)
-               return -EINVAL;
-
-       return 0;
-}
-
-static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
-{
-       struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
-       struct sk_buff *skb;
-       bool ack;
-       int ret;
-
-       skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
-                                     QCA8K_ETHERNET_MDIO_PRIORITY, len);
-       if (!skb)
-               return -ENOMEM;
-
-       mutex_lock(&mgmt_eth_data->mutex);
-
-       /* Check mgmt_master if is operational */
-       if (!priv->mgmt_master) {
-               kfree_skb(skb);
-               mutex_unlock(&mgmt_eth_data->mutex);
-               return -EINVAL;
-       }
-
-       skb->dev = priv->mgmt_master;
-
-       reinit_completion(&mgmt_eth_data->rw_done);
-
-       /* Increment seq_num and set it in the mdio pkt */
-       mgmt_eth_data->seq++;
-       qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
-       mgmt_eth_data->ack = false;
-
-       dev_queue_xmit(skb);
-
-       ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
-                                         msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
-
-       ack = mgmt_eth_data->ack;
-
-       mutex_unlock(&mgmt_eth_data->mutex);
-
-       if (ret <= 0)
-               return -ETIMEDOUT;
-
-       if (!ack)
-               return -EINVAL;
-
-       return 0;
-}
-
-static int
-qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
-{
-       u32 val = 0;
-       int ret;
-
-       ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
-       if (ret)
-               return ret;
-
-       val &= ~mask;
-       val |= write_val;
-
-       return qca8k_write_eth(priv, reg, &val, sizeof(val));
-}
-
-static int
-qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
-{
-       int i, count = len / sizeof(u32), ret;
-
-       if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len))
-               return 0;
-
-       for (i = 0; i < count; i++) {
-               ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
-               if (ret < 0)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static int
-qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
-{
-       int i, count = len / sizeof(u32), ret;
-       u32 tmp;
-
-       if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len))
-               return 0;
-
-       for (i = 0; i < count; i++) {
-               tmp = val[i];
-
-               ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
-               if (ret < 0)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static int
-qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
-       struct mii_bus *bus = priv->bus;
-       u16 r1, r2, page;
-       int ret;
-
-       if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
-               return 0;
-
-       qca8k_split_addr(reg, &r1, &r2, &page);
-
-       mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-
-       ret = qca8k_set_page(priv, page);
-       if (ret < 0)
-               goto exit;
-
-       ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
-
-exit:
-       mutex_unlock(&bus->mdio_lock);
-       return ret;
-}
-
-static int
-qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
-       struct mii_bus *bus = priv->bus;
-       u16 r1, r2, page;
-       int ret;
-
-       if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
-               return 0;
-
-       qca8k_split_addr(reg, &r1, &r2, &page);
-
-       mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-
-       ret = qca8k_set_page(priv, page);
-       if (ret < 0)
-               goto exit;
-
-       qca8k_mii_write32(priv, 0x10 | r2, r1, val);
-
-exit:
-       mutex_unlock(&bus->mdio_lock);
-       return ret;
-}
-
-static int
-qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
-       struct mii_bus *bus = priv->bus;
-       u16 r1, r2, page;
-       u32 val;
-       int ret;
-
-       if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
-               return 0;
-
-       qca8k_split_addr(reg, &r1, &r2, &page);
-
-       mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-
-       ret = qca8k_set_page(priv, page);
-       if (ret < 0)
-               goto exit;
-
-       ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
-       if (ret < 0)
-               goto exit;
-
-       val &= ~mask;
-       val |= write_val;
-       qca8k_mii_write32(priv, 0x10 | r2, r1, val);
-
-exit:
-       mutex_unlock(&bus->mdio_lock);
-
-       return ret;
-}
-
-static const struct regmap_range qca8k_readable_ranges[] = {
-       regmap_reg_range(0x0000, 0x00e4), /* Global control */
-       regmap_reg_range(0x0100, 0x0168), /* EEE control */
-       regmap_reg_range(0x0200, 0x0270), /* Parser control */
-       regmap_reg_range(0x0400, 0x0454), /* ACL */
-       regmap_reg_range(0x0600, 0x0718), /* Lookup */
-       regmap_reg_range(0x0800, 0x0b70), /* QM */
-       regmap_reg_range(0x0c00, 0x0c80), /* PKT */
-       regmap_reg_range(0x0e00, 0x0e98), /* L3 */
-       regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
-       regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
-       regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
-       regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
-       regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
-       regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
-       regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
-};
-
-static const struct regmap_access_table qca8k_readable_table = {
-       .yes_ranges = qca8k_readable_ranges,
-       .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
-};
-
-static struct regmap_config qca8k_regmap_config = {
-       .reg_bits = 16,
-       .val_bits = 32,
-       .reg_stride = 4,
-       .max_register = 0x16ac, /* end MIB - Port6 range */
-       .reg_read = qca8k_regmap_read,
-       .reg_write = qca8k_regmap_write,
-       .reg_update_bits = qca8k_regmap_update_bits,
-       .rd_table = &qca8k_readable_table,
-       .disable_locking = true, /* Locking is handled by qca8k read/write */
-       .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
-};
-
-static int
-qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
-{
-       u32 val;
-
-       return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
-                                      QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
-}
-
-static int
-qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
-{
-       u32 reg[3];
-       int ret;
-
-       /* load the ARL table into an array */
-       ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
-       if (ret)
-               return ret;
-
-       /* vid - 83:72 */
-       fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
-       /* aging - 67:64 */
-       fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
-       /* portmask - 54:48 */
-       fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
-       /* mac - 47:0 */
-       fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
-       fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
-       fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
-       fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
-       fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
-       fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
-
-       return 0;
-}
-
-static void
-qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
-               u8 aging)
-{
-       u32 reg[3] = { 0 };
-
-       /* vid - 83:72 */
-       reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
-       /* aging - 67:64 */
-       reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
-       /* portmask - 54:48 */
-       reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
-       /* mac - 47:0 */
-       reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
-       reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
-       reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
-       reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
-       reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
-       reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
-
-       /* load the array into the ARL table */
-       qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
-}
-
-static int
-qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
-{
-       u32 reg;
-       int ret;
-
-       /* Set the command and FDB index */
-       reg = QCA8K_ATU_FUNC_BUSY;
-       reg |= cmd;
-       if (port >= 0) {
-               reg |= QCA8K_ATU_FUNC_PORT_EN;
-               reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
-       }
-
-       /* Write the function register triggering the table access */
-       ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
-       if (ret)
-               return ret;
-
-       /* wait for completion */
-       ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
-       if (ret)
-               return ret;
-
-       /* Check for table full violation when adding an entry */
-       if (cmd == QCA8K_FDB_LOAD) {
-               ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
-               if (ret < 0)
-                       return ret;
-               if (reg & QCA8K_ATU_FUNC_FULL)
-                       return -1;
-       }
-
-       return 0;
-}
-
-static int
-qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
-{
-       int ret;
-
-       qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
-       ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
-       if (ret < 0)
-               return ret;
-
-       return qca8k_fdb_read(priv, fdb);
-}
-
-static int
-qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
-             u16 vid, u8 aging)
-{
-       int ret;
-
-       mutex_lock(&priv->reg_mutex);
-       qca8k_fdb_write(priv, vid, port_mask, mac, aging);
-       ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
-       mutex_unlock(&priv->reg_mutex);
-
-       return ret;
-}
-
-static int
-qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
-{
-       int ret;
-
-       mutex_lock(&priv->reg_mutex);
-       qca8k_fdb_write(priv, vid, port_mask, mac, 0);
-       ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
-       mutex_unlock(&priv->reg_mutex);
-
-       return ret;
-}
-
-static void
-qca8k_fdb_flush(struct qca8k_priv *priv)
-{
-       mutex_lock(&priv->reg_mutex);
-       qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
-       mutex_unlock(&priv->reg_mutex);
-}
-
-static int
-qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
-                           const u8 *mac, u16 vid)
-{
-       struct qca8k_fdb fdb = { 0 };
-       int ret;
-
-       mutex_lock(&priv->reg_mutex);
-
-       qca8k_fdb_write(priv, vid, 0, mac, 0);
-       ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
-       if (ret < 0)
-               goto exit;
-
-       ret = qca8k_fdb_read(priv, &fdb);
-       if (ret < 0)
-               goto exit;
-
-       /* Rule exists. Delete it first */
-       if (!fdb.aging) {
-               ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
-               if (ret)
-                       goto exit;
-       }
-
-       /* Add port to fdb portmask */
-       fdb.port_mask |= port_mask;
-
-       qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
-       ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
-
-exit:
-       mutex_unlock(&priv->reg_mutex);
-       return ret;
-}
-
-static int
-qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
-                        const u8 *mac, u16 vid)
-{
-       struct qca8k_fdb fdb = { 0 };
-       int ret;
-
-       mutex_lock(&priv->reg_mutex);
-
-       qca8k_fdb_write(priv, vid, 0, mac, 0);
-       ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
-       if (ret < 0)
-               goto exit;
-
-       /* Rule doesn't exist. Why delete? */
-       if (!fdb.aging) {
-               ret = -EINVAL;
-               goto exit;
-       }
-
-       ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
-       if (ret)
-               goto exit;
-
-       /* The only port in the rule is this port. Don't re-insert it */
-       if (fdb.port_mask == port_mask)
-               goto exit;
-
-       /* Remove port from port mask */
-       fdb.port_mask &= ~port_mask;
-
-       qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
-       ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
-
-exit:
-       mutex_unlock(&priv->reg_mutex);
-       return ret;
-}
-
-static int
-qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
-{
-       u32 reg;
-       int ret;
-
-       /* Set the command and VLAN index */
-       reg = QCA8K_VTU_FUNC1_BUSY;
-       reg |= cmd;
-       reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
-
-       /* Write the function register triggering the table access */
-       ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
-       if (ret)
-               return ret;
-
-       /* wait for completion */
-       ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
-       if (ret)
-               return ret;
-
-       /* Check for table full violation when adding an entry */
-       if (cmd == QCA8K_VLAN_LOAD) {
-               ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
-               if (ret < 0)
-                       return ret;
-               if (reg & QCA8K_VTU_FUNC1_FULL)
-                       return -ENOMEM;
-       }
-
-       return 0;
-}
-
-static int
-qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
-{
-       u32 reg;
-       int ret;
-
-       /* We do the right thing with VLAN 0 and treat it as untagged while
-        * preserving the tag on egress.
-        */
-       if (vid == 0)
-               return 0;
-
-       mutex_lock(&priv->reg_mutex);
-       ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
-       if (ret < 0)
-               goto out;
-
-       ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
-       if (ret < 0)
-               goto out;
-       reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
-       reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
-       if (untagged)
-               reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
-       else
-               reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
-
-       ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
-       if (ret)
-               goto out;
-       ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
-
-out:
-       mutex_unlock(&priv->reg_mutex);
-
-       return ret;
-}
-
-static int
-qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
-{
-       u32 reg, mask;
-       int ret, i;
-       bool del;
-
-       mutex_lock(&priv->reg_mutex);
-       ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
-       if (ret < 0)
-               goto out;
-
-       ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
-       if (ret < 0)
-               goto out;
-       reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
-       reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
-
-       /* Check if we're the last member to be removed */
-       del = true;
-       for (i = 0; i < QCA8K_NUM_PORTS; i++) {
-               mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
-
-               if ((reg & mask) != mask) {
-                       del = false;
-                       break;
-               }
-       }
-
-       if (del) {
-               ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
-       } else {
-               ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
-               if (ret)
-                       goto out;
-               ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
-       }
-
-out:
-       mutex_unlock(&priv->reg_mutex);
-
-       return ret;
-}
-
-static int
-qca8k_mib_init(struct qca8k_priv *priv)
-{
-       int ret;
-
-       mutex_lock(&priv->reg_mutex);
-       ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
-                                QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
-                                FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
-                                QCA8K_MIB_BUSY);
-       if (ret)
-               goto exit;
-
-       ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
-       if (ret)
-               goto exit;
-
-       ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
-       if (ret)
-               goto exit;
-
-       ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
-
-exit:
-       mutex_unlock(&priv->reg_mutex);
-       return ret;
-}
-
-static void
-qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
-{
-       u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
-
-       /* Ports 0 and 6 have no internal PHY */
-       if (port > 0 && port < 6)
-               mask |= QCA8K_PORT_STATUS_LINK_AUTO;
-
-       if (enable)
-               regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
-       else
-               regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
-}
-
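-/* Transmit a copy of the prebuilt read skb on the mgmt Ethernet channel and
- * return the received MDIO master control word, so the caller can poll the
- * busy bit.
- */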
-static int
-qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
-                       struct sk_buff *read_skb, u32 *val)
-{
-       struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
-       bool ack;
-       int ret;
-
-       reinit_completion(&mgmt_eth_data->rw_done);
-
-       /* Increment seq_num and set it in the copy pkt */
-       mgmt_eth_data->seq++;
-       qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
-       mgmt_eth_data->ack = false;
-
-       dev_queue_xmit(skb);
-
-       ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
-                                         QCA8K_ETHERNET_TIMEOUT);
-
-       ack = mgmt_eth_data->ack;
-
-       if (ret <= 0)
-               return -ETIMEDOUT;
-
-       if (!ack)
-               return -EINVAL;
-
-       *val = mgmt_eth_data->data[0];
-
-       return 0;
-}
-
-static int
-qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
-                     int regnum, u16 data)
-{
-       struct sk_buff *write_skb, *clear_skb, *read_skb;
-       struct qca8k_mgmt_eth_data *mgmt_eth_data;
-       u32 write_val, clear_val = 0, val;
-       struct net_device *mgmt_master;
-       int ret, ret1;
-       bool ack;
-
-       if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
-               return -EINVAL;
-
-       mgmt_eth_data = &priv->mgmt_eth_data;
-
-       write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
-                   QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
-                   QCA8K_MDIO_MASTER_REG_ADDR(regnum);
-
-       if (read) {
-               write_val |= QCA8K_MDIO_MASTER_READ;
-       } else {
-               write_val |= QCA8K_MDIO_MASTER_WRITE;
-               write_val |= QCA8K_MDIO_MASTER_DATA(data);
-       }
-
-       /* Prealloc all the needed skb before the lock */
-       write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
-                                           QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
-       if (!write_skb)
-               return -ENOMEM;
-
-       clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
-                                           QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
-       if (!clear_skb) {
-               ret = -ENOMEM;
-               goto err_clear_skb;
-       }
-
-       read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
-                                          QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
-       if (!read_skb) {
-               ret = -ENOMEM;
-               goto err_read_skb;
-       }
-
-       /* Actually start the request:
-        * 1. Send mdio master packet
-        * 2. Busy Wait for mdio master command
-        * 3. Get the data if we are reading
-        * 4. Reset the mdio master (even with error)
-        */
-       mutex_lock(&mgmt_eth_data->mutex);
-
-       /* Check if mgmt_master is operational */
-       mgmt_master = priv->mgmt_master;
-       if (!mgmt_master) {
-               mutex_unlock(&mgmt_eth_data->mutex);
-               ret = -EINVAL;
-               goto err_mgmt_master;
-       }
-
-       read_skb->dev = mgmt_master;
-       clear_skb->dev = mgmt_master;
-       write_skb->dev = mgmt_master;
-
-       reinit_completion(&mgmt_eth_data->rw_done);
-
-       /* Increment seq_num and set it in the write pkt */
-       mgmt_eth_data->seq++;
-       qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
-       mgmt_eth_data->ack = false;
-
-       dev_queue_xmit(write_skb);
-
-       ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
-                                         QCA8K_ETHERNET_TIMEOUT);
-
-       ack = mgmt_eth_data->ack;
-
-       if (ret <= 0) {
-               ret = -ETIMEDOUT;
-               kfree_skb(read_skb);
-               goto exit;
-       }
-
-       if (!ack) {
-               ret = -EINVAL;
-               kfree_skb(read_skb);
-               goto exit;
-       }
-
-       ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
-                               !(val & QCA8K_MDIO_MASTER_BUSY), 0,
-                               QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
-                               mgmt_eth_data, read_skb, &val);
-
-       if (ret < 0 && ret1 < 0) {
-               ret = ret1;
-               goto exit;
-       }
-
-       if (read) {
-               reinit_completion(&mgmt_eth_data->rw_done);
-
-               /* Increment seq_num and set it in the read pkt */
-               mgmt_eth_data->seq++;
-               qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
-               mgmt_eth_data->ack = false;
-
-               dev_queue_xmit(read_skb);
-
-               ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
-                                                 QCA8K_ETHERNET_TIMEOUT);
-
-               ack = mgmt_eth_data->ack;
-
-               if (ret <= 0) {
-                       ret = -ETIMEDOUT;
-                       goto exit;
-               }
-
-               if (!ack) {
-                       ret = -EINVAL;
-                       goto exit;
-               }
-
-               ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
-       } else {
-               kfree_skb(read_skb);
-       }
-exit:
-       reinit_completion(&mgmt_eth_data->rw_done);
-
-       /* Increment seq_num and set it in the clear pkt */
-       mgmt_eth_data->seq++;
-       qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
-       mgmt_eth_data->ack = false;
-
-       dev_queue_xmit(clear_skb);
-
-       wait_for_completion_timeout(&mgmt_eth_data->rw_done,
-                                   QCA8K_ETHERNET_TIMEOUT);
-
-       mutex_unlock(&mgmt_eth_data->mutex);
-
-       return ret;
-
-       /* Error handling before lock */
-err_mgmt_master:
-       kfree_skb(read_skb);
-err_read_skb:
-       kfree_skb(clear_skb);
-err_clear_skb:
-       kfree_skb(write_skb);
-
-       return ret;
-}
-
-static u32
-qca8k_port_to_phy(int port)
-{
-       /* From Andrew Lunn:
-        * Port 0 has no internal phy.
-        * Port 1 has an internal PHY at MDIO address 0.
-        * Port 2 has an internal PHY at MDIO address 1.
-        * ...
-        * Port 5 has an internal PHY at MDIO address 4.
-        * Port 6 has no internal PHY.
-        */
-
-       return port - 1;
-}
-
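-/* Poll a switch register over the MDIO bus until the mask bits clear or the
- * busy-wait timeout expires.
- */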
-static int
-qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
-{
-       u16 r1, r2, page;
-       u32 val;
-       int ret, ret1;
-
-       qca8k_split_addr(reg, &r1, &r2, &page);
-
-       ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
-                               QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
-                               bus, 0x10 | r2, r1, &val);
-
-       /* Check if qca8k_read has failed for a different reason
-        * before returning -ETIMEDOUT
-        */
-       if (ret < 0 && ret1 < 0)
-               return ret1;
-
-       return ret;
-}
-
-static int
-qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
-{
-       struct mii_bus *bus = priv->bus;
-       u16 r1, r2, page;
-       u32 val;
-       int ret;
-
-       if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
-               return -EINVAL;
-
-       val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
-             QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
-             QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
-             QCA8K_MDIO_MASTER_DATA(data);
-
-       qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
-
-       mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-
-       ret = qca8k_set_page(priv, page);
-       if (ret)
-               goto exit;
-
-       qca8k_mii_write32(priv, 0x10 | r2, r1, val);
-
-       ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
-                                  QCA8K_MDIO_MASTER_BUSY);
-
-exit:
-       /* even if the busy_wait times out, try to clear the MASTER_EN */
-       qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
-
-       mutex_unlock(&bus->mdio_lock);
-
-       return ret;
-}
-
-static int
-qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
-{
-       struct mii_bus *bus = priv->bus;
-       u16 r1, r2, page;
-       u32 val;
-       int ret;
-
-       if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
-               return -EINVAL;
-
-       val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
-             QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
-             QCA8K_MDIO_MASTER_REG_ADDR(regnum);
-
-       qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
-
-       mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-
-       ret = qca8k_set_page(priv, page);
-       if (ret)
-               goto exit;
-
-       qca8k_mii_write32(priv, 0x10 | r2, r1, val);
-
-       ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
-                                  QCA8K_MDIO_MASTER_BUSY);
-       if (ret)
-               goto exit;
-
-       ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
-
-exit:
-       /* even if the busy_wait times out, try to clear the MASTER_EN */
-       qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
-
-       mutex_unlock(&bus->mdio_lock);
-
-       if (ret >= 0)
-               ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
-
-       return ret;
-}
-
-static int
-qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
-{
-       struct qca8k_priv *priv = slave_bus->priv;
-       int ret;
-
-       /* Use mdio Ethernet when available, fall back to the legacy one on error */
-       ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
-       if (!ret)
-               return 0;
-
-       return qca8k_mdio_write(priv, phy, regnum, data);
-}
-
-static int
-qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
-{
-       struct qca8k_priv *priv = slave_bus->priv;
-       int ret;
-
-       /* Use mdio Ethernet when available, fall back to the legacy one on error */
-       ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
-       if (ret >= 0)
-               return ret;
-
-       ret = qca8k_mdio_read(priv, phy, regnum);
-
-       if (ret < 0)
-               return 0xffff;
-
-       return ret;
-}
-
-static int
-qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
-{
-       port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
-
-       return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
-}
-
-static int
-qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
-{
-       port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
-
-       return qca8k_internal_mdio_read(slave_bus, port, regnum);
-}
-
-static int
-qca8k_mdio_register(struct qca8k_priv *priv)
-{
-       struct dsa_switch *ds = priv->ds;
-       struct device_node *mdio;
-       struct mii_bus *bus;
-
-       bus = devm_mdiobus_alloc(ds->dev);
-       if (!bus)
-               return -ENOMEM;
-
-       bus->priv = (void *)priv;
-       snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
-                ds->dst->index, ds->index);
-       bus->parent = ds->dev;
-       bus->phy_mask = ~ds->phys_mii_mask;
-       ds->slave_mii_bus = bus;
-
-       /* Check if the devicetree declares the port:phy mapping */
-       mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
-       if (of_device_is_available(mdio)) {
-               bus->name = "qca8k slave mii";
-               bus->read = qca8k_internal_mdio_read;
-               bus->write = qca8k_internal_mdio_write;
-               return devm_of_mdiobus_register(priv->dev, bus, mdio);
-       }
-
-       /* If a mapping can't be found, the legacy mapping based on the
-        * qca8k_port_to_phy function is used.
-        */
-       bus->name = "qca8k-legacy slave mii";
-       bus->read = qca8k_legacy_mdio_read;
-       bus->write = qca8k_legacy_mdio_write;
-       return devm_mdiobus_register(priv->dev, bus);
-}
-
-static int
-qca8k_setup_mdio_bus(struct qca8k_priv *priv)
-{
-       u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
-       struct device_node *ports, *port;
-       phy_interface_t mode;
-       int err;
-
-       ports = of_get_child_by_name(priv->dev->of_node, "ports");
-       if (!ports)
-               ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
-
-       if (!ports)
-               return -EINVAL;
-
-       for_each_available_child_of_node(ports, port) {
-               err = of_property_read_u32(port, "reg", &reg);
-               if (err) {
-                       of_node_put(port);
-                       of_node_put(ports);
-                       return err;
-               }
-
-               if (!dsa_is_user_port(priv->ds, reg))
-                       continue;
-
-               of_get_phy_mode(port, &mode);
-
-               if (of_property_read_bool(port, "phy-handle") &&
-                   mode != PHY_INTERFACE_MODE_INTERNAL)
-                       external_mdio_mask |= BIT(reg);
-               else
-                       internal_mdio_mask |= BIT(reg);
-       }
-
-       of_node_put(ports);
-       if (!external_mdio_mask && !internal_mdio_mask) {
-               dev_err(priv->dev, "no PHYs are defined.\n");
-               return -EINVAL;
-       }
-
-       /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
-        * the MDIO_MASTER register, also _disconnects_ the external MDC
-        * passthrough to the internal PHYs. It's not possible to use both
-        * configurations at the same time!
-        *
-        * Because this came up during the review process:
-        * If the external mdio-bus driver is capable of magically disabling
-        * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
-        * accessors for the time being, it would be possible to pull this
-        * off.
-        */
-       if (!!external_mdio_mask && !!internal_mdio_mask) {
-               dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
-               return -EINVAL;
-       }
-
-       if (external_mdio_mask) {
-               /* Make sure to disable the internal mdio bus in case
-                * a dt-overlay and driver reload changed the configuration
-                */
-
-               return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
-                                        QCA8K_MDIO_MASTER_EN);
-       }
-
-       return qca8k_mdio_register(priv);
-}
-
-static int
-qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
-{
-       u32 mask = 0;
-       int ret = 0;
-
-       /* SoC specific settings for ipq8064.
-        * If more devices require this, consider adding
-        * a dedicated binding.
-        */
-       if (of_machine_is_compatible("qcom,ipq8064"))
-               mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
-
-       /* SoC specific settings for ipq8065 */
-       if (of_machine_is_compatible("qcom,ipq8065"))
-               mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
-
-       if (mask) {
-               ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
-                               QCA8K_MAC_PWR_RGMII0_1_8V |
-                               QCA8K_MAC_PWR_RGMII1_1_8V,
-                               mask);
-       }
-
-       return ret;
-}
-
-static int qca8k_find_cpu_port(struct dsa_switch *ds)
-{
-       struct qca8k_priv *priv = ds->priv;
-
-       /* Find the connected cpu port. Valid ports are 0 or 6 */
-       if (dsa_is_cpu_port(ds, 0))
-               return 0;
-
-       dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
-
-       if (dsa_is_cpu_port(ds, 6))
-               return 6;
-
-       return -EINVAL;
-}
-
-static int
-qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
-{
-       struct device_node *node = priv->dev->of_node;
-       const struct qca8k_match_data *data;
-       u32 val = 0;
-       int ret;
-
-       /* The QCA8327 requires the correct package mode to be set.
-        * Its bigger brother, the QCA8328, has the 172 pin layout.
-        * This should be applied by default but we set it just to make sure.
-        */
-       if (priv->switch_id == QCA8K_ID_QCA8327) {
-               data = of_device_get_match_data(priv->dev);
-
-               /* Set the correct package of 148 pin for QCA8327 */
-               if (data->reduced_package)
-                       val |= QCA8327_PWS_PACKAGE148_EN;
-
-               ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
-                               val);
-               if (ret)
-                       return ret;
-       }
-
-       if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
-               val |= QCA8K_PWS_POWER_ON_SEL;
-
-       if (of_property_read_bool(node, "qca,led-open-drain")) {
-               if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
-                       dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
-                       return -EINVAL;
-               }
-
-               val |= QCA8K_PWS_LED_OPEN_EN_CSR;
-       }
-
-       return qca8k_rmw(priv, QCA8K_REG_PWS,
-                       QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
-                       val);
-}
-
-static int
-qca8k_parse_port_config(struct qca8k_priv *priv)
-{
-       int port, cpu_port_index = -1, ret;
-       struct device_node *port_dn;
-       phy_interface_t mode;
-       struct dsa_port *dp;
-       u32 delay;
-
-       /* We have 2 CPU ports. Check them */
-       for (port = 0; port < QCA8K_NUM_PORTS; port++) {
-               /* Skip any port that is not one of the two CPU ports (0 and 6) */
-               if (port != 0 && port != 6)
-                       continue;
-
-               dp = dsa_to_port(priv->ds, port);
-               port_dn = dp->dn;
-               cpu_port_index++;
-
-               if (!of_device_is_available(port_dn))
-                       continue;
-
-               ret = of_get_phy_mode(port_dn, &mode);
-               if (ret)
-                       continue;
-
-               switch (mode) {
-               case PHY_INTERFACE_MODE_RGMII:
-               case PHY_INTERFACE_MODE_RGMII_ID:
-               case PHY_INTERFACE_MODE_RGMII_TXID:
-               case PHY_INTERFACE_MODE_RGMII_RXID:
-               case PHY_INTERFACE_MODE_SGMII:
-                       delay = 0;
-
-                       if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
-                               /* Switch regs accept value in ns, convert ps to ns */
-                               delay = delay / 1000;
-                       else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
-                                mode == PHY_INTERFACE_MODE_RGMII_TXID)
-                               delay = 1;
-
-                       if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
-                               dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
-                               delay = 3;
-                       }
-
-                       priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
-
-                       delay = 0;
-
-                       if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
-                               /* Switch regs accept value in ns, convert ps to ns */
-                               delay = delay / 1000;
-                       else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
-                                mode == PHY_INTERFACE_MODE_RGMII_RXID)
-                               delay = 2;
-
-                       if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
-                               dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
-                               delay = 3;
-                       }
-
-                       priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
-
-                       /* Skip sgmii parsing for rgmii* mode */
-                       if (mode == PHY_INTERFACE_MODE_RGMII ||
-                           mode == PHY_INTERFACE_MODE_RGMII_ID ||
-                           mode == PHY_INTERFACE_MODE_RGMII_TXID ||
-                           mode == PHY_INTERFACE_MODE_RGMII_RXID)
-                               break;
-
-                       if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
-                               priv->ports_config.sgmii_tx_clk_falling_edge = true;
-
-                       if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
-                               priv->ports_config.sgmii_rx_clk_falling_edge = true;
-
-                       if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
-                               priv->ports_config.sgmii_enable_pll = true;
-
-                               if (priv->switch_id == QCA8K_ID_QCA8327) {
-                                       dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
-                                       priv->ports_config.sgmii_enable_pll = false;
-                               }
-
-                               if (priv->switch_revision < 2)
-                                       dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
-                       }
-
-                       break;
-               default:
-                       continue;
-               }
-       }
-
-       return 0;
-}
-
-static void
-qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
-                                     u32 reg)
-{
-       u32 delay, val = 0;
-       int ret;
-
-       /* The delay can be declared in 3 different ways:
-        * the mode set to rgmii with the internal-delay standard binding
-        * defined, the rgmii-id phy mode set, or the rgmii-txid/rxid phy
-        * mode set.
-        * The parse logic sets a delay different than 0 only when one of
-        * these 3 ways is used. In all other cases the delay is not enabled.
-        * With ID or TX/RXID the delay is enabled and set to the default and
-        * recommended value.
-        */
-       if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
-               delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
-
-               val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
-                       QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
-       }
-
-       if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
-               delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
-
-               val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
-                       QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
-       }
-
-       /* Set RGMII delay based on the selected values */
-       ret = qca8k_rmw(priv, reg,
-                       QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
-                       QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
-                       QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
-                       QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
-                       val);
-       if (ret)
-               dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
-                       cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
-}
-
-static struct phylink_pcs *
-qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
-                            phy_interface_t interface)
-{
-       struct qca8k_priv *priv = ds->priv;
-       struct phylink_pcs *pcs = NULL;
-
-       switch (interface) {
-       case PHY_INTERFACE_MODE_SGMII:
-       case PHY_INTERFACE_MODE_1000BASEX:
-               switch (port) {
-               case 0:
-                       pcs = &priv->pcs_port_0.pcs;
-                       break;
-
-               case 6:
-                       pcs = &priv->pcs_port_6.pcs;
-                       break;
-               }
-               break;
-
-       default:
-               break;
-       }
-
-       return pcs;
-}
-
-static void
-qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
-                        const struct phylink_link_state *state)
-{
-       struct qca8k_priv *priv = ds->priv;
-       int cpu_port_index;
-       u32 reg;
-
-       switch (port) {
-       case 0: /* 1st CPU port */
-               if (state->interface != PHY_INTERFACE_MODE_RGMII &&
-                   state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
-                   state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
-                   state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
-                   state->interface != PHY_INTERFACE_MODE_SGMII)
-                       return;
-
-               reg = QCA8K_REG_PORT0_PAD_CTRL;
-               cpu_port_index = QCA8K_CPU_PORT0;
-               break;
-       case 1:
-       case 2:
-       case 3:
-       case 4:
-       case 5:
-               /* Internal PHY, nothing to do */
-               return;
-       case 6: /* 2nd CPU port / external PHY */
-               if (state->interface != PHY_INTERFACE_MODE_RGMII &&
-                   state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
-                   state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
-                   state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
-                   state->interface != PHY_INTERFACE_MODE_SGMII &&
-                   state->interface != PHY_INTERFACE_MODE_1000BASEX)
-                       return;
-
-               reg = QCA8K_REG_PORT6_PAD_CTRL;
-               cpu_port_index = QCA8K_CPU_PORT6;
-               break;
-       default:
-               dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
-               return;
-       }
-
-       if (port != 6 && phylink_autoneg_inband(mode)) {
-               dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
-                       __func__);
-               return;
-       }
-
-       switch (state->interface) {
-       case PHY_INTERFACE_MODE_RGMII:
-       case PHY_INTERFACE_MODE_RGMII_ID:
-       case PHY_INTERFACE_MODE_RGMII_TXID:
-       case PHY_INTERFACE_MODE_RGMII_RXID:
-               qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
-
-               /* Configure rgmii delay */
-               qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
-
-               /* The QCA8337 requires the rgmii rx delay to be set for all ports.
-                * This is enabled through PORT5_PAD_CTRL for all ports,
-                * rather than individual port registers.
-                */
-               if (priv->switch_id == QCA8K_ID_QCA8337)
-                       qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
-                                   QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
-               break;
-       case PHY_INTERFACE_MODE_SGMII:
-       case PHY_INTERFACE_MODE_1000BASEX:
-               /* Enable SGMII on the port */
-               qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
-               break;
-       default:
-               dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
-                       phy_modes(state->interface), port);
-               return;
-       }
-}
-
-static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
-                                  struct phylink_config *config)
-{
-       switch (port) {
-       case 0: /* 1st CPU port */
-               phy_interface_set_rgmii(config->supported_interfaces);
-               __set_bit(PHY_INTERFACE_MODE_SGMII,
-                         config->supported_interfaces);
-               break;
-
-       case 1:
-       case 2:
-       case 3:
-       case 4:
-       case 5:
-               /* Internal PHY */
-               __set_bit(PHY_INTERFACE_MODE_GMII,
-                         config->supported_interfaces);
-               __set_bit(PHY_INTERFACE_MODE_INTERNAL,
-                         config->supported_interfaces);
-               break;
-
-       case 6: /* 2nd CPU port / external PHY */
-               phy_interface_set_rgmii(config->supported_interfaces);
-               __set_bit(PHY_INTERFACE_MODE_SGMII,
-                         config->supported_interfaces);
-               __set_bit(PHY_INTERFACE_MODE_1000BASEX,
-                         config->supported_interfaces);
-               break;
-       }
-
-       config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
-               MAC_10 | MAC_100 | MAC_1000FD;
-
-       config->legacy_pre_march2020 = false;
-}
-
-static void
-qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
-                           phy_interface_t interface)
-{
-       struct qca8k_priv *priv = ds->priv;
-
-       qca8k_port_set_status(priv, port, 0);
-}
-
-static void
-qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
-                         phy_interface_t interface, struct phy_device *phydev,
-                         int speed, int duplex, bool tx_pause, bool rx_pause)
-{
-       struct qca8k_priv *priv = ds->priv;
-       u32 reg;
-
-       if (phylink_autoneg_inband(mode)) {
-               reg = QCA8K_PORT_STATUS_LINK_AUTO;
-       } else {
-               switch (speed) {
-               case SPEED_10:
-                       reg = QCA8K_PORT_STATUS_SPEED_10;
-                       break;
-               case SPEED_100:
-                       reg = QCA8K_PORT_STATUS_SPEED_100;
-                       break;
-               case SPEED_1000:
-                       reg = QCA8K_PORT_STATUS_SPEED_1000;
-                       break;
-               default:
-                       reg = QCA8K_PORT_STATUS_LINK_AUTO;
-                       break;
-               }
-
-               if (duplex == DUPLEX_FULL)
-                       reg |= QCA8K_PORT_STATUS_DUPLEX;
-
-               if (rx_pause || dsa_is_cpu_port(ds, port))
-                       reg |= QCA8K_PORT_STATUS_RXFLOW;
-
-               if (tx_pause || dsa_is_cpu_port(ds, port))
-                       reg |= QCA8K_PORT_STATUS_TXFLOW;
-       }
-
-       reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
-
-       qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
-}
-
-static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
-{
-       return container_of(pcs, struct qca8k_pcs, pcs);
-}
-
-static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
-                               struct phylink_link_state *state)
-{
-       struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
-       int port = pcs_to_qca8k_pcs(pcs)->port;
-       u32 reg;
-       int ret;
-
-       ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
-       if (ret < 0) {
-               state->link = false;
-               return;
-       }
-
-       state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
-       state->an_complete = state->link;
-       state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
-       state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
-                                                          DUPLEX_HALF;
-
-       switch (reg & QCA8K_PORT_STATUS_SPEED) {
-       case QCA8K_PORT_STATUS_SPEED_10:
-               state->speed = SPEED_10;
-               break;
-       case QCA8K_PORT_STATUS_SPEED_100:
-               state->speed = SPEED_100;
-               break;
-       case QCA8K_PORT_STATUS_SPEED_1000:
-               state->speed = SPEED_1000;
-               break;
-       default:
-               state->speed = SPEED_UNKNOWN;
-               break;
-       }
-
-       if (reg & QCA8K_PORT_STATUS_RXFLOW)
-               state->pause |= MLO_PAUSE_RX;
-       if (reg & QCA8K_PORT_STATUS_TXFLOW)
-               state->pause |= MLO_PAUSE_TX;
-}
-
-static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
-                           phy_interface_t interface,
-                           const unsigned long *advertising,
-                           bool permit_pause_to_mac)
-{
-       struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
-       int cpu_port_index, ret, port;
-       u32 reg, val;
-
-       port = pcs_to_qca8k_pcs(pcs)->port;
-       switch (port) {
-       case 0:
-               reg = QCA8K_REG_PORT0_PAD_CTRL;
-               cpu_port_index = QCA8K_CPU_PORT0;
-               break;
-
-       case 6:
-               reg = QCA8K_REG_PORT6_PAD_CTRL;
-               cpu_port_index = QCA8K_CPU_PORT6;
-               break;
-
-       default:
-               WARN_ON(1);
-               return -EINVAL;
-       }
-
-       /* Enable/disable SerDes auto-negotiation as necessary */
-       ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
-       if (ret)
-               return ret;
-       if (phylink_autoneg_inband(mode))
-               val &= ~QCA8K_PWS_SERDES_AEN_DIS;
-       else
-               val |= QCA8K_PWS_SERDES_AEN_DIS;
-       qca8k_write(priv, QCA8K_REG_PWS, val);
-
-       /* Configure the SGMII parameters */
-       ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
-       if (ret)
-               return ret;
-
-       val |= QCA8K_SGMII_EN_SD;
-
-       if (priv->ports_config.sgmii_enable_pll)
-               val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
-                      QCA8K_SGMII_EN_TX;
-
-       if (dsa_is_cpu_port(priv->ds, port)) {
-               /* CPU port, we're talking to the CPU MAC, be a PHY */
-               val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
-               val |= QCA8K_SGMII_MODE_CTRL_PHY;
-       } else if (interface == PHY_INTERFACE_MODE_SGMII) {
-               val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
-               val |= QCA8K_SGMII_MODE_CTRL_MAC;
-       } else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
-               val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
-               val |= QCA8K_SGMII_MODE_CTRL_BASEX;
-       }
-
-       qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
-
-       /* The original code reports port instability unless the SGMII delay
-        * is also set. Apply the advised values here or take them from DT.
-        */
-       if (interface == PHY_INTERFACE_MODE_SGMII)
-               qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
-       /* For qca8327/qca8328/qca8334/qca8338 the sgmii block is unique and
-        * the falling edge is configured by writing the PORT0 PAD reg.
-        */
-       if (priv->switch_id == QCA8K_ID_QCA8327 ||
-           priv->switch_id == QCA8K_ID_QCA8337)
-               reg = QCA8K_REG_PORT0_PAD_CTRL;
-
-       val = 0;
-
-       /* SGMII Clock phase configuration */
-       if (priv->ports_config.sgmii_rx_clk_falling_edge)
-               val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
-
-       if (priv->ports_config.sgmii_tx_clk_falling_edge)
-               val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
-
-       if (val)
-               ret = qca8k_rmw(priv, reg,
-                               QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
-                               QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
-                               val);
-
-       return 0;
-}
-
-static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
-{
-}
-
-static const struct phylink_pcs_ops qca8k_pcs_ops = {
-       .pcs_get_state = qca8k_pcs_get_state,
-       .pcs_config = qca8k_pcs_config,
-       .pcs_an_restart = qca8k_pcs_an_restart,
-};
-
-static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
-                           int port)
-{
-       qpcs->pcs.ops = &qca8k_pcs_ops;
-
-       /* We don't have interrupts for link changes, so we need to poll */
-       qpcs->pcs.poll = true;
-       qpcs->priv = priv;
-       qpcs->port = port;
-}
-
-static void
-qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
-{
-       const struct qca8k_match_data *match_data;
-       struct qca8k_priv *priv = ds->priv;
-       int i;
-
-       if (stringset != ETH_SS_STATS)
-               return;
-
-       match_data = of_device_get_match_data(priv->dev);
-
-       for (i = 0; i < match_data->mib_count; i++)
-               strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
-                       ETH_GSTRING_LEN);
-}
-
-static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
-{
-       const struct qca8k_match_data *match_data;
-       struct qca8k_mib_eth_data *mib_eth_data;
-       struct qca8k_priv *priv = ds->priv;
-       const struct qca8k_mib_desc *mib;
-       struct mib_ethhdr *mib_ethhdr;
-       int i, mib_len, offset = 0;
-       u64 *data;
-       u8 port;
-
-       mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
-       mib_eth_data = &priv->mib_eth_data;
-
-       /* The switch autocasts every port. Ignore other packets and
-        * parse only the requested one.
-        */
-       port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
-       if (port != mib_eth_data->req_port)
-               goto exit;
-
-       match_data = device_get_match_data(priv->dev);
-       data = mib_eth_data->data;
-
-       for (i = 0; i < match_data->mib_count; i++) {
-               mib = &ar8327_mib[i];
-
-               /* The first 3 mibs are present in the skb head */
-               if (i < 3) {
-                       data[i] = mib_ethhdr->data[i];
-                       continue;
-               }
-
-               mib_len = sizeof(uint32_t);
-
-               /* Some mibs are 64 bit wide */
-               if (mib->size == 2)
-                       mib_len = sizeof(uint64_t);
-
-               /* Copy the mib value from the packet to the data array */
-               memcpy(data + i, skb->data + offset, mib_len);
-
-               /* Set the offset for the next mib */
-               offset += mib_len;
-       }
-
-exit:
-       /* Complete once all the mib packets have been received */
-       if (refcount_dec_and_test(&mib_eth_data->port_parsed))
-               complete(&mib_eth_data->rw_done);
-}
-
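-/* Trigger a MIB autocast for the requested port and wait for
- * qca8k_mib_autocast_handler() to fill the data array with the counters.
- */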
-static int
-qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
-{
-       struct dsa_port *dp = dsa_to_port(ds, port);
-       struct qca8k_mib_eth_data *mib_eth_data;
-       struct qca8k_priv *priv = ds->priv;
-       int ret;
-
-       mib_eth_data = &priv->mib_eth_data;
-
-       mutex_lock(&mib_eth_data->mutex);
-
-       reinit_completion(&mib_eth_data->rw_done);
-
-       mib_eth_data->req_port = dp->index;
-       mib_eth_data->data = data;
-       refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
-
-       mutex_lock(&priv->reg_mutex);
-
-       /* Send mib autocast request */
-       ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
-                                QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
-                                FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
-                                QCA8K_MIB_BUSY);
-
-       mutex_unlock(&priv->reg_mutex);
-
-       if (ret)
-               goto exit;
-
-       ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
-
-exit:
-       mutex_unlock(&mib_eth_data->mutex);
-
-       return ret;
-}
-
-static void
-qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
-                       uint64_t *data)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-       const struct qca8k_match_data *match_data;
-       const struct qca8k_mib_desc *mib;
-       u32 reg, i, val;
-       u32 hi = 0;
-       int ret;
-
-       if (priv->mgmt_master &&
-           qca8k_get_ethtool_stats_eth(ds, port, data) > 0)
-               return;
-
-       match_data = of_device_get_match_data(priv->dev);
-
-       for (i = 0; i < match_data->mib_count; i++) {
-               mib = &ar8327_mib[i];
-               reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
-
-               ret = qca8k_read(priv, reg, &val);
-               if (ret < 0)
-                       continue;
-
-               if (mib->size == 2) {
-                       ret = qca8k_read(priv, reg + 4, &hi);
-                       if (ret < 0)
-                               continue;
-               }
-
-               data[i] = val;
-               if (mib->size == 2)
-                       data[i] |= (u64)hi << 32;
-       }
-}
-
-static int
-qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
-{
-       const struct qca8k_match_data *match_data;
-       struct qca8k_priv *priv = ds->priv;
-
-       if (sset != ETH_SS_STATS)
-               return 0;
-
-       match_data = of_device_get_match_data(priv->dev);
-
-       return match_data->mib_count;
-}
-
-static int
-qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-       u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
-       u32 reg;
-       int ret;
-
-       mutex_lock(&priv->reg_mutex);
-       ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
-       if (ret < 0)
-               goto exit;
-
-       if (eee->eee_enabled)
-               reg |= lpi_en;
-       else
-               reg &= ~lpi_en;
-       ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
-
-exit:
-       mutex_unlock(&priv->reg_mutex);
-       return ret;
-}
-
-static int
-qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
-{
-       /* Nothing to do on the port's MAC */
-       return 0;
-}
-
-static void
-qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-       u32 stp_state;
-
-       switch (state) {
-       case BR_STATE_DISABLED:
-               stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
-               break;
-       case BR_STATE_BLOCKING:
-               stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
-               break;
-       case BR_STATE_LISTENING:
-               stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
-               break;
-       case BR_STATE_LEARNING:
-               stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
-               break;
-       case BR_STATE_FORWARDING:
-       default:
-               stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
-               break;
-       }
-
-       qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
-                 QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
-}
-
-static int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
-                                 struct dsa_bridge bridge,
-                                 bool *tx_fwd_offload,
-                                 struct netlink_ext_ack *extack)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-       int port_mask, cpu_port;
-       int i, ret;
-
-       cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
-       port_mask = BIT(cpu_port);
-
-       for (i = 0; i < QCA8K_NUM_PORTS; i++) {
-               if (dsa_is_cpu_port(ds, i))
-                       continue;
-               if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
-                       continue;
-               /* Add this port to the portvlan mask of the other ports
-                * in the bridge
-                */
-               ret = regmap_set_bits(priv->regmap,
-                                     QCA8K_PORT_LOOKUP_CTRL(i),
-                                     BIT(port));
-               if (ret)
-                       return ret;
-               if (i != port)
-                       port_mask |= BIT(i);
-       }
-
-       /* Add all other ports to this port's portvlan mask */
-       ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
-                       QCA8K_PORT_LOOKUP_MEMBER, port_mask);
-
-       return ret;
-}
-
-static void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
-                                   struct dsa_bridge bridge)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-       int cpu_port, i;
-
-       cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
-
-       for (i = 0; i < QCA8K_NUM_PORTS; i++) {
-               if (dsa_is_cpu_port(ds, i))
-                       continue;
-               if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
-                       continue;
-               /* Remove this port from the portvlan mask of the other
-                * ports in the bridge
-                */
-               regmap_clear_bits(priv->regmap,
-                                 QCA8K_PORT_LOOKUP_CTRL(i),
-                                 BIT(port));
-       }
-
-       /* Set the cpu port to be the only one in the portvlan mask of
-        * this port
-        */
-       qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
-                 QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
-}
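
For concreteness, a sketch of the lookup member masks the two bridge callbacks above converge on when user ports 1 and 2 share a bridge behind CPU port 0 (port numbers are illustrative; the CPU port's own member mask is filled with all user ports in qca8k_setup() further below):

    PORT_LOOKUP_CTRL(1).MEMBER = BIT(0) | BIT(2)   /* CPU port + the other bridged port */
    PORT_LOOKUP_CTRL(2).MEMBER = BIT(0) | BIT(1)

    /* on leave, the remaining bridged ports drop BIT(leaving_port) and the
     * leaving port's member mask is reset to BIT(cpu_port) only
     */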
-
-static void
-qca8k_port_fast_age(struct dsa_switch *ds, int port)
-{
-       struct qca8k_priv *priv = ds->priv;
-
-       mutex_lock(&priv->reg_mutex);
-       qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
-       mutex_unlock(&priv->reg_mutex);
-}
-
-static int
-qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
-{
-       struct qca8k_priv *priv = ds->priv;
-       unsigned int secs = msecs / 1000;
-       u32 val;
-
-       /* AGE_TIME reg is set in 7s steps */
-       val = secs / 7;
-
-       /* Handle case with 0 as val to NOT disable
-        * learning
-        */
-       if (!val)
-               val = 1;
-
-       return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
-                                 QCA8K_ATU_AGE_TIME(val));
-}
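
A quick worked check of the 7-second register granularity against the ageing_time_min/max limits advertised later in qca8k_setup() (values taken from this diff, sketch only):

    msecs = 7000      -> secs = 7      -> val = 1      (matches ds->ageing_time_min)
    msecs = 458745000 -> secs = 458745 -> val = 65535  (0xFFFF, the full AGE_TIME field,
                                                        matches ds->ageing_time_max)
    msecs = 5000      -> secs = 5      -> val = 0, clamped to 1 so learning stays enabled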
-
-static int
-qca8k_port_enable(struct dsa_switch *ds, int port,
-                 struct phy_device *phy)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
-       qca8k_port_set_status(priv, port, 1);
-       priv->port_enabled_map |= BIT(port);
-
-       if (dsa_is_user_port(ds, port))
-               phy_support_asym_pause(phy);
-
-       return 0;
-}
-
-static void
-qca8k_port_disable(struct dsa_switch *ds, int port)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
-       qca8k_port_set_status(priv, port, 0);
-       priv->port_enabled_map &= ~BIT(port);
-}
-
-static int
-qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
-{
-       struct qca8k_priv *priv = ds->priv;
-       int ret;
-
-       /* We only have a general MTU setting.
-        * DSA always sets the CPU port's MTU to the largest MTU of the
-        * slave ports.
-        * Setting the MTU just for the CPU port is sufficient to correctly
-        * set a value for every port.
-        */
-       if (!dsa_is_cpu_port(ds, port))
-               return 0;
-
-       /* To change the MAX_FRAME_SIZE the cpu ports must be off or
-        * the switch panics.
-        * Turn off both cpu ports before applying the new value to prevent
-        * this.
-        */
-       if (priv->port_enabled_map & BIT(0))
-               qca8k_port_set_status(priv, 0, 0);
-
-       if (priv->port_enabled_map & BIT(6))
-               qca8k_port_set_status(priv, 6, 0);
-
-       /* Include L2 header / FCS length */
-       ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN);
-
-       if (priv->port_enabled_map & BIT(0))
-               qca8k_port_set_status(priv, 0, 1);
-
-       if (priv->port_enabled_map & BIT(6))
-               qca8k_port_set_status(priv, 6, 1);
-
-       return ret;
-}
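
As a worked example of the value written to QCA8K_MAX_FRAME_SIZE above (using the standard ETH_HLEN = 14 and ETH_FCS_LEN = 4):

    new_mtu = 1500 (default)  ->  MAX_FRAME_SIZE = 1500 + 14 + 4 = 1518
    new_mtu = 9000 (jumbo)    ->  MAX_FRAME_SIZE = 9000 + 14 + 4 = 9018

Since the register is global, writing it once for the CPU port (which DSA keeps at the largest user-port MTU) covers every port.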
-
-static int
-qca8k_port_max_mtu(struct dsa_switch *ds, int port)
-{
-       return QCA8K_MAX_MTU;
-}
-
-static int
-qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
-                     u16 port_mask, u16 vid)
-{
-       /* Set the vid to the port vlan id if no vid is set */
-       if (!vid)
-               vid = QCA8K_PORT_VID_DEF;
-
-       return qca8k_fdb_add(priv, addr, port_mask, vid,
-                            QCA8K_ATU_STATUS_STATIC);
-}
-
-static int
-qca8k_port_fdb_add(struct dsa_switch *ds, int port,
-                  const unsigned char *addr, u16 vid,
-                  struct dsa_db db)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-       u16 port_mask = BIT(port);
-
-       return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
-}
-
-static int
-qca8k_port_fdb_del(struct dsa_switch *ds, int port,
-                  const unsigned char *addr, u16 vid,
-                  struct dsa_db db)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-       u16 port_mask = BIT(port);
-
-       if (!vid)
-               vid = QCA8K_PORT_VID_DEF;
-
-       return qca8k_fdb_del(priv, addr, port_mask, vid);
-}
-
-static int
-qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
-                   dsa_fdb_dump_cb_t *cb, void *data)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-       struct qca8k_fdb _fdb = { 0 };
-       int cnt = QCA8K_NUM_FDB_RECORDS;
-       bool is_static;
-       int ret = 0;
-
-       mutex_lock(&priv->reg_mutex);
-       while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
-               if (!_fdb.aging)
-                       break;
-               is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
-               ret = cb(_fdb.mac, _fdb.vid, is_static, data);
-               if (ret)
-                       break;
-       }
-       mutex_unlock(&priv->reg_mutex);
-
-       return 0;
-}
-
-static int
-qca8k_port_mdb_add(struct dsa_switch *ds, int port,
-                  const struct switchdev_obj_port_mdb *mdb,
-                  struct dsa_db db)
-{
-       struct qca8k_priv *priv = ds->priv;
-       const u8 *addr = mdb->addr;
-       u16 vid = mdb->vid;
-
-       return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
-}
-
-static int
-qca8k_port_mdb_del(struct dsa_switch *ds, int port,
-                  const struct switchdev_obj_port_mdb *mdb,
-                  struct dsa_db db)
-{
-       struct qca8k_priv *priv = ds->priv;
-       const u8 *addr = mdb->addr;
-       u16 vid = mdb->vid;
-
-       return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
-}
-
-static int
-qca8k_port_mirror_add(struct dsa_switch *ds, int port,
-                     struct dsa_mall_mirror_tc_entry *mirror,
-                     bool ingress, struct netlink_ext_ack *extack)
-{
-       struct qca8k_priv *priv = ds->priv;
-       int monitor_port, ret;
-       u32 reg, val;
-
-       /* Check for an existing entry */
-       if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
-               return -EEXIST;
-
-       ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
-       if (ret)
-               return ret;
-
-       /* QCA83xx can have only one port set to mirror mode.
-        * Check that the correct port is requested and return error otherwise.
-        * When no mirror port is set, the value is set to 0xF
-        */
-       monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
-       if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
-               return -EEXIST;
-
-       /* Set the monitor port */
-       val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
-                        mirror->to_local_port);
-       ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
-                                QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
-       if (ret)
-               return ret;
-
-       if (ingress) {
-               reg = QCA8K_PORT_LOOKUP_CTRL(port);
-               val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
-       } else {
-               reg = QCA8K_REG_PORT_HOL_CTRL1(port);
-               val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
-       }
-
-       ret = regmap_update_bits(priv->regmap, reg, val, val);
-       if (ret)
-               return ret;
-
-       /* Track mirror port for tx and rx to decide when the
-        * mirror port has to be disabled.
-        */
-       if (ingress)
-               priv->mirror_rx |= BIT(port);
-       else
-               priv->mirror_tx |= BIT(port);
-
-       return 0;
-}
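
A minimal sketch of the single-monitor-port constraint enforced above; 0xF in the MIRROR_PORT_NUM field is what the hardware reports when no monitor port has been configured yet, and the helper below is purely illustrative (it is not part of the driver):

    static bool monitor_port_usable(u32 current_num, int requested)
    {
            /* field is free (0xF) or already points at the requested port */
            return current_num == 0xF || current_num == requested;
    }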
-
-static void
-qca8k_port_mirror_del(struct dsa_switch *ds, int port,
-                     struct dsa_mall_mirror_tc_entry *mirror)
-{
-       struct qca8k_priv *priv = ds->priv;
-       u32 reg, val;
-       int ret;
-
-       if (mirror->ingress) {
-               reg = QCA8K_PORT_LOOKUP_CTRL(port);
-               val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
-       } else {
-               reg = QCA8K_REG_PORT_HOL_CTRL1(port);
-               val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
-       }
-
-       ret = regmap_clear_bits(priv->regmap, reg, val);
-       if (ret)
-               goto err;
-
-       if (mirror->ingress)
-               priv->mirror_rx &= ~BIT(port);
-       else
-               priv->mirror_tx &= ~BIT(port);
-
-       /* No port set to send packet to mirror port. Disable mirror port */
-       if (!priv->mirror_rx && !priv->mirror_tx) {
-               val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
-               ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
-                                        QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
-               if (ret)
-                       goto err;
-       }
-
-       return;
-
-err:
-       dev_err(priv->dev, "Failed to del mirror port from %d", port);
-}
-
-static int
-qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
-                         struct netlink_ext_ack *extack)
-{
-       struct qca8k_priv *priv = ds->priv;
-       int ret;
-
-       if (vlan_filtering) {
-               ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
-                               QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
-                               QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
-       } else {
-               ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
-                               QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
-                               QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
-       }
-
-       return ret;
-}
-
-static int
-qca8k_port_vlan_add(struct dsa_switch *ds, int port,
-                   const struct switchdev_obj_port_vlan *vlan,
-                   struct netlink_ext_ack *extack)
-{
-       bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
-       bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
-       struct qca8k_priv *priv = ds->priv;
-       int ret;
-
-       ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
-       if (ret) {
-               dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
-               return ret;
-       }
-
-       if (pvid) {
-               ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
-                               QCA8K_EGREES_VLAN_PORT_MASK(port),
-                               QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
-               if (ret)
-                       return ret;
-
-               ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
-                                 QCA8K_PORT_VLAN_CVID(vlan->vid) |
-                                 QCA8K_PORT_VLAN_SVID(vlan->vid));
-       }
-
-       return ret;
-}
-
-static int
-qca8k_port_vlan_del(struct dsa_switch *ds, int port,
-                   const struct switchdev_obj_port_vlan *vlan)
-{
-       struct qca8k_priv *priv = ds->priv;
-       int ret;
-
-       ret = qca8k_vlan_del(priv, port, vlan->vid);
-       if (ret)
-               dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
-
-       return ret;
-}
-
-static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
-{
-       struct qca8k_priv *priv = ds->priv;
-
-       /* Communicate the switch revision to the internal PHY driver.
-        * Based on the switch revision, different values need to be
-        * set in the dbg and mmd registers of the PHY.
-        * The first 2 bits are used to communicate the switch revision
-        * to the PHY driver.
-        */
-       if (port > 0 && port < 6)
-               return priv->switch_revision;
-
-       return 0;
-}
-
-static enum dsa_tag_protocol
-qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
-                      enum dsa_tag_protocol mp)
-{
-       return DSA_TAG_PROTO_QCA;
-}
-
-static bool
-qca8k_lag_can_offload(struct dsa_switch *ds, struct dsa_lag lag,
-                     struct netdev_lag_upper_info *info)
-{
-       struct dsa_port *dp;
-       int members = 0;
-
-       if (!lag.id)
-               return false;
-
-       dsa_lag_foreach_port(dp, ds->dst, &lag)
-               /* Includes the port joining the LAG */
-               members++;
-
-       if (members > QCA8K_NUM_PORTS_FOR_LAG)
-               return false;
-
-       if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
-               return false;
-
-       if (info->hash_type != NETDEV_LAG_HASH_L2 &&
-           info->hash_type != NETDEV_LAG_HASH_L23)
-               return false;
-
-       return true;
-}
-
-static int
-qca8k_lag_setup_hash(struct dsa_switch *ds, struct dsa_lag lag,
-                    struct netdev_lag_upper_info *info)
-{
-       struct net_device *lag_dev = lag.dev;
-       struct qca8k_priv *priv = ds->priv;
-       bool unique_lag = true;
-       unsigned int i;
-       u32 hash = 0;
-
-       switch (info->hash_type) {
-       case NETDEV_LAG_HASH_L23:
-               hash |= QCA8K_TRUNK_HASH_SIP_EN;
-               hash |= QCA8K_TRUNK_HASH_DIP_EN;
-               fallthrough;
-       case NETDEV_LAG_HASH_L2:
-               hash |= QCA8K_TRUNK_HASH_SA_EN;
-               hash |= QCA8K_TRUNK_HASH_DA_EN;
-               break;
-       default: /* We should NEVER reach this */
-               return -EOPNOTSUPP;
-       }
-
-       /* Check if we are the only configured LAG */
-       dsa_lags_foreach_id(i, ds->dst)
-               if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
-                       unique_lag = false;
-                       break;
-               }
-
-       /* Hash mode is global. Make sure the same hash mode
-        * is set for all 4 possible LAGs.
-        * If this is the only configured LAG we can set whatever
-        * hash mode we want.
-        * To change the hash mode, all LAGs must be removed and
-        * re-created with the new mode.
-        */
-       if (unique_lag) {
-               priv->lag_hash_mode = hash;
-       } else if (priv->lag_hash_mode != hash) {
-               netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n");
-               return -EOPNOTSUPP;
-       }
-
-       return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
-                                 QCA8K_TRUNK_HASH_MASK, hash);
-}
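
In bonding terms, the two accepted policies map onto the trunk hash fields selected above: layer2 hashes on source/destination MAC (SA | DA), layer2+3 additionally hashes on source/destination IP (SIP | DIP). Assuming a standard iproute2 bonding setup (shown purely as a usage illustration), something like

    ip link add bond0 type bond mode 802.3ad xmit_hash_policy layer2+3

reaches this code with NETDEV_LAG_HASH_L23 once switch ports are enslaved to bond0; any other xmit hash policy is rejected earlier in qca8k_lag_can_offload().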
-
-static int
-qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
-                         struct dsa_lag lag, bool delete)
-{
-       struct qca8k_priv *priv = ds->priv;
-       int ret, id, i;
-       u32 val;
-
-       /* DSA LAG IDs are one-based, hardware is zero-based */
-       id = lag.id - 1;
-
-       /* Read current port member */
-       ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
-       if (ret)
-               return ret;
-
-       /* Shift val to the correct trunk */
-       val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
-       val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
-       if (delete)
-               val &= ~BIT(port);
-       else
-               val |= BIT(port);
-
-       /* Update the port members. If the portmap becomes empty, disable the trunk */
-       ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
-                                QCA8K_REG_GOL_TRUNK_MEMBER(id) |
-                                QCA8K_REG_GOL_TRUNK_EN(id),
-                                !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
-                                val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
-
-       /* Search for an empty member if adding, or for the port if deleting */
-       for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
-               ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
-               if (ret)
-                       return ret;
-
-               val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
-               val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
-
-               if (delete) {
-                       /* If port flagged to be disabled assume this member is
-                        * empty
-                        */
-                       if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
-                               continue;
-
-                       val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
-                       if (val != port)
-                               continue;
-               } else {
-                       /* If port flagged to be enabled assume this member is
-                        * already set
-                        */
-                       if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
-                               continue;
-               }
-
-               /* We have found the member to add/remove */
-               break;
-       }
-
-       /* Set port in the correct port mask or disable port if in delete mode */
-       return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
-                                 QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
-                                 QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
-                                 !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
-                                 port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
-}
-
-static int
-qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
-                   struct netdev_lag_upper_info *info)
-{
-       int ret;
-
-       if (!qca8k_lag_can_offload(ds, lag, info))
-               return -EOPNOTSUPP;
-
-       ret = qca8k_lag_setup_hash(ds, lag, info);
-       if (ret)
-               return ret;
-
-       return qca8k_lag_refresh_portmap(ds, port, lag, false);
-}
-
-static int
-qca8k_port_lag_leave(struct dsa_switch *ds, int port,
-                    struct dsa_lag lag)
-{
-       return qca8k_lag_refresh_portmap(ds, port, lag, true);
-}
-
-static void
-qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
-                   bool operational)
-{
-       struct dsa_port *dp = master->dsa_ptr;
-       struct qca8k_priv *priv = ds->priv;
-
-       /* Ethernet MIB/MDIO is only supported for CPU port 0 */
-       if (dp->index != 0)
-               return;
-
-       mutex_lock(&priv->mgmt_eth_data.mutex);
-       mutex_lock(&priv->mib_eth_data.mutex);
-
-       priv->mgmt_master = operational ? (struct net_device *)master : NULL;
-
-       mutex_unlock(&priv->mib_eth_data.mutex);
-       mutex_unlock(&priv->mgmt_eth_data.mutex);
-}
-
-static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
-                                     enum dsa_tag_protocol proto)
-{
-       struct qca_tagger_data *tagger_data;
-
-       switch (proto) {
-       case DSA_TAG_PROTO_QCA:
-               tagger_data = ds->tagger_data;
-
-               tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
-               tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
-
-               break;
-       default:
-               return -EOPNOTSUPP;
-       }
-
-       return 0;
-}
-
-static int
-qca8k_setup(struct dsa_switch *ds)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-       int cpu_port, ret, i;
-       u32 mask;
-
-       cpu_port = qca8k_find_cpu_port(ds);
-       if (cpu_port < 0) {
-               dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
-               return cpu_port;
-       }
-
-       /* Parse CPU port config to be later used in phylink mac_config */
-       ret = qca8k_parse_port_config(priv);
-       if (ret)
-               return ret;
-
-       ret = qca8k_setup_mdio_bus(priv);
-       if (ret)
-               return ret;
-
-       ret = qca8k_setup_of_pws_reg(priv);
-       if (ret)
-               return ret;
-
-       ret = qca8k_setup_mac_pwr_sel(priv);
-       if (ret)
-               return ret;
-
-       qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
-       qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);
-
-       /* Make sure MAC06 exchange is disabled */
-       ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
-                               QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
-       if (ret) {
-               dev_err(priv->dev, "failed disabling MAC06 exchange");
-               return ret;
-       }
-
-       /* Enable CPU Port */
-       ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
-                             QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
-       if (ret) {
-               dev_err(priv->dev, "failed enabling CPU port");
-               return ret;
-       }
-
-       /* Enable MIB counters */
-       ret = qca8k_mib_init(priv);
-       if (ret)
-               dev_warn(priv->dev, "mib init failed");
-
-       /* Initial setup of all ports */
-       for (i = 0; i < QCA8K_NUM_PORTS; i++) {
-               /* Disable forwarding by default on all ports */
-               ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
-                               QCA8K_PORT_LOOKUP_MEMBER, 0);
-               if (ret)
-                       return ret;
-
-               /* Enable QCA header mode on all cpu ports */
-               if (dsa_is_cpu_port(ds, i)) {
-                       ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
-                                         FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
-                                         FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
-                       if (ret) {
-                               dev_err(priv->dev, "failed enabling QCA header mode");
-                               return ret;
-                       }
-               }
-
-               /* Disable MAC by default on all user ports */
-               if (dsa_is_user_port(ds, i))
-                       qca8k_port_set_status(priv, i, 0);
-       }
-
-       /* Forward all unknown frames to the CPU port for Linux processing.
-        * Notice that in a multi-cpu config only one port should be set as
-        * destination for igmp, unknown unicast, multicast and broadcast packets.
-        */
-       ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
-                         FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
-                         FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
-                         FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
-                         FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
-       if (ret)
-               return ret;
-
-       /* Set up the connection between the CPU port and the user ports
-        * and apply the per-port switch configuration
-        */
-       for (i = 0; i < QCA8K_NUM_PORTS; i++) {
-               /* CPU port gets connected to all user ports of the switch */
-               if (dsa_is_cpu_port(ds, i)) {
-                       ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
-                                       QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
-                       if (ret)
-                               return ret;
-               }
-
-               /* Individual user ports get connected to CPU port only */
-               if (dsa_is_user_port(ds, i)) {
-                       ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
-                                       QCA8K_PORT_LOOKUP_MEMBER,
-                                       BIT(cpu_port));
-                       if (ret)
-                               return ret;
-
-                       /* Enable ARP Auto-learning by default */
-                       ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
-                                             QCA8K_PORT_LOOKUP_LEARN);
-                       if (ret)
-                               return ret;
-
-                       /* For port based vlans to work we need to set the
-                        * default egress vid
-                        */
-                       ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
-                                       QCA8K_EGREES_VLAN_PORT_MASK(i),
-                                       QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
-                       if (ret)
-                               return ret;
-
-                       ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
-                                         QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
-                                         QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
-                       if (ret)
-                               return ret;
-               }
-
-               /* Port 5 of the qca8337 has some problems in flood conditions. The
-                * original legacy driver had some specific buffer and priority settings
-                * for the different ports suggested by the QCA switch team. Add these
-                * missing settings to improve switch stability under load conditions.
-                * This problem is limited to the qca8337; other qca8k switches are not affected.
-                */
-               if (priv->switch_id == QCA8K_ID_QCA8337) {
-                       switch (i) {
-                       /* The 2 CPU ports and port 5 require a different
-                        * priority than any other port.
-                        */
-                       case 0:
-                       case 5:
-                       case 6:
-                               mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
-                                       QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
-                                       QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
-                                       QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
-                                       QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
-                                       QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
-                                       QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
-                               break;
-                       default:
-                               mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
-                                       QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
-                                       QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
-                                       QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
-                                       QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
-                       }
-                       qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
-
-                       mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
-                       QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
-                       QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
-                       QCA8K_PORT_HOL_CTRL1_WRED_EN;
-                       qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
-                                 QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
-                                 QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
-                                 QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
-                                 QCA8K_PORT_HOL_CTRL1_WRED_EN,
-                                 mask);
-               }
-       }
-
-       /* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
-       if (priv->switch_id == QCA8K_ID_QCA8327) {
-               mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
-                      QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
-               qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
-                         QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
-                         QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
-                         mask);
-       }
-
-       /* Set up our port MTUs to match power-on defaults */
-       ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
-       if (ret)
-               dev_warn(priv->dev, "failed setting MTU settings");
-
-       /* Flush the FDB table */
-       qca8k_fdb_flush(priv);
-
-       /* Set min and max ageing values supported */
-       ds->ageing_time_min = 7000;
-       ds->ageing_time_max = 458745000;
-
-       /* Set max number of LAGs supported */
-       ds->num_lag_ids = QCA8K_NUM_LAGS;
-
-       return 0;
-}
-
-static const struct dsa_switch_ops qca8k_switch_ops = {
-       .get_tag_protocol       = qca8k_get_tag_protocol,
-       .setup                  = qca8k_setup,
-       .get_strings            = qca8k_get_strings,
-       .get_ethtool_stats      = qca8k_get_ethtool_stats,
-       .get_sset_count         = qca8k_get_sset_count,
-       .set_ageing_time        = qca8k_set_ageing_time,
-       .get_mac_eee            = qca8k_get_mac_eee,
-       .set_mac_eee            = qca8k_set_mac_eee,
-       .port_enable            = qca8k_port_enable,
-       .port_disable           = qca8k_port_disable,
-       .port_change_mtu        = qca8k_port_change_mtu,
-       .port_max_mtu           = qca8k_port_max_mtu,
-       .port_stp_state_set     = qca8k_port_stp_state_set,
-       .port_bridge_join       = qca8k_port_bridge_join,
-       .port_bridge_leave      = qca8k_port_bridge_leave,
-       .port_fast_age          = qca8k_port_fast_age,
-       .port_fdb_add           = qca8k_port_fdb_add,
-       .port_fdb_del           = qca8k_port_fdb_del,
-       .port_fdb_dump          = qca8k_port_fdb_dump,
-       .port_mdb_add           = qca8k_port_mdb_add,
-       .port_mdb_del           = qca8k_port_mdb_del,
-       .port_mirror_add        = qca8k_port_mirror_add,
-       .port_mirror_del        = qca8k_port_mirror_del,
-       .port_vlan_filtering    = qca8k_port_vlan_filtering,
-       .port_vlan_add          = qca8k_port_vlan_add,
-       .port_vlan_del          = qca8k_port_vlan_del,
-       .phylink_get_caps       = qca8k_phylink_get_caps,
-       .phylink_mac_select_pcs = qca8k_phylink_mac_select_pcs,
-       .phylink_mac_config     = qca8k_phylink_mac_config,
-       .phylink_mac_link_down  = qca8k_phylink_mac_link_down,
-       .phylink_mac_link_up    = qca8k_phylink_mac_link_up,
-       .get_phy_flags          = qca8k_get_phy_flags,
-       .port_lag_join          = qca8k_port_lag_join,
-       .port_lag_leave         = qca8k_port_lag_leave,
-       .master_state_change    = qca8k_master_change,
-       .connect_tag_protocol   = qca8k_connect_tag_protocol,
-};
-
-static int qca8k_read_switch_id(struct qca8k_priv *priv)
-{
-       const struct qca8k_match_data *data;
-       u32 val;
-       u8 id;
-       int ret;
-
-       /* get the switch ID from the compatible */
-       data = of_device_get_match_data(priv->dev);
-       if (!data)
-               return -ENODEV;
-
-       ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
-       if (ret < 0)
-               return -ENODEV;
-
-       id = QCA8K_MASK_CTRL_DEVICE_ID(val);
-       if (id != data->id) {
-               dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id);
-               return -ENODEV;
-       }
-
-       priv->switch_id = id;
-
-       /* Save revision to communicate to the internal PHY driver */
-       priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
-
-       return 0;
-}
-
-static int
-qca8k_sw_probe(struct mdio_device *mdiodev)
-{
-       struct qca8k_priv *priv;
-       int ret;
-
-       /* allocate the private data struct so that we can probe the
-        * switch's ID register
-        */
-       priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
-
-       priv->bus = mdiodev->bus;
-       priv->dev = &mdiodev->dev;
-
-       priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
-                                                  GPIOD_ASIS);
-       if (IS_ERR(priv->reset_gpio))
-               return PTR_ERR(priv->reset_gpio);
-
-       if (priv->reset_gpio) {
-               gpiod_set_value_cansleep(priv->reset_gpio, 1);
-               /* The active low duration must be greater than 10 ms
-                * and checkpatch.pl wants 20 ms.
-                */
-               msleep(20);
-               gpiod_set_value_cansleep(priv->reset_gpio, 0);
-       }
-
-       /* Start by setting up the register mapping */
-       priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
-                                       &qca8k_regmap_config);
-       if (IS_ERR(priv->regmap)) {
-               dev_err(priv->dev, "regmap initialization failed");
-               return PTR_ERR(priv->regmap);
-       }
-
-       priv->mdio_cache.page = 0xffff;
-       priv->mdio_cache.lo = 0xffff;
-       priv->mdio_cache.hi = 0xffff;
-
-       /* Check the detected switch id */
-       ret = qca8k_read_switch_id(priv);
-       if (ret)
-               return ret;
-
-       priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
-       if (!priv->ds)
-               return -ENOMEM;
-
-       mutex_init(&priv->mgmt_eth_data.mutex);
-       init_completion(&priv->mgmt_eth_data.rw_done);
-
-       mutex_init(&priv->mib_eth_data.mutex);
-       init_completion(&priv->mib_eth_data.rw_done);
-
-       priv->ds->dev = &mdiodev->dev;
-       priv->ds->num_ports = QCA8K_NUM_PORTS;
-       priv->ds->priv = priv;
-       priv->ds->ops = &qca8k_switch_ops;
-       mutex_init(&priv->reg_mutex);
-       dev_set_drvdata(&mdiodev->dev, priv);
-
-       return dsa_register_switch(priv->ds);
-}
-
-static void
-qca8k_sw_remove(struct mdio_device *mdiodev)
-{
-       struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
-       int i;
-
-       if (!priv)
-               return;
-
-       for (i = 0; i < QCA8K_NUM_PORTS; i++)
-               qca8k_port_set_status(priv, i, 0);
-
-       dsa_unregister_switch(priv->ds);
-
-       dev_set_drvdata(&mdiodev->dev, NULL);
-}
-
-static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
-{
-       struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
-
-       if (!priv)
-               return;
-
-       dsa_switch_shutdown(priv->ds);
-
-       dev_set_drvdata(&mdiodev->dev, NULL);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static void
-qca8k_set_pm(struct qca8k_priv *priv, int enable)
-{
-       int port;
-
-       for (port = 0; port < QCA8K_NUM_PORTS; port++) {
-               /* Do not enable on resume if the port was
-                * disabled before.
-                */
-               if (!(priv->port_enabled_map & BIT(port)))
-                       continue;
-
-               qca8k_port_set_status(priv, port, enable);
-       }
-}
-
-static int qca8k_suspend(struct device *dev)
-{
-       struct qca8k_priv *priv = dev_get_drvdata(dev);
-
-       qca8k_set_pm(priv, 0);
-
-       return dsa_switch_suspend(priv->ds);
-}
-
-static int qca8k_resume(struct device *dev)
-{
-       struct qca8k_priv *priv = dev_get_drvdata(dev);
-
-       qca8k_set_pm(priv, 1);
-
-       return dsa_switch_resume(priv->ds);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
-                        qca8k_suspend, qca8k_resume);
-
-static const struct qca8k_match_data qca8327 = {
-       .id = QCA8K_ID_QCA8327,
-       .reduced_package = true,
-       .mib_count = QCA8K_QCA832X_MIB_COUNT,
-};
-
-static const struct qca8k_match_data qca8328 = {
-       .id = QCA8K_ID_QCA8327,
-       .mib_count = QCA8K_QCA832X_MIB_COUNT,
-};
-
-static const struct qca8k_match_data qca833x = {
-       .id = QCA8K_ID_QCA8337,
-       .mib_count = QCA8K_QCA833X_MIB_COUNT,
-};
-
-static const struct of_device_id qca8k_of_match[] = {
-       { .compatible = "qca,qca8327", .data = &qca8327 },
-       { .compatible = "qca,qca8328", .data = &qca8328 },
-       { .compatible = "qca,qca8334", .data = &qca833x },
-       { .compatible = "qca,qca8337", .data = &qca833x },
-       { /* sentinel */ },
-};
-
-static struct mdio_driver qca8kmdio_driver = {
-       .probe  = qca8k_sw_probe,
-       .remove = qca8k_sw_remove,
-       .shutdown = qca8k_sw_shutdown,
-       .mdiodrv.driver = {
-               .name = "qca8k",
-               .of_match_table = qca8k_of_match,
-               .pm = &qca8k_pm_ops,
-       },
-};
-
-mdio_module_driver(qca8kmdio_driver);
-
-MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
-MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:qca8k");
index ec58d0e..e36ecc9 100644 (file)
@@ -324,10 +324,20 @@ enum qca8k_mid_cmd {
        QCA8K_MIB_CAST = 3,
 };
 
+struct qca8k_priv;
+
+struct qca8k_info_ops {
+       int (*autocast_mib)(struct dsa_switch *ds, int port, u64 *data);
+       /* TODO: remove these extra ops when we can support regmap bulk read/write */
+       int (*read_eth)(struct qca8k_priv *priv, u32 reg, u32 *val, int len);
+       int (*write_eth)(struct qca8k_priv *priv, u32 reg, u32 *val, int len);
+};
+
 struct qca8k_match_data {
        u8 id;
        bool reduced_package;
        u8 mib_count;
+       const struct qca8k_info_ops *ops;
 };
 
 enum {
@@ -401,6 +411,7 @@ struct qca8k_priv {
        struct qca8k_mdio_cache mdio_cache;
        struct qca8k_pcs pcs_port_0;
        struct qca8k_pcs pcs_port_6;
+       const struct qca8k_match_data *info;
 };
 
 struct qca8k_mib_desc {
@@ -416,4 +427,93 @@ struct qca8k_fdb {
        u8 mac[6];
 };
 
+/* Common setup function */
+extern const struct qca8k_mib_desc ar8327_mib[];
+extern const struct regmap_access_table qca8k_readable_table;
+int qca8k_mib_init(struct qca8k_priv *priv);
+void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable);
+int qca8k_read_switch_id(struct qca8k_priv *priv);
+
+/* Common read/write/rmw function */
+int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val);
+int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val);
+int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val);
+
+/* Common ops function */
+void qca8k_fdb_flush(struct qca8k_priv *priv);
+
+/* Common ethtool stats function */
+void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data);
+void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
+                            uint64_t *data);
+int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset);
+
+/* Common eee function */
+int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee);
+int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+
+/* Common bridge function */
+void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
+int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
+                          struct dsa_bridge bridge,
+                          bool *tx_fwd_offload,
+                          struct netlink_ext_ack *extack);
+void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
+                            struct dsa_bridge bridge);
+
+/* Common port enable/disable function */
+int qca8k_port_enable(struct dsa_switch *ds, int port,
+                     struct phy_device *phy);
+void qca8k_port_disable(struct dsa_switch *ds, int port);
+
+/* Common MTU function */
+int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu);
+int qca8k_port_max_mtu(struct dsa_switch *ds, int port);
+
+/* Common fast age function */
+void qca8k_port_fast_age(struct dsa_switch *ds, int port);
+int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs);
+
+/* Common FDB function */
+int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
+                         u16 port_mask, u16 vid);
+int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
+                      const unsigned char *addr, u16 vid,
+                      struct dsa_db db);
+int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
+                      const unsigned char *addr, u16 vid,
+                      struct dsa_db db);
+int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
+                       dsa_fdb_dump_cb_t *cb, void *data);
+
+/* Common MDB function */
+int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
+                      const struct switchdev_obj_port_mdb *mdb,
+                      struct dsa_db db);
+int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
+                      const struct switchdev_obj_port_mdb *mdb,
+                      struct dsa_db db);
+
+/* Common port mirror function */
+int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
+                         struct dsa_mall_mirror_tc_entry *mirror,
+                         bool ingress, struct netlink_ext_ack *extack);
+void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
+                          struct dsa_mall_mirror_tc_entry *mirror);
+
+/* Common port VLAN function */
+int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+                             struct netlink_ext_ack *extack);
+int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
+                       const struct switchdev_obj_port_vlan *vlan,
+                       struct netlink_ext_ack *extack);
+int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
+                       const struct switchdev_obj_port_vlan *vlan);
+
+/* Common port LAG function */
+int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
+                       struct netdev_lag_upper_info *info);
+int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
+                        struct dsa_lag lag);
+
 #endif /* __QCA8K_H */
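
The new qca8k_info_ops / qca8k_match_data split above, together with the exported "Common" helpers, lets a bus-specific front end plug in its own MIB autocast and bulk register accessors while reusing the shared switch logic. A hypothetical sketch of how a front end might wire this up (all my_* names are invented for illustration and are not part of this patch):

    static const struct qca8k_info_ops my_bus_ops = {
            .autocast_mib = my_mib_autocast,   /* hypothetical callback */
            .read_eth     = my_bulk_read,      /* hypothetical callback */
            .write_eth    = my_bulk_write,     /* hypothetical callback */
    };

    static const struct qca8k_match_data my_qca8337_data = {
            .id        = QCA8K_ID_QCA8337,
            .mib_count = QCA8K_QCA833X_MIB_COUNT,
            .ops       = &my_bus_ops,
    };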
index 6b3d4f4..14df8cf 100644 (file)
@@ -20,6 +20,8 @@
 #include "bnxt_ulp.h"
 #include "bnxt_ptp.h"
 #include "bnxt_coredump.h"
+#include "bnxt_nvm_defs.h"
+#include "bnxt_ethtool.h"
 
 static void __bnxt_fw_recover(struct bnxt *bp)
 {
@@ -610,6 +612,64 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
        return rc;
 }
 
+static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack)
+{
+       u32 datalen;
+       u16 index;
+       u8 *buf;
+
+       if (bnxt_find_nvram_item(bp->dev, BNX_DIR_TYPE_VPD,
+                                BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+                                &index, NULL, &datalen) || !datalen) {
+               NL_SET_ERR_MSG_MOD(extack, "nvm test vpd entry error");
+               return false;
+       }
+
+       buf = kzalloc(datalen, GFP_KERNEL);
+       if (!buf) {
+               NL_SET_ERR_MSG_MOD(extack, "insufficient memory for nvm test");
+               return false;
+       }
+
+       if (bnxt_get_nvram_item(bp->dev, index, 0, datalen, buf)) {
+               NL_SET_ERR_MSG_MOD(extack, "nvm test vpd read error");
+               goto err;
+       }
+
+       if (bnxt_flash_nvram(bp->dev, BNX_DIR_TYPE_VPD, BNX_DIR_ORDINAL_FIRST,
+                            BNX_DIR_EXT_NONE, 0, 0, buf, datalen)) {
+               NL_SET_ERR_MSG_MOD(extack, "nvm test vpd write error");
+               goto err;
+       }
+
+       kfree(buf);
+       return true;
+
+err:
+       kfree(buf);
+       return false;
+}
+
+static bool bnxt_dl_selftest_check(struct devlink *dl, unsigned int id,
+                                  struct netlink_ext_ack *extack)
+{
+       return id == DEVLINK_ATTR_SELFTEST_ID_FLASH;
+}
+
+static enum devlink_selftest_status bnxt_dl_selftest_run(struct devlink *dl,
+                                                        unsigned int id,
+                                                        struct netlink_ext_ack *extack)
+{
+       struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+       if (id == DEVLINK_ATTR_SELFTEST_ID_FLASH)
+               return bnxt_nvm_test(bp, extack) ?
+                               DEVLINK_SELFTEST_STATUS_PASS :
+                               DEVLINK_SELFTEST_STATUS_FAIL;
+
+       return DEVLINK_SELFTEST_STATUS_SKIP;
+}
+
 static const struct devlink_ops bnxt_dl_ops = {
 #ifdef CONFIG_BNXT_SRIOV
        .eswitch_mode_set = bnxt_dl_eswitch_mode_set,
@@ -622,6 +681,8 @@ static const struct devlink_ops bnxt_dl_ops = {
        .reload_limits    = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
        .reload_down      = bnxt_dl_reload_down,
        .reload_up        = bnxt_dl_reload_up,
+       .selftest_check   = bnxt_dl_selftest_check,
+       .selftest_run     = bnxt_dl_selftest_run,
 };
 
 static const struct devlink_ops bnxt_vf_dl_ops;
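
With the two ops above registered, the flash test becomes reachable through the generic devlink selftest interface. Assuming an iproute2 build with selftest support, usage would look roughly like the following (the device name is a placeholder and the exact CLI syntax is an assumption based on the devlink selftest uAPI, not something this patch defines):

    devlink dev selftests show pci/0000:5e:00.0
    devlink dev selftests run pci/0000:5e:00.0 id flash

The test itself (bnxt_nvm_test() above) reads the first VPD directory entry out of NVM and writes the same bytes back, reporting PASS only if both steps succeed.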
index 7191e5d..87eb536 100644 (file)
@@ -2176,14 +2176,14 @@ static void bnxt_print_admin_err(struct bnxt *bp)
        netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
 }
 
-static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
-                               u16 ext, u16 *index, u32 *item_length,
-                               u32 *data_length);
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+                        u16 ext, u16 *index, u32 *item_length,
+                        u32 *data_length);
 
-static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
-                           u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
-                           u32 dir_item_len, const u8 *data,
-                           size_t data_len)
+int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+                    u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+                    u32 dir_item_len, const u8 *data,
+                    size_t data_len)
 {
        struct bnxt *bp = netdev_priv(dev);
        struct hwrm_nvm_write_input *req;
@@ -2836,8 +2836,8 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
        return rc;
 }
 
-static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
-                              u32 length, u8 *data)
+int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+                       u32 length, u8 *data)
 {
        struct bnxt *bp = netdev_priv(dev);
        int rc;
@@ -2871,9 +2871,9 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
        return rc;
 }
 
-static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
-                               u16 ext, u16 *index, u32 *item_length,
-                               u32 *data_length)
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+                        u16 ext, u16 *index, u32 *item_length,
+                        u32 *data_length)
 {
        struct hwrm_nvm_find_dir_entry_output *output;
        struct hwrm_nvm_find_dir_entry_input *req;
index a592842..a8ecef8 100644 (file)
@@ -58,5 +58,14 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
 int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size);
 void bnxt_ethtool_init(struct bnxt *bp);
 void bnxt_ethtool_free(struct bnxt *bp);
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+                        u16 ext, u16 *index, u32 *item_length,
+                        u32 *data_length);
+int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+                    u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+                    u32 dir_item_len, const u8 *data,
+                    size_t data_len);
+int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+                       u32 length, u8 *data);
 
 #endif
index 0f6a549..29a6c2e 100644 (file)
@@ -142,6 +142,7 @@ static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
                         int ref_ok, struct funeth_txq *xdp_q)
 {
        struct bpf_prog *xdp_prog;
+       struct xdp_frame *xdpf;
        struct xdp_buff xdp;
        u32 act;
 
@@ -163,7 +164,9 @@ static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
        case XDP_TX:
                if (unlikely(!ref_ok))
                        goto pass;
-               if (!fun_xdp_tx(xdp_q, xdp.data, xdp.data_end - xdp.data))
+
+               xdpf = xdp_convert_buff_to_frame(&xdp);
+               if (!xdpf || !fun_xdp_tx(xdp_q, xdpf))
                        goto xdp_error;
                FUN_QSTAT_INC(q, xdp_tx);
                q->xdp_flush |= FUN_XDP_FLUSH_TX;
index a97e3af..54bdeb6 100644 (file)
@@ -487,7 +487,7 @@ static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
 
                do {
                        fun_xdp_unmap(q, reclaim_idx);
-                       page_frag_free(q->info[reclaim_idx].vaddr);
+                       xdp_return_frame(q->info[reclaim_idx].xdpf);
 
                        trace_funeth_tx_free(q, reclaim_idx, 1, head);
 
@@ -500,11 +500,11 @@ static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
        return npkts;
 }
 
-bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
+bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf)
 {
        struct fun_eth_tx_req *req;
        struct fun_dataop_gl *gle;
-       unsigned int idx;
+       unsigned int idx, len;
        dma_addr_t dma;
 
        if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
@@ -515,7 +515,8 @@ bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
                return false;
        }
 
-       dma = dma_map_single(q->dma_dev, data, len, DMA_TO_DEVICE);
+       len = xdpf->len;
+       dma = dma_map_single(q->dma_dev, xdpf->data, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(q->dma_dev, dma))) {
                FUN_QSTAT_INC(q, tx_map_err);
                return false;
@@ -535,7 +536,7 @@ bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
        gle = (struct fun_dataop_gl *)req->dataop.imm;
        fun_dataop_gl_init(gle, 0, 0, len, dma);
 
-       q->info[idx].vaddr = data;
+       q->info[idx].xdpf = xdpf;
 
        u64_stats_update_begin(&q->syncp);
        q->stats.tx_bytes += len;
@@ -566,12 +567,9 @@ int fun_xdp_xmit_frames(struct net_device *dev, int n,
        if (unlikely(q_idx >= fp->num_xdpqs))
                return -ENXIO;
 
-       for (q = xdpqs[q_idx], i = 0; i < n; i++) {
-               const struct xdp_frame *xdpf = frames[i];
-
-               if (!fun_xdp_tx(q, xdpf->data, xdpf->len))
+       for (q = xdpqs[q_idx], i = 0; i < n; i++)
+               if (!fun_xdp_tx(q, frames[i]))
                        break;
-       }
 
        if (unlikely(flags & XDP_XMIT_FLUSH))
                fun_txq_wr_db(q);
@@ -598,7 +596,7 @@ static void fun_xdpq_purge(struct funeth_txq *q)
                unsigned int idx = q->cons_cnt & q->mask;
 
                fun_xdp_unmap(q, idx);
-               page_frag_free(q->info[idx].vaddr);
+               xdp_return_frame(q->info[idx].xdpf);
                q->cons_cnt++;
        }
 }
index 1711f82..53b7e95 100644 (file)
@@ -96,8 +96,8 @@ struct funeth_txq_stats {  /* per Tx queue SW counters */
 
 struct funeth_tx_info {      /* per Tx descriptor state */
        union {
-               struct sk_buff *skb; /* associated packet */
-               void *vaddr;         /* start address for XDP */
+               struct sk_buff *skb;    /* associated packet (sk_buff path) */
+               struct xdp_frame *xdpf; /* associated XDP frame (XDP path) */
        };
 };
 
@@ -246,7 +246,7 @@ static inline int fun_irq_node(const struct fun_irq *p)
 int fun_rxq_napi_poll(struct napi_struct *napi, int budget);
 int fun_txq_napi_poll(struct napi_struct *napi, int budget);
 netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev);
-bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len);
+bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf);
 int fun_xdp_xmit_frames(struct net_device *dev, int n,
                        struct xdp_frame **frames, u32 flags);
 
index fb9f476..b36bf9c 100644 (file)
@@ -2033,11 +2033,15 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                 * non-zero req_queue_pairs says that user requested a new
                 * queue count via ethtool's set_channels, so use this
                 * value for queues distribution across traffic classes
+                * We need at least one queue pair for the interface
+                * to be usable, as handled by the else branch below.
                 */
                if (vsi->req_queue_pairs > 0)
                        vsi->num_queue_pairs = vsi->req_queue_pairs;
                else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                        vsi->num_queue_pairs = pf->num_lan_msix;
+               else
+                       vsi->num_queue_pairs = 1;
        }
 
        /* Number of queues per enabled TC */
index 340dc5a..a6fff8e 100644 (file)
@@ -658,7 +658,8 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
                rx_desc = ICE_RX_DESC(rx_ring, i);
 
                if (!(rx_desc->wb.status_error0 &
-                   cpu_to_le16(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)))
+                   (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) |
+                    cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
                        continue;
 
                rx_buf = &rx_ring->rx_buf[i];
index cbbbb67..9a3b14d 100644 (file)
@@ -4657,6 +4657,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
                ice_set_safe_mode_caps(hw);
        }
 
+       hw->ucast_shared = true;
+
        err = ice_init_pf(pf);
        if (err) {
                dev_err(dev, "ice_init_pf failed: %d\n", err);
@@ -6033,10 +6035,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
        if (vsi->netdev) {
                ice_set_rx_mode(vsi->netdev);
 
-               err = ice_vsi_vlan_setup(vsi);
+               if (vsi->type != ICE_VSI_LB) {
+                       err = ice_vsi_vlan_setup(vsi);
 
-               if (err)
-                       return err;
+                       if (err)
+                               return err;
+               }
        }
        ice_vsi_cfg_dcb_rings(vsi);
 
index 86093b2..3ba1408 100644 (file)
@@ -1309,39 +1309,6 @@ out_put_vf:
        return ret;
 }
 
-/**
- * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
- * @pf: PF used to reference the switch's rules
- * @umac: unicast MAC to compare against existing switch rules
- *
- * Return true on the first/any match, else return false
- */
-static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
-{
-       struct ice_sw_recipe *mac_recipe_list =
-               &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
-       struct ice_fltr_mgmt_list_entry *list_itr;
-       struct list_head *rule_head;
-       struct mutex *rule_lock; /* protect MAC filter list access */
-
-       rule_head = &mac_recipe_list->filt_rules;
-       rule_lock = &mac_recipe_list->filt_rule_lock;
-
-       mutex_lock(rule_lock);
-       list_for_each_entry(list_itr, rule_head, list_entry) {
-               u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
-
-               if (ether_addr_equal(existing_mac, umac)) {
-                       mutex_unlock(rule_lock);
-                       return true;
-               }
-       }
-
-       mutex_unlock(rule_lock);
-
-       return false;
-}
-
 /**
  * ice_set_vf_mac
  * @netdev: network interface device structure
@@ -1376,13 +1343,6 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
        if (ret)
                goto out_put_vf;
 
-       if (ice_unicast_mac_exists(pf, mac)) {
-               netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
-                          mac, vf_id, mac);
-               ret = -EINVAL;
-               goto out_put_vf;
-       }
-
        mutex_lock(&vf->cfg_lock);
 
        /* VF is notified of its new MAC via the PF's response to the
index 3f8b727..836dce8 100644 (file)
@@ -1751,11 +1751,13 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
 
        protocol = vlan_get_protocol(skb);
 
-       if (eth_p_mpls(protocol))
+       if (eth_p_mpls(protocol)) {
                ip.hdr = skb_inner_network_header(skb);
-       else
+               l4.hdr = skb_checksum_start(skb);
+       } else {
                ip.hdr = skb_network_header(skb);
-       l4.hdr = skb_checksum_start(skb);
+               l4.hdr = skb_transport_header(skb);
+       }
 
        /* compute outer L2 header size */
        l2_len = ip.hdr - skb->data;
index d46786c..094e3c9 100644 (file)
@@ -2972,7 +2972,8 @@ ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
                                     struct virtchnl_vlan_filtering_caps *vfc,
                                     struct virtchnl_vlan_filter_list_v2 *vfl)
 {
-       u16 num_requested_filters = vsi->num_vlan + vfl->num_elements;
+       u16 num_requested_filters = ice_vsi_num_non_zero_vlans(vsi) +
+               vfl->num_elements;
 
        if (num_requested_filters > vfc->max_filters)
                return false;
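
The check above now counts only the VSI's non-zero VLANs (via ice_vsi_num_non_zero_vlans()) plus the incoming batch against the VF's filter budget (vfc->max_filters). A trivial standalone sketch of that capacity test, with made-up numbers:

#include <stdbool.h>
#include <stdio.h>

static bool vlan_batch_fits(unsigned int non_zero_vlans,
                            unsigned int new_elements,
                            unsigned int max_filters)
{
        /* same shape as above: reject when the total exceeds the cap */
        return non_zero_vlans + new_elements <= max_filters;
}

int main(void)
{
        printf("%d %d\n", vlan_batch_fits(6, 2, 8), vlan_batch_fits(7, 2, 8));
        return 0;
}
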
index 28b1994..e64318c 100644 (file)
@@ -28,6 +28,9 @@
 #define MAX_RATE_EXPONENT              0x0FULL
 #define MAX_RATE_MANTISSA              0xFFULL
 
+#define CN10K_MAX_BURST_MANTISSA       0x7FFFULL
+#define CN10K_MAX_BURST_SIZE           8453888ULL
+
 /* Bitfields in NIX_TLX_PIR register */
 #define TLX_RATE_MANTISSA              GENMASK_ULL(8, 1)
 #define TLX_RATE_EXPONENT              GENMASK_ULL(12, 9)
@@ -35,6 +38,9 @@
 #define TLX_BURST_MANTISSA             GENMASK_ULL(36, 29)
 #define TLX_BURST_EXPONENT             GENMASK_ULL(40, 37)
 
+#define CN10K_TLX_BURST_MANTISSA       GENMASK_ULL(43, 29)
+#define CN10K_TLX_BURST_EXPONENT       GENMASK_ULL(47, 44)
+
 struct otx2_tc_flow_stats {
        u64 bytes;
        u64 pkts;
@@ -77,33 +83,42 @@ int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
 }
 EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);
 
-static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
-                                     u32 *burst_mantissa)
+static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
+                                     u32 *burst_exp, u32 *burst_mantissa)
 {
+       int max_burst, max_mantissa;
        unsigned int tmp;
 
+       if (is_dev_otx2(nic->pdev)) {
+               max_burst = MAX_BURST_SIZE;
+               max_mantissa = MAX_BURST_MANTISSA;
+       } else {
+               max_burst = CN10K_MAX_BURST_SIZE;
+               max_mantissa = CN10K_MAX_BURST_MANTISSA;
+       }
+
        /* Burst is calculated as
         * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
         * Max supported burst size is 130,816 bytes.
         */
-       burst = min_t(u32, burst, MAX_BURST_SIZE);
+       burst = min_t(u32, burst, max_burst);
        if (burst) {
                *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
                tmp = burst - rounddown_pow_of_two(burst);
-               if (burst < MAX_BURST_MANTISSA)
+               if (burst < max_mantissa)
                        *burst_mantissa = tmp * 2;
                else
                        *burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
        } else {
                *burst_exp = MAX_BURST_EXPONENT;
-               *burst_mantissa = MAX_BURST_MANTISSA;
+               *burst_mantissa = max_mantissa;
        }
 }
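
The comment above gives the hardware's burst formula, burst = ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256, and otx2_get_egress_burst_cfg() inverts it, now clamping to either the original otx2 limits or the wider CN10K ones. A self-contained worked example of the encode/decode round trip for an arbitrary 98,304-byte burst, using the large-burst branch where the mantissa is tmp / 2^(exp - 7):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t burst = 98304, p2 = 1;
        unsigned int ilog2 = 0, exp, mantissa;

        while (p2 * 2 <= burst) {       /* rounddown_pow_of_two() / ilog2() */
                p2 *= 2;
                ilog2++;
        }
        exp = ilog2 ? ilog2 - 1 : 0;

        /* large-burst branch of the encoding above */
        mantissa = (burst - p2) / (1ULL << (exp - 7));

        /* decode with the hardware formula from the comment */
        printf("exp=%u mantissa=%u decoded=%llu\n", exp, mantissa,
               (unsigned long long)(((256ULL + mantissa) << (1 + exp)) / 256));
        return 0;
}

This prints exp=15 mantissa=128 decoded=98304, i.e. the encoding round-trips exactly for this value.
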
 
-static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
+static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
                                     u32 *mantissa, u32 *div_exp)
 {
-       unsigned int tmp;
+       u64 tmp;
 
        /* Rate calculation by hardware
         *
@@ -132,21 +147,44 @@ static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
        }
 }
 
-static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate)
+static u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
+                                      u64 maxrate, u32 burst)
 {
-       struct otx2_hw *hw = &nic->hw;
-       struct nix_txschq_config *req;
        u32 burst_exp, burst_mantissa;
        u32 exp, mantissa, div_exp;
+       u64 regval = 0;
+
+       /* Get exponent and mantissa values from the desired rate */
+       otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
+       otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
+
+       if (is_dev_otx2(nic->pdev)) {
+               regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
+                               FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
+                               FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
+                               FIELD_PREP(TLX_RATE_EXPONENT, exp) |
+                               FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+       } else {
+               regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
+                               FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
+                               FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
+                               FIELD_PREP(TLX_RATE_EXPONENT, exp) |
+                               FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+       }
+
+       return regval;
+}
+
+static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
+                                        u32 burst, u64 maxrate)
+{
+       struct otx2_hw *hw = &nic->hw;
+       struct nix_txschq_config *req;
        int txschq, err;
 
        /* All SQs share the same TL4, so pick the first scheduler */
        txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
 
-       /* Get exponent and mantissa values from the desired rate */
-       otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
-       otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
-
        mutex_lock(&nic->mbox.lock);
        req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
        if (!req) {
@@ -157,11 +195,7 @@ static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 ma
        req->lvl = NIX_TXSCH_LVL_TL4;
        req->num_regs = 1;
        req->reg[0] = NIX_AF_TL4X_PIR(txschq);
-       req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
-                        FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
-                        FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
-                        FIELD_PREP(TLX_RATE_EXPONENT, exp) |
-                        FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+       req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);
 
        err = otx2_sync_mbox_msg(&nic->mbox);
        mutex_unlock(&nic->mbox.lock);
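
otx2_get_txschq_rate_regval() above packs the exponent/mantissa pair into the PIR register value with FIELD_PREP() and the GENMASK_ULL() field definitions from the top of the file, choosing the wider burst fields on CN10K. A small userspace analogue, using hand-rolled helpers in place of the kernel macros and the rate-field positions shown above (mantissa in bits 8:1, exponent in bits 12:9, plus bit 0 set as in the regval above):

#include <stdint.h>
#include <stdio.h>

/* hand-rolled stand-ins for GENMASK_ULL() and FIELD_PREP() */
static uint64_t genmask(unsigned int hi, unsigned int lo)
{
        return (~0ULL << lo) & (~0ULL >> (63 - hi));
}

static uint64_t field_prep(unsigned int hi, unsigned int lo, uint64_t val)
{
        return (val << lo) & genmask(hi, lo);
}

int main(void)
{
        uint64_t regval = field_prep(12, 9, 15) |       /* rate exponent */
                          field_prep(8, 1, 200) |       /* rate mantissa */
                          1ULL;                         /* BIT_ULL(0), as above */

        printf("regval=0x%llx\n", (unsigned long long)regval);
        return 0;
}
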
@@ -230,7 +264,7 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
        struct netlink_ext_ack *extack = cls->common.extack;
        struct flow_action *actions = &cls->rule->action;
        struct flow_action_entry *entry;
-       u32 rate;
+       u64 rate;
        int err;
 
        err = otx2_tc_validate_flow(nic, actions, extack);
@@ -256,7 +290,7 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
                }
                /* Convert bytes per second to Mbps */
                rate = entry->police.rate_bytes_ps * 8;
-               rate = max_t(u32, rate / 1000000, 1);
+               rate = max_t(u64, rate / 1000000, 1);
                err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
                if (err)
                        return err;
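
The policer rate is carried as bytes per second and converted to Mbps above; widening rate to u64 avoids truncating the intermediate bits-per-second value, which stops fitting in 32 bits once the policed rate passes roughly 4 Gbit/s. A quick arithmetic check at 100 Gbit/s:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t bytes_ps = 12500000000ULL;     /* 100 Gbit/s in bytes per second */
        uint64_t bits_ps  = bytes_ps * 8;       /* 100,000,000,000 */
        uint64_t mbps     = bits_ps / 1000000;  /* 100,000 Mbps */

        printf("bits/s=%llu (truncated to u32: %u), Mbps=%llu\n",
               (unsigned long long)bits_ps, (uint32_t)bits_ps,
               (unsigned long long)mbps);
        return 0;
}
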
@@ -614,21 +648,27 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
 
                flow_spec->dport = match.key->dst;
                flow_mask->dport = match.mask->dst;
-               if (ip_proto == IPPROTO_UDP)
-                       req->features |= BIT_ULL(NPC_DPORT_UDP);
-               else if (ip_proto == IPPROTO_TCP)
-                       req->features |= BIT_ULL(NPC_DPORT_TCP);
-               else if (ip_proto == IPPROTO_SCTP)
-                       req->features |= BIT_ULL(NPC_DPORT_SCTP);
+
+               if (flow_mask->dport) {
+                       if (ip_proto == IPPROTO_UDP)
+                               req->features |= BIT_ULL(NPC_DPORT_UDP);
+                       else if (ip_proto == IPPROTO_TCP)
+                               req->features |= BIT_ULL(NPC_DPORT_TCP);
+                       else if (ip_proto == IPPROTO_SCTP)
+                               req->features |= BIT_ULL(NPC_DPORT_SCTP);
+               }
 
                flow_spec->sport = match.key->src;
                flow_mask->sport = match.mask->src;
-               if (ip_proto == IPPROTO_UDP)
-                       req->features |= BIT_ULL(NPC_SPORT_UDP);
-               else if (ip_proto == IPPROTO_TCP)
-                       req->features |= BIT_ULL(NPC_SPORT_TCP);
-               else if (ip_proto == IPPROTO_SCTP)
-                       req->features |= BIT_ULL(NPC_SPORT_SCTP);
+
+               if (flow_mask->sport) {
+                       if (ip_proto == IPPROTO_UDP)
+                               req->features |= BIT_ULL(NPC_SPORT_UDP);
+                       else if (ip_proto == IPPROTO_TCP)
+                               req->features |= BIT_ULL(NPC_SPORT_TCP);
+                       else if (ip_proto == IPPROTO_SCTP)
+                               req->features |= BIT_ULL(NPC_SPORT_SCTP);
+               }
        }
 
        return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
index c267ca1..4b64bda 100644 (file)
@@ -847,7 +847,7 @@ static void prestera_event_handlers_unregister(struct prestera_switch *sw)
 static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
 {
        struct device_node *base_mac_np;
-       int ret;
+       int ret = 0;
 
        if (sw->np) {
                base_mac_np = of_parse_phandle(sw->np, "base-mac-provider", 0);
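
The one-line change above initializes ret so every path through the function returns a defined value, not just the one that performs the DT lookup. A minimal sketch of that bug class, with made-up names and values:

#include <stdio.h>

/* made-up example of the uninitialized-return pattern */
static int set_base_mac(int have_dt_provider)
{
        int ret = 0;            /* without this, the fallback path below
                                 * would return garbage from the stack */

        if (have_dt_provider)
                ret = -22;      /* pretend the DT lookup failed (-EINVAL) */

        return ret;
}

int main(void)
{
        printf("%d %d\n", set_base_mac(0), set_base_mac(1));
        return 0;
}
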
index c370d65..d9426b0 100644 (file)
@@ -1001,7 +1001,7 @@ static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
 }
 
 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
-                        bool napi)
+                        struct xdp_frame_bulk *bq, bool napi)
 {
        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
                if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
@@ -1031,23 +1031,24 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
                }
        }
 
-       if (tx_buf->type == MTK_TYPE_SKB) {
-               if (tx_buf->data &&
-                   tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+       if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+               if (tx_buf->type == MTK_TYPE_SKB) {
                        struct sk_buff *skb = tx_buf->data;
 
                        if (napi)
                                napi_consume_skb(skb, napi);
                        else
                                dev_kfree_skb_any(skb);
-               }
-       } else if (tx_buf->data) {
-               struct xdp_frame *xdpf = tx_buf->data;
+               } else {
+                       struct xdp_frame *xdpf = tx_buf->data;
 
-               if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
-                       xdp_return_frame_rx_napi(xdpf);
-               else
-                       xdp_return_frame(xdpf);
+                       if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
+                               xdp_return_frame_rx_napi(xdpf);
+                       else if (bq)
+                               xdp_return_frame_bulk(xdpf, bq);
+                       else
+                               xdp_return_frame(xdpf);
+               }
        }
        tx_buf->flags = 0;
        tx_buf->data = NULL;
@@ -1297,7 +1298,7 @@ err_dma:
                tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
 
                /* unmap dma */
-               mtk_tx_unmap(eth, tx_buf, false);
+               mtk_tx_unmap(eth, tx_buf, NULL, false);
 
                itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
                if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
@@ -1523,68 +1524,112 @@ static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
                skb_free_frag(data);
 }
 
+static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
+                            struct mtk_tx_dma_desc_info *txd_info,
+                            struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
+                            void *data, u16 headroom, int index, bool dma_map)
+{
+       struct mtk_tx_ring *ring = &eth->tx_ring;
+       struct mtk_mac *mac = netdev_priv(dev);
+       struct mtk_tx_dma *txd_pdma;
+
+       if (dma_map) {  /* ndo_xdp_xmit */
+               txd_info->addr = dma_map_single(eth->dma_dev, data,
+                                               txd_info->size, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
+                       return -ENOMEM;
+
+               tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+       } else {
+               struct page *page = virt_to_head_page(data);
+
+               txd_info->addr = page_pool_get_dma_addr(page) +
+                                sizeof(struct xdp_frame) + headroom;
+               dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
+                                          txd_info->size, DMA_BIDIRECTIONAL);
+       }
+       mtk_tx_set_dma_desc(dev, txd, txd_info);
+
+       tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+       tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
+       tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+
+       txd_pdma = qdma_to_pdma(ring, txd);
+       setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
+                    index);
+
+       return 0;
+}
+
 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
                                struct net_device *dev, bool dma_map)
 {
+       struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
        const struct mtk_soc_data *soc = eth->soc;
        struct mtk_tx_ring *ring = &eth->tx_ring;
        struct mtk_tx_dma_desc_info txd_info = {
                .size   = xdpf->len,
                .first  = true,
-               .last   = true,
+               .last   = !xdp_frame_has_frags(xdpf),
        };
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_tx_dma *txd, *txd_pdma;
-       int err = 0, index = 0, n_desc = 1;
-       struct mtk_tx_buf *tx_buf;
+       int err, index = 0, n_desc = 1, nr_frags;
+       struct mtk_tx_dma *htxd, *txd, *txd_pdma;
+       struct mtk_tx_buf *htx_buf, *tx_buf;
+       void *data = xdpf->data;
 
        if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
                return -EBUSY;
 
-       if (unlikely(atomic_read(&ring->free_count) <= 1))
+       nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+       if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
                return -EBUSY;
 
        spin_lock(&eth->page_lock);
 
        txd = ring->next_free;
        if (txd == ring->last_free) {
-               err = -ENOMEM;
-               goto out;
+               spin_unlock(&eth->page_lock);
+               return -ENOMEM;
        }
+       htxd = txd;
 
        tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
        memset(tx_buf, 0, sizeof(*tx_buf));
+       htx_buf = tx_buf;
 
-       if (dma_map) {  /* ndo_xdp_xmit */
-               txd_info.addr = dma_map_single(eth->dma_dev, xdpf->data,
-                                              txd_info.size, DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) {
-                       err = -ENOMEM;
-                       goto out;
-               }
-               tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
-       } else {
-               struct page *page = virt_to_head_page(xdpf->data);
+       for (;;) {
+               err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
+                                       data, xdpf->headroom, index, dma_map);
+               if (err < 0)
+                       goto unmap;
 
-               txd_info.addr = page_pool_get_dma_addr(page) +
-                               sizeof(*xdpf) + xdpf->headroom;
-               dma_sync_single_for_device(eth->dma_dev, txd_info.addr,
-                                          txd_info.size,
-                                          DMA_BIDIRECTIONAL);
-       }
-       mtk_tx_set_dma_desc(dev, txd, &txd_info);
+               if (txd_info.last)
+                       break;
 
-       tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+               if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
+                       txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+                       txd_pdma = qdma_to_pdma(ring, txd);
+                       if (txd == ring->last_free)
+                               goto unmap;
 
-       txd_pdma = qdma_to_pdma(ring, txd);
-       setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, txd_info.size,
-                    index++);
+                       tx_buf = mtk_desc_to_tx_buf(ring, txd,
+                                                   soc->txrx.txd_size);
+                       memset(tx_buf, 0, sizeof(*tx_buf));
+                       n_desc++;
+               }
+
+               memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+               txd_info.size = skb_frag_size(&sinfo->frags[index]);
+               txd_info.last = index + 1 == nr_frags;
+               data = skb_frag_address(&sinfo->frags[index]);
 
+               index++;
+       }
        /* store xdpf for cleanup */
-       tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
-       tx_buf->data = xdpf;
+       htx_buf->data = xdpf;
 
        if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+               txd_pdma = qdma_to_pdma(ring, txd);
                if (index & 1)
                        txd_pdma->txd2 |= TX_DMA_LS0;
                else
@@ -1608,7 +1653,24 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
                mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
                        MT7628_TX_CTX_IDX0);
        }
-out:
+
+       spin_unlock(&eth->page_lock);
+
+       return 0;
+
+unmap:
+       while (htxd != txd) {
+               txd_pdma = qdma_to_pdma(ring, htxd);
+               tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+               mtk_tx_unmap(eth, tx_buf, NULL, false);
+
+               htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+               if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+                       txd_pdma->txd2 = TX_DMA_DESP2_DEF;
+
+               htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
+       }
+
        spin_unlock(&eth->page_lock);
 
        return err;
@@ -1913,6 +1975,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
        const struct mtk_reg_map *reg_map = eth->soc->reg_map;
        struct mtk_tx_ring *ring = &eth->tx_ring;
        struct mtk_tx_buf *tx_buf;
+       struct xdp_frame_bulk bq;
        struct mtk_tx_dma *desc;
        u32 cpu, dma;
 
@@ -1920,6 +1983,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
        dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
 
        desc = mtk_qdma_phys_to_virt(ring, cpu);
+       xdp_frame_bulk_init(&bq);
 
        while ((cpu != dma) && budget) {
                u32 next_cpu = desc->txd2;
@@ -1937,25 +2001,23 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
                if (!tx_buf->data)
                        break;
 
-               if (tx_buf->type == MTK_TYPE_SKB &&
-                   tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
-                       struct sk_buff *skb = tx_buf->data;
+               if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+                       if (tx_buf->type == MTK_TYPE_SKB) {
+                               struct sk_buff *skb = tx_buf->data;
 
-                       bytes[mac] += skb->len;
-                       done[mac]++;
-                       budget--;
-               } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
-                          tx_buf->type == MTK_TYPE_XDP_NDO) {
+                               bytes[mac] += skb->len;
+                               done[mac]++;
+                       }
                        budget--;
                }
-
-               mtk_tx_unmap(eth, tx_buf, true);
+               mtk_tx_unmap(eth, tx_buf, &bq, true);
 
                ring->last_free = desc;
                atomic_inc(&ring->free_count);
 
                cpu = next_cpu;
        }
+       xdp_flush_frame_bulk(&bq);
 
        ring->last_free_ptr = cpu;
        mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
@@ -1968,29 +2030,29 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
 {
        struct mtk_tx_ring *ring = &eth->tx_ring;
        struct mtk_tx_buf *tx_buf;
+       struct xdp_frame_bulk bq;
        struct mtk_tx_dma *desc;
        u32 cpu, dma;
 
        cpu = ring->cpu_idx;
        dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
+       xdp_frame_bulk_init(&bq);
 
        while ((cpu != dma) && budget) {
                tx_buf = &ring->buf[cpu];
                if (!tx_buf->data)
                        break;
 
-               if (tx_buf->type == MTK_TYPE_SKB &&
-                   tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
-                       struct sk_buff *skb = tx_buf->data;
-                       bytes[0] += skb->len;
-                       done[0]++;
-                       budget--;
-               } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
-                          tx_buf->type == MTK_TYPE_XDP_NDO) {
+               if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+                       if (tx_buf->type == MTK_TYPE_SKB) {
+                               struct sk_buff *skb = tx_buf->data;
+
+                               bytes[0] += skb->len;
+                               done[0]++;
+                       }
                        budget--;
                }
-
-               mtk_tx_unmap(eth, tx_buf, true);
+               mtk_tx_unmap(eth, tx_buf, &bq, true);
 
                desc = ring->dma + cpu * eth->soc->txrx.txd_size;
                ring->last_free = desc;
@@ -1998,6 +2060,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
 
                cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
        }
+       xdp_flush_frame_bulk(&bq);
 
        ring->cpu_idx = cpu;
 
@@ -2207,7 +2270,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 
        if (ring->buf) {
                for (i = 0; i < MTK_DMA_SIZE; i++)
-                       mtk_tx_unmap(eth, &ring->buf[i], false);
+                       mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
                kfree(ring->buf);
                ring->buf = NULL;
        }
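
The mtk_eth_soc changes above teach mtk_tx_unmap() to hand completed XDP frames to an xdp_frame_bulk, so the Tx completion loops can batch frees and flush once after the loop (xdp_frame_bulk_init() / xdp_return_frame_bulk() / xdp_flush_frame_bulk()), and they extend mtk_xdp_submit_frame() to walk the frags of multi-buffer frames. The batching idea, reduced to a self-contained userspace sketch with made-up names and a 16-entry batch:

#include <stdio.h>

#define BULK_MAX 16                     /* made-up batch size */

struct bulk {
        int count;
        void *items[BULK_MAX];
};

static void bulk_flush(struct bulk *bq)
{
        if (bq->count)
                printf("releasing %d frames in one call\n", bq->count);
        bq->count = 0;
}

static void bulk_add(struct bulk *bq, void *item)
{
        bq->items[bq->count++] = item;
        if (bq->count == BULK_MAX)      /* amortize the expensive release */
                bulk_flush(bq);
}

int main(void)
{
        struct bulk bq = { 0 };
        int frames[40];

        for (int i = 0; i < 40; i++)    /* stand-in for the completion loop */
                bulk_add(&bq, &frames[i]);
        bulk_flush(&bq);                /* final flush after the loop, as above */
        return 0;
}
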
index 5b11557..0eb7b83 100644 (file)
@@ -204,9 +204,13 @@ out:
 
 static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
 {
+       struct mlx4_dev *dev = persist->dev;
+       struct devlink *devlink;
        int err = 0;
 
        mlx4_enter_error_state(persist);
+       devlink = priv_to_devlink(mlx4_priv(dev));
+       devl_lock(devlink);
        mutex_lock(&persist->interface_state_mutex);
        if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
            !(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
@@ -215,6 +219,7 @@ static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
                          err);
        }
        mutex_unlock(&persist->interface_state_mutex);
+       devl_unlock(devlink);
 }
 
 static void dump_err_buf(struct mlx4_dev *dev)
index ac5468b..82a07a3 100644 (file)
@@ -226,10 +226,10 @@ int mlx4_crdump_init(struct mlx4_dev *dev)
 
        /* Create cr-space region */
        crdump->region_crspace =
-               devlink_region_create(devlink,
-                                     &region_cr_space_ops,
-                                     MAX_NUM_OF_DUMPS_TO_STORE,
-                                     pci_resource_len(pdev, 0));
+               devl_region_create(devlink,
+                                  &region_cr_space_ops,
+                                  MAX_NUM_OF_DUMPS_TO_STORE,
+                                  pci_resource_len(pdev, 0));
        if (IS_ERR(crdump->region_crspace))
                mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
                          region_cr_space_str,
@@ -237,10 +237,10 @@ int mlx4_crdump_init(struct mlx4_dev *dev)
 
        /* Create fw-health region */
        crdump->region_fw_health =
-               devlink_region_create(devlink,
-                                     &region_fw_health_ops,
-                                     MAX_NUM_OF_DUMPS_TO_STORE,
-                                     HEALTH_BUFFER_SIZE);
+               devl_region_create(devlink,
+                                  &region_fw_health_ops,
+                                  MAX_NUM_OF_DUMPS_TO_STORE,
+                                  HEALTH_BUFFER_SIZE);
        if (IS_ERR(crdump->region_fw_health))
                mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
                          region_fw_health_str,
@@ -253,6 +253,6 @@ void mlx4_crdump_end(struct mlx4_dev *dev)
 {
        struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
 
-       devlink_region_destroy(crdump->region_fw_health);
-       devlink_region_destroy(crdump->region_crspace);
+       devl_region_destroy(crdump->region_fw_health);
+       devl_region_destroy(crdump->region_crspace);
 }
index b187c21..2c764d1 100644 (file)
@@ -3033,7 +3033,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
        struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
        int err;
 
-       err = devlink_port_register(devlink, &info->devlink_port, port);
+       err = devl_port_register(devlink, &info->devlink_port, port);
        if (err)
                return err;
 
@@ -3071,7 +3071,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
        err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
        if (err) {
                mlx4_err(dev, "Failed to create file for port %d\n", port);
-               devlink_port_unregister(&info->devlink_port);
+               devl_port_unregister(&info->devlink_port);
                info->port = -1;
                return err;
        }
@@ -3093,7 +3093,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
                mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
                device_remove_file(&info->dev->persist->pdev->dev,
                                   &info->port_attr);
-               devlink_port_unregister(&info->devlink_port);
+               devl_port_unregister(&info->devlink_port);
                info->port = -1;
                return err;
        }
@@ -3109,7 +3109,7 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
        device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
        device_remove_file(&info->dev->persist->pdev->dev,
                           &info->port_mtu_attr);
-       devlink_port_unregister(&info->devlink_port);
+       devl_port_unregister(&info->devlink_port);
 
 #ifdef CONFIG_RFS_ACCEL
        free_irq_cpu_rmap(info->rmap);
@@ -3333,6 +3333,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
                         int total_vfs, int *nvfs, struct mlx4_priv *priv,
                         int reset_flow)
 {
+       struct devlink *devlink = priv_to_devlink(priv);
        struct mlx4_dev *dev;
        unsigned sum = 0;
        int err;
@@ -3341,6 +3342,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
        struct mlx4_dev_cap *dev_cap = NULL;
        int existing_vfs = 0;
 
+       devl_assert_locked(devlink);
        dev = &priv->dev;
 
        INIT_LIST_HEAD(&priv->ctx_list);
@@ -3956,9 +3958,11 @@ static int mlx4_devlink_reload_down(struct devlink *devlink, bool netns_change,
                NL_SET_ERR_MSG_MOD(extack, "Namespace change is not supported");
                return -EOPNOTSUPP;
        }
+       devl_lock(devlink);
        if (persist->num_vfs)
                mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n");
        mlx4_restart_one_down(persist->pdev);
+       devl_unlock(devlink);
        return 0;
 }
 
@@ -3971,8 +3975,10 @@ static int mlx4_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
        struct mlx4_dev_persistent *persist = dev->persist;
        int err;
 
+       devl_lock(devlink);
        *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
        err = mlx4_restart_one_up(persist->pdev, true, devlink);
+       devl_unlock(devlink);
        if (err)
                mlx4_err(persist->dev, "mlx4_restart_one_up failed, ret=%d\n",
                         err);
@@ -3999,6 +4005,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv), &pdev->dev);
        if (!devlink)
                return -ENOMEM;
+       devl_lock(devlink);
        priv = devlink_priv(devlink);
 
        dev       = &priv->dev;
@@ -4026,6 +4033,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
        pci_save_state(pdev);
        devlink_set_features(devlink, DEVLINK_F_RELOAD);
+       devl_unlock(devlink);
        devlink_register(devlink);
        return 0;
 
@@ -4035,6 +4043,7 @@ err_params_unregister:
 err_devlink_unregister:
        kfree(dev->persist);
 err_devlink_free:
+       devl_unlock(devlink);
        devlink_free(devlink);
        return ret;
 }
@@ -4056,8 +4065,11 @@ static void mlx4_unload_one(struct pci_dev *pdev)
        struct mlx4_dev  *dev  = persist->dev;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int               pci_dev_data;
+       struct devlink *devlink;
        int p, i;
 
+       devlink = priv_to_devlink(priv);
+       devl_assert_locked(devlink);
        if (priv->removed)
                return;
 
@@ -4137,6 +4149,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
 
        devlink_unregister(devlink);
 
+       devl_lock(devlink);
        if (mlx4_is_slave(dev))
                persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
 
@@ -4172,6 +4185,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
        devlink_params_unregister(devlink, mlx4_devlink_params,
                                  ARRAY_SIZE(mlx4_devlink_params));
        kfree(dev->persist);
+       devl_unlock(devlink);
        devlink_free(devlink);
 }
 
@@ -4292,15 +4306,20 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
 {
        struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+       struct mlx4_dev *dev = persist->dev;
+       struct devlink *devlink;
 
        mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
        mlx4_enter_error_state(persist);
 
+       devlink = priv_to_devlink(mlx4_priv(dev));
+       devl_lock(devlink);
        mutex_lock(&persist->interface_state_mutex);
        if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
                mlx4_unload_one(pdev);
 
        mutex_unlock(&persist->interface_state_mutex);
+       devl_unlock(devlink);
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;
 
@@ -4333,6 +4352,7 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
        struct mlx4_dev  *dev  = persist->dev;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+       struct devlink *devlink;
        int total_vfs;
        int err;
 
@@ -4340,6 +4360,8 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
        total_vfs = dev->persist->num_vfs;
        memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
 
+       devlink = priv_to_devlink(priv);
+       devl_lock(devlink);
        mutex_lock(&persist->interface_state_mutex);
        if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
                err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
@@ -4358,19 +4380,23 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
        }
 end:
        mutex_unlock(&persist->interface_state_mutex);
-
+       devl_unlock(devlink);
 }
 
 static void mlx4_shutdown(struct pci_dev *pdev)
 {
        struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
        struct mlx4_dev *dev = persist->dev;
+       struct devlink *devlink;
 
        mlx4_info(persist->dev, "mlx4_shutdown was called\n");
+       devlink = priv_to_devlink(mlx4_priv(dev));
+       devl_lock(devlink);
        mutex_lock(&persist->interface_state_mutex);
        if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
                mlx4_unload_one(pdev);
        mutex_unlock(&persist->interface_state_mutex);
+       devl_unlock(devlink);
        mlx4_pci_disable_device(dev);
 }
 
@@ -4385,12 +4411,16 @@ static int __maybe_unused mlx4_suspend(struct device *dev_d)
        struct pci_dev *pdev = to_pci_dev(dev_d);
        struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
        struct mlx4_dev *dev = persist->dev;
+       struct devlink *devlink;
 
        mlx4_err(dev, "suspend was called\n");
+       devlink = priv_to_devlink(mlx4_priv(dev));
+       devl_lock(devlink);
        mutex_lock(&persist->interface_state_mutex);
        if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
                mlx4_unload_one(pdev);
        mutex_unlock(&persist->interface_state_mutex);
+       devl_unlock(devlink);
 
        return 0;
 }
@@ -4402,6 +4432,7 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
        struct mlx4_dev *dev = persist->dev;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+       struct devlink *devlink;
        int total_vfs;
        int ret = 0;
 
@@ -4409,6 +4440,8 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
        total_vfs = dev->persist->num_vfs;
        memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
 
+       devlink = priv_to_devlink(priv);
+       devl_lock(devlink);
        mutex_lock(&persist->interface_state_mutex);
        if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
                ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs,
@@ -4422,6 +4455,7 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
                }
        }
        mutex_unlock(&persist->interface_state_mutex);
+       devl_unlock(devlink);
 
        return ret;
 }
index ccf2068..0571e40 100644 (file)
@@ -335,13 +335,12 @@ static void del_adev(struct auxiliary_device *adev)
 
 int mlx5_attach_device(struct mlx5_core_dev *dev)
 {
-       struct devlink *devlink = priv_to_devlink(dev);
        struct mlx5_priv *priv = &dev->priv;
        struct auxiliary_device *adev;
        struct auxiliary_driver *adrv;
        int ret = 0, i;
 
-       devl_lock(devlink);
+       devl_assert_locked(priv_to_devlink(dev));
        mutex_lock(&mlx5_intf_mutex);
        priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
        priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
@@ -394,20 +393,18 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
        }
        priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
        mutex_unlock(&mlx5_intf_mutex);
-       devl_unlock(devlink);
        return ret;
 }
 
 void mlx5_detach_device(struct mlx5_core_dev *dev)
 {
-       struct devlink *devlink = priv_to_devlink(dev);
        struct mlx5_priv *priv = &dev->priv;
        struct auxiliary_device *adev;
        struct auxiliary_driver *adrv;
        pm_message_t pm = {};
        int i;
 
-       devl_lock(devlink);
+       devl_assert_locked(priv_to_devlink(dev));
        mutex_lock(&mlx5_intf_mutex);
        priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
        for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
@@ -441,21 +438,17 @@ skip_suspend:
        priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
        priv->flags |= MLX5_PRIV_FLAGS_DETACH;
        mutex_unlock(&mlx5_intf_mutex);
-       devl_unlock(devlink);
 }
 
 int mlx5_register_device(struct mlx5_core_dev *dev)
 {
-       struct devlink *devlink;
        int ret;
 
-       devlink = priv_to_devlink(dev);
-       devl_lock(devlink);
+       devl_assert_locked(priv_to_devlink(dev));
        mutex_lock(&mlx5_intf_mutex);
        dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
        ret = mlx5_rescan_drivers_locked(dev);
        mutex_unlock(&mlx5_intf_mutex);
-       devl_unlock(devlink);
        if (ret)
                mlx5_unregister_device(dev);
 
@@ -464,15 +457,11 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
 
 void mlx5_unregister_device(struct mlx5_core_dev *dev)
 {
-       struct devlink *devlink;
-
-       devlink = priv_to_devlink(dev);
-       devl_lock(devlink);
+       devl_assert_locked(priv_to_devlink(dev));
        mutex_lock(&mlx5_intf_mutex);
        dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
        mlx5_rescan_drivers_locked(dev);
        mutex_unlock(&mlx5_intf_mutex);
-       devl_unlock(devlink);
 }
 
 static int add_drivers(struct mlx5_core_dev *dev)
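
The mlx4 and mlx5 hunks above move to the devlink instance-lock convention: outer entry points (probe/remove, reload, error, PM and shutdown handlers) take devl_lock()/devl_unlock(), inner helpers such as mlx5_attach_device() only devl_assert_locked(), and plain devlink_* calls become their devl_* locked counterparts. A userspace analogue of that split, using a pthread mutex and a plain flag in place of the devlink primitives (all names below are made up):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t instance_lock = PTHREAD_MUTEX_INITIALIZER;
static int instance_locked;             /* stand-in for lockdep state */

static void inner_reconfigure(void)
{
        assert(instance_locked);        /* like devl_assert_locked() */
        printf("reconfiguring under the instance lock\n");
}

static void outer_entry_point(void)
{
        pthread_mutex_lock(&instance_lock);     /* like devl_lock() */
        instance_locked = 1;

        inner_reconfigure();

        instance_locked = 0;
        pthread_mutex_unlock(&instance_lock);   /* like devl_unlock() */
}

int main(void)
{
        outer_entry_point();
        return 0;
}
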
index f85166e..1c05a70 100644 (file)
@@ -104,7 +104,16 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
        if (err)
                return err;
 
-       return mlx5_fw_reset_wait_reset_done(dev);
+       err = mlx5_fw_reset_wait_reset_done(dev);
+       if (err)
+               return err;
+
+       mlx5_unload_one_devl_locked(dev);
+       err = mlx5_health_wait_pci_up(dev);
+       if (err)
+               NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");
+
+       return err;
 }
 
 static int mlx5_devlink_trigger_fw_live_patch(struct devlink *devlink,
@@ -134,6 +143,7 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct pci_dev *pdev = dev->pdev;
        bool sf_dev_allocated;
+       int ret = 0;
 
        sf_dev_allocated = mlx5_sf_dev_allocated(dev);
        if (sf_dev_allocated) {
@@ -154,19 +164,25 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
                NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
        }
 
+       devl_lock(devlink);
        switch (action) {
        case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
-               mlx5_unload_one(dev);
-               return 0;
+               mlx5_unload_one_devl_locked(dev);
+               break;
        case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
                if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
-                       return mlx5_devlink_trigger_fw_live_patch(devlink, extack);
-               return mlx5_devlink_reload_fw_activate(devlink, extack);
+                       ret = mlx5_devlink_trigger_fw_live_patch(devlink, extack);
+               else
+                       ret = mlx5_devlink_reload_fw_activate(devlink, extack);
+               break;
        default:
                /* Unsupported action should not get to this function */
                WARN_ON(1);
-               return -EOPNOTSUPP;
+               ret = -EOPNOTSUPP;
        }
+
+       devl_unlock(devlink);
+       return ret;
 }
 
 static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_action action,
@@ -174,24 +190,29 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
                                  struct netlink_ext_ack *extack)
 {
        struct mlx5_core_dev *dev = devlink_priv(devlink);
+       int ret = 0;
 
+       devl_lock(devlink);
        *actions_performed = BIT(action);
        switch (action) {
        case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
-               return mlx5_load_one(dev, false);
+               ret = mlx5_load_one_devl_locked(dev, false);
+               break;
        case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
                if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
                        break;
                /* On fw_activate action, also driver is reloaded and reinit performed */
                *actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
-               return mlx5_load_one(dev, false);
+               ret = mlx5_load_one_devl_locked(dev, false);
+               break;
        default:
                /* Unsupported action should not get to this function */
                WARN_ON(1);
-               return -EOPNOTSUPP;
+               ret = -EOPNOTSUPP;
        }
 
-       return 0;
+       devl_unlock(devlink);
+       return ret;
 }
 
 static struct mlx5_devlink_trap *mlx5_find_trap_by_id(struct mlx5_core_dev *dev, int trap_id)
@@ -828,28 +849,28 @@ static int mlx5_devlink_traps_register(struct devlink *devlink)
        struct mlx5_core_dev *core_dev = devlink_priv(devlink);
        int err;
 
-       err = devlink_trap_groups_register(devlink, mlx5_trap_groups_arr,
-                                          ARRAY_SIZE(mlx5_trap_groups_arr));
+       err = devl_trap_groups_register(devlink, mlx5_trap_groups_arr,
+                                       ARRAY_SIZE(mlx5_trap_groups_arr));
        if (err)
                return err;
 
-       err = devlink_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
-                                    &core_dev->priv);
+       err = devl_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
+                                 &core_dev->priv);
        if (err)
                goto err_trap_group;
        return 0;
 
 err_trap_group:
-       devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
-                                      ARRAY_SIZE(mlx5_trap_groups_arr));
+       devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+                                   ARRAY_SIZE(mlx5_trap_groups_arr));
        return err;
 }
 
 static void mlx5_devlink_traps_unregister(struct devlink *devlink)
 {
-       devlink_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
-       devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
-                                      ARRAY_SIZE(mlx5_trap_groups_arr));
+       devl_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
+       devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+                                   ARRAY_SIZE(mlx5_trap_groups_arr));
 }
 
 int mlx5_devlink_register(struct devlink *devlink)
index 04c0a5e..1839f1a 100644 (file)
@@ -194,4 +194,14 @@ static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
 {
        mlx5e_ktls_cleanup_rx(priv);
 }
+
+static inline int mlx5e_accel_init_tx(struct mlx5e_priv *priv)
+{
+       return mlx5e_ktls_init_tx(priv);
+}
+
+static inline void mlx5e_accel_cleanup_tx(struct mlx5e_priv *priv)
+{
+       mlx5e_ktls_cleanup_tx(priv);
+}
 #endif /* __MLX5E_EN_ACCEL_H__ */
index d016624..948400d 100644 (file)
@@ -42,6 +42,8 @@ static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
 }
 
 void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
+int mlx5e_ktls_init_tx(struct mlx5e_priv *priv);
+void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv);
 int mlx5e_ktls_init_rx(struct mlx5e_priv *priv);
 void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv);
 int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable);
@@ -62,6 +64,8 @@ static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
 struct mlx5e_tls_sw_stats {
        atomic64_t tx_tls_ctx;
        atomic64_t tx_tls_del;
+       atomic64_t tx_tls_pool_alloc;
+       atomic64_t tx_tls_pool_free;
        atomic64_t rx_tls_ctx;
        atomic64_t rx_tls_del;
 };
@@ -69,6 +73,7 @@ struct mlx5e_tls_sw_stats {
 struct mlx5e_tls {
        struct mlx5e_tls_sw_stats sw_stats;
        struct workqueue_struct *rx_wq;
+       struct mlx5e_tls_tx_pool *tx_pool;
 };
 
 int mlx5e_ktls_init(struct mlx5e_priv *priv);
@@ -83,6 +88,15 @@ static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
 {
 }
 
+static inline int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
+{
+       return 0;
+}
+
+static inline void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
+{
+}
+
 static inline int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
 {
        return 0;
index 2ab46c4..7c1c0eb 100644 (file)
@@ -41,6 +41,8 @@
 static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
        { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_del) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_pool_alloc) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_pool_free) },
        { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_ctx) },
        { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_del) },
 };
index fba21ed..6b6c704 100644 (file)
@@ -35,30 +35,70 @@ u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
        stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
        stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
        stop_room += num_dumps * mlx5e_stop_room_for_wqe(mdev, MLX5E_KTLS_DUMP_WQEBBS);
+       stop_room += 1; /* fence nop */
 
        return stop_room;
 }
 
+static void mlx5e_ktls_set_tisc(struct mlx5_core_dev *mdev, void *tisc)
+{
+       MLX5_SET(tisc, tisc, tls_en, 1);
+       MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
+       MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
+}
+
 static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
 {
        u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
-       void *tisc;
 
-       tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+       mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));
 
-       MLX5_SET(tisc, tisc, tls_en, 1);
+       return mlx5_core_create_tis(mdev, in, tisn);
+}
+
+static int mlx5e_ktls_create_tis_cb(struct mlx5_core_dev *mdev,
+                                   struct mlx5_async_ctx *async_ctx,
+                                   u32 *out, int outlen,
+                                   mlx5_async_cbk_t callback,
+                                   struct mlx5_async_work *context)
+{
+       u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+
+       mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));
+       MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+
+       return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
+                               out, outlen, callback, context);
+}
+
+static int mlx5e_ktls_destroy_tis_cb(struct mlx5_core_dev *mdev, u32 tisn,
+                                    struct mlx5_async_ctx *async_ctx,
+                                    u32 *out, int outlen,
+                                    mlx5_async_cbk_t callback,
+                                    struct mlx5_async_work *context)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
+
+       MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+       MLX5_SET(destroy_tis_in, in, tisn, tisn);
 
-       return mlx5e_create_tis(mdev, in, tisn);
+       return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
+                               out, outlen, callback, context);
 }
 
 struct mlx5e_ktls_offload_context_tx {
-       struct tls_offload_context_tx *tx_ctx;
-       struct tls12_crypto_info_aes_gcm_128 crypto_info;
-       struct mlx5e_tls_sw_stats *sw_stats;
+       /* fast path */
        u32 expected_seq;
        u32 tisn;
-       u32 key_id;
        bool ctx_post_pending;
+       /* control / resync */
+       struct list_head list_node; /* member of the pool */
+       struct tls12_crypto_info_aes_gcm_128 crypto_info;
+       struct tls_offload_context_tx *tx_ctx;
+       struct mlx5_core_dev *mdev;
+       struct mlx5e_tls_sw_stats *sw_stats;
+       u32 key_id;
+       u8 create_err : 1;
 };
 
 static void
@@ -82,28 +122,368 @@ mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
        return *ctx;
 }
 
+/* struct for callback API management */
+struct mlx5e_async_ctx {
+       struct mlx5_async_work context;
+       struct mlx5_async_ctx async_ctx;
+       struct work_struct work;
+       struct mlx5e_ktls_offload_context_tx *priv_tx;
+       struct completion complete;
+       int err;
+       union {
+               u32 out_create[MLX5_ST_SZ_DW(create_tis_out)];
+               u32 out_destroy[MLX5_ST_SZ_DW(destroy_tis_out)];
+       };
+};
+
+static struct mlx5e_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
+{
+       struct mlx5e_async_ctx *bulk_async;
+       int i;
+
+       bulk_async = kvcalloc(n, sizeof(struct mlx5e_async_ctx), GFP_KERNEL);
+       if (!bulk_async)
+               return NULL;
+
+       for (i = 0; i < n; i++) {
+               struct mlx5e_async_ctx *async = &bulk_async[i];
+
+               mlx5_cmd_init_async_ctx(mdev, &async->async_ctx);
+               init_completion(&async->complete);
+       }
+
+       return bulk_async;
+}
+
+static void mlx5e_bulk_async_cleanup(struct mlx5e_async_ctx *bulk_async, int n)
+{
+       int i;
+
+       for (i = 0; i < n; i++) {
+               struct mlx5e_async_ctx *async = &bulk_async[i];
+
+               mlx5_cmd_cleanup_async_ctx(&async->async_ctx);
+       }
+       kvfree(bulk_async);
+}
+
+static void create_tis_callback(int status, struct mlx5_async_work *context)
+{
+       struct mlx5e_async_ctx *async =
+               container_of(context, struct mlx5e_async_ctx, context);
+       struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;
+
+       if (status) {
+               async->err = status;
+               priv_tx->create_err = 1;
+               goto out;
+       }
+
+       priv_tx->tisn = MLX5_GET(create_tis_out, async->out_create, tisn);
+out:
+       complete(&async->complete);
+}
+
+static void destroy_tis_callback(int status, struct mlx5_async_work *context)
+{
+       struct mlx5e_async_ctx *async =
+               container_of(context, struct mlx5e_async_ctx, context);
+       struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;
+
+       complete(&async->complete);
+       kfree(priv_tx);
+}
+
+static struct mlx5e_ktls_offload_context_tx *
+mlx5e_tls_priv_tx_init(struct mlx5_core_dev *mdev, struct mlx5e_tls_sw_stats *sw_stats,
+                      struct mlx5e_async_ctx *async)
+{
+       struct mlx5e_ktls_offload_context_tx *priv_tx;
+       int err;
+
+       priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
+       if (!priv_tx)
+               return ERR_PTR(-ENOMEM);
+
+       priv_tx->mdev = mdev;
+       priv_tx->sw_stats = sw_stats;
+
+       if (!async) {
+               err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
+               if (err)
+                       goto err_out;
+       } else {
+               async->priv_tx = priv_tx;
+               err = mlx5e_ktls_create_tis_cb(mdev, &async->async_ctx,
+                                              async->out_create, sizeof(async->out_create),
+                                              create_tis_callback, &async->context);
+               if (err)
+                       goto err_out;
+       }
+
+       return priv_tx;
+
+err_out:
+       kfree(priv_tx);
+       return ERR_PTR(err);
+}
+
+static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv_tx,
+                                     struct mlx5e_async_ctx *async)
+{
+       if (priv_tx->create_err) {
+               complete(&async->complete);
+               kfree(priv_tx);
+               return;
+       }
+       async->priv_tx = priv_tx;
+       mlx5e_ktls_destroy_tis_cb(priv_tx->mdev, priv_tx->tisn,
+                                 &async->async_ctx,
+                                 async->out_destroy, sizeof(async->out_destroy),
+                                 destroy_tis_callback, &async->context);
+}
+
+static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
+                                          struct list_head *list, int size)
+{
+       struct mlx5e_ktls_offload_context_tx *obj;
+       struct mlx5e_async_ctx *bulk_async;
+       int i;
+
+       bulk_async = mlx5e_bulk_async_init(mdev, size);
+       if (!bulk_async)
+               return;
+
+       i = 0;
+       list_for_each_entry(obj, list, list_node) {
+               mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
+               i++;
+       }
+
+       for (i = 0; i < size; i++) {
+               struct mlx5e_async_ctx *async = &bulk_async[i];
+
+               wait_for_completion(&async->complete);
+       }
+       mlx5e_bulk_async_cleanup(bulk_async, size);
+}
+
+/* Recycling pool API */
+
+#define MLX5E_TLS_TX_POOL_BULK (16)
+#define MLX5E_TLS_TX_POOL_HIGH (4 * 1024)
+#define MLX5E_TLS_TX_POOL_LOW (MLX5E_TLS_TX_POOL_HIGH / 4)
+
+struct mlx5e_tls_tx_pool {
+       struct mlx5_core_dev *mdev;
+       struct mlx5e_tls_sw_stats *sw_stats;
+       struct mutex lock; /* Protects access to the pool */
+       struct list_head list;
+       size_t size;
+
+       struct workqueue_struct *wq;
+       struct work_struct create_work;
+       struct work_struct destroy_work;
+};
+
+static void create_work(struct work_struct *work)
+{
+       struct mlx5e_tls_tx_pool *pool =
+               container_of(work, struct mlx5e_tls_tx_pool, create_work);
+       struct mlx5e_ktls_offload_context_tx *obj;
+       struct mlx5e_async_ctx *bulk_async;
+       LIST_HEAD(local_list);
+       int i, j, err = 0;
+
+       bulk_async = mlx5e_bulk_async_init(pool->mdev, MLX5E_TLS_TX_POOL_BULK);
+       if (!bulk_async)
+               return;
+
+       for (i = 0; i < MLX5E_TLS_TX_POOL_BULK; i++) {
+               obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async[i]);
+               if (IS_ERR(obj)) {
+                       err = PTR_ERR(obj);
+                       break;
+               }
+               list_add(&obj->list_node, &local_list);
+       }
+
+       for (j = 0; j < i; j++) {
+               struct mlx5e_async_ctx *async = &bulk_async[j];
+
+               wait_for_completion(&async->complete);
+               if (!err && async->err)
+                       err = async->err;
+       }
+       atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc);
+       mlx5e_bulk_async_cleanup(bulk_async, MLX5E_TLS_TX_POOL_BULK);
+       if (err)
+               goto err_out;
+
+       mutex_lock(&pool->lock);
+       if (pool->size + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH) {
+               mutex_unlock(&pool->lock);
+               goto err_out;
+       }
+       list_splice(&local_list, &pool->list);
+       pool->size += MLX5E_TLS_TX_POOL_BULK;
+       if (pool->size <= MLX5E_TLS_TX_POOL_LOW)
+               queue_work(pool->wq, work);
+       mutex_unlock(&pool->lock);
+       return;
+
+err_out:
+       mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, i);
+       atomic64_add(i, &pool->sw_stats->tx_tls_pool_free);
+}
+
+static void destroy_work(struct work_struct *work)
+{
+       struct mlx5e_tls_tx_pool *pool =
+               container_of(work, struct mlx5e_tls_tx_pool, destroy_work);
+       struct mlx5e_ktls_offload_context_tx *obj;
+       LIST_HEAD(local_list);
+       int i = 0;
+
+       mutex_lock(&pool->lock);
+       if (pool->size < MLX5E_TLS_TX_POOL_HIGH) {
+               mutex_unlock(&pool->lock);
+               return;
+       }
+
+       list_for_each_entry(obj, &pool->list, list_node)
+               if (++i == MLX5E_TLS_TX_POOL_BULK)
+                       break;
+
+       list_cut_position(&local_list, &pool->list, &obj->list_node);
+       pool->size -= MLX5E_TLS_TX_POOL_BULK;
+       if (pool->size >= MLX5E_TLS_TX_POOL_HIGH)
+               queue_work(pool->wq, work);
+       mutex_unlock(&pool->lock);
+
+       mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
+       atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
+}
+
+static struct mlx5e_tls_tx_pool *mlx5e_tls_tx_pool_init(struct mlx5_core_dev *mdev,
+                                                       struct mlx5e_tls_sw_stats *sw_stats)
+{
+       struct mlx5e_tls_tx_pool *pool;
+
+       BUILD_BUG_ON(MLX5E_TLS_TX_POOL_LOW + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH);
+
+       pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
+       if (!pool)
+               return NULL;
+
+       pool->wq = create_singlethread_workqueue("mlx5e_tls_tx_pool");
+       if (!pool->wq)
+               goto err_free;
+
+       INIT_LIST_HEAD(&pool->list);
+       mutex_init(&pool->lock);
+
+       INIT_WORK(&pool->create_work, create_work);
+       INIT_WORK(&pool->destroy_work, destroy_work);
+
+       pool->mdev = mdev;
+       pool->sw_stats = sw_stats;
+
+       return pool;
+
+err_free:
+       kvfree(pool);
+       return NULL;
+}
+
+static void mlx5e_tls_tx_pool_list_cleanup(struct mlx5e_tls_tx_pool *pool)
+{
+       while (pool->size > MLX5E_TLS_TX_POOL_BULK) {
+               struct mlx5e_ktls_offload_context_tx *obj;
+               LIST_HEAD(local_list);
+               int i = 0;
+
+               list_for_each_entry(obj, &pool->list, list_node)
+                       if (++i == MLX5E_TLS_TX_POOL_BULK)
+                               break;
+
+               list_cut_position(&local_list, &pool->list, &obj->list_node);
+               mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
+               atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
+               pool->size -= MLX5E_TLS_TX_POOL_BULK;
+       }
+       if (pool->size) {
+               mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &pool->list, pool->size);
+               atomic64_add(pool->size, &pool->sw_stats->tx_tls_pool_free);
+       }
+}
+
+static void mlx5e_tls_tx_pool_cleanup(struct mlx5e_tls_tx_pool *pool)
+{
+       mlx5e_tls_tx_pool_list_cleanup(pool);
+       destroy_workqueue(pool->wq);
+       kvfree(pool);
+}
+
+static void pool_push(struct mlx5e_tls_tx_pool *pool, struct mlx5e_ktls_offload_context_tx *obj)
+{
+       mutex_lock(&pool->lock);
+       list_add(&obj->list_node, &pool->list);
+       if (++pool->size == MLX5E_TLS_TX_POOL_HIGH)
+               queue_work(pool->wq, &pool->destroy_work);
+       mutex_unlock(&pool->lock);
+}
+
+static struct mlx5e_ktls_offload_context_tx *pool_pop(struct mlx5e_tls_tx_pool *pool)
+{
+       struct mlx5e_ktls_offload_context_tx *obj;
+
+       mutex_lock(&pool->lock);
+       if (unlikely(pool->size == 0)) {
+               /* pool is empty:
+                * - trigger the populating work, and
+                * - serve the current context via the regular blocking api.
+                */
+               queue_work(pool->wq, &pool->create_work);
+               mutex_unlock(&pool->lock);
+               obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, NULL);
+               if (!IS_ERR(obj))
+                       atomic64_inc(&pool->sw_stats->tx_tls_pool_alloc);
+               return obj;
+       }
+
+       obj = list_first_entry(&pool->list, struct mlx5e_ktls_offload_context_tx,
+                              list_node);
+       list_del(&obj->list_node);
+       if (--pool->size == MLX5E_TLS_TX_POOL_LOW)
+               queue_work(pool->wq, &pool->create_work);
+       mutex_unlock(&pool->lock);
+       return obj;
+}
+
+/* End of pool API */
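/* Illustrative sketch of the pop discipline used by the callers below
 * (demo-only code; all demo_* names are hypothetical, not driver symbols):
 * a consumer first tries the recycled-object list and only falls back to a
 * slow, synchronous allocation when the pool is empty, kicking the
 * background refill work in that case.
 */
#include <stdlib.h>

struct demo_obj {
	struct demo_obj *next;
};

struct demo_pool {
	struct demo_obj *head;
	unsigned int size;
};

static void demo_kick_refill(struct demo_pool *pool)
{
	(void)pool;	/* stand-in for queue_work(pool->wq, &pool->create_work) */
}

static struct demo_obj *demo_pool_pop(struct demo_pool *pool)
{
	struct demo_obj *obj = pool->head;

	if (!obj) {
		/* Pool empty: refill asynchronously and serve this request
		 * through the slow path (mlx5e_tls_priv_tx_init() in the
		 * driver, plain malloc() here).
		 */
		demo_kick_refill(pool);
		return malloc(sizeof(*obj));
	}
	pool->head = obj->next;
	pool->size--;
	return obj;
}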
+
 int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
                      struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn)
 {
        struct mlx5e_ktls_offload_context_tx *priv_tx;
+       struct mlx5e_tls_tx_pool *pool;
        struct tls_context *tls_ctx;
-       struct mlx5_core_dev *mdev;
        struct mlx5e_priv *priv;
        int err;
 
        tls_ctx = tls_get_ctx(sk);
        priv = netdev_priv(netdev);
-       mdev = priv->mdev;
+       pool = priv->tls->tx_pool;
 
-       priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
-       if (!priv_tx)
-               return -ENOMEM;
+       priv_tx = pool_pop(pool);
+       if (IS_ERR(priv_tx))
+               return PTR_ERR(priv_tx);
 
-       err = mlx5_ktls_create_key(mdev, crypto_info, &priv_tx->key_id);
+       err = mlx5_ktls_create_key(pool->mdev, crypto_info, &priv_tx->key_id);
        if (err)
                goto err_create_key;
 
-       priv_tx->sw_stats = &priv->tls->sw_stats;
        priv_tx->expected_seq = start_offload_tcp_sn;
        priv_tx->crypto_info  =
                *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
@@ -111,36 +491,29 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
 
        mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);
 
-       err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
-       if (err)
-               goto err_create_tis;
-
        priv_tx->ctx_post_pending = true;
        atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);
 
        return 0;
 
-err_create_tis:
-       mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
 err_create_key:
-       kfree(priv_tx);
+       pool_push(pool, priv_tx);
        return err;
 }
 
 void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
 {
        struct mlx5e_ktls_offload_context_tx *priv_tx;
-       struct mlx5_core_dev *mdev;
+       struct mlx5e_tls_tx_pool *pool;
        struct mlx5e_priv *priv;
 
        priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
        priv = netdev_priv(netdev);
-       mdev = priv->mdev;
+       pool = priv->tls->tx_pool;
 
        atomic64_inc(&priv_tx->sw_stats->tx_tls_del);
-       mlx5e_destroy_tis(mdev, priv_tx->tisn);
-       mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
-       kfree(priv_tx);
+       mlx5_ktls_destroy_key(priv_tx->mdev, priv_tx->key_id);
+       pool_push(pool, priv_tx);
 }
 
 static void tx_fill_wi(struct mlx5e_txqsq *sq,
@@ -201,6 +574,16 @@ post_progress_params(struct mlx5e_txqsq *sq,
        sq->pc += num_wqebbs;
 }
 
+static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
+{
+       struct mlx5_wq_cyc *wq = &sq->wq;
+       u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
+       tx_fill_wi(sq, pi, 1, 0, NULL);
+
+       mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
+}
+
 static void
 mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
                              struct mlx5e_ktls_offload_context_tx *priv_tx,
@@ -212,6 +595,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
                post_static_params(sq, priv_tx, fence_first_post);
 
        post_progress_params(sq, priv_tx, progress_fence);
+       tx_post_fence_nop(sq);
 }
 
 struct tx_sync_info {
@@ -304,7 +688,7 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
 }
 
 static int
-tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
+tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn)
 {
        struct mlx5_wqe_ctrl_seg *cseg;
        struct mlx5_wqe_data_seg *dseg;
@@ -326,7 +710,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8)  | MLX5_OPCODE_DUMP);
        cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
        cseg->tis_tir_num      = cpu_to_be32(tisn << 8);
-       cseg->fm_ce_se         = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
 
        fsz = skb_frag_size(frag);
        dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
@@ -361,67 +744,39 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
        stats->tls_dump_bytes += wi->num_bytes;
 }
 
-static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
-{
-       struct mlx5_wq_cyc *wq = &sq->wq;
-       u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-
-       tx_fill_wi(sq, pi, 1, 0, NULL);
-
-       mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
-}
-
 static enum mlx5e_ktls_sync_retval
 mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
                         struct mlx5e_txqsq *sq,
                         int datalen,
                         u32 seq)
 {
-       struct mlx5e_sq_stats *stats = sq->stats;
        enum mlx5e_ktls_sync_retval ret;
        struct tx_sync_info info = {};
-       int i = 0;
+       int i;
 
        ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
-       if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
-               if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
-                       stats->tls_skip_no_sync_data++;
-                       return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
-               }
-               /* We might get here if a retransmission reaches the driver
-                * after the relevant record is acked.
+       if (unlikely(ret != MLX5E_KTLS_SYNC_DONE))
+               /* We might get here with ret == FAIL if a retransmission
+                * reaches the driver after the relevant record is acked.
                 * It should be safe to drop the packet in this case
                 */
-               stats->tls_drop_no_sync_data++;
-               goto err_out;
-       }
-
-       stats->tls_ooo++;
+               return ret;
 
        tx_post_resync_params(sq, priv_tx, info.rcd_sn);
 
-       /* If no dump WQE was sent, we need to have a fence NOP WQE before the
-        * actual data xmit.
-        */
-       if (!info.nr_frags) {
-               tx_post_fence_nop(sq);
-               return MLX5E_KTLS_SYNC_DONE;
-       }
-
-       for (; i < info.nr_frags; i++) {
+       for (i = 0; i < info.nr_frags; i++) {
                unsigned int orig_fsz, frag_offset = 0, n = 0;
                skb_frag_t *f = &info.frags[i];
 
                orig_fsz = skb_frag_size(f);
 
                do {
-                       bool fence = !(i || frag_offset);
                        unsigned int fsz;
 
                        n++;
                        fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
                        skb_frag_size_set(f, fsz);
-                       if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
+                       if (tx_post_resync_dump(sq, f, priv_tx->tisn)) {
                                page_ref_add(skb_frag_page(f), n - 1);
                                goto err_out;
                        }
@@ -469,24 +824,27 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
 
        priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
 
-       if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
+       if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx)))
                mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
-       }
 
        seq = ntohl(tcp_hdr(skb)->seq);
        if (unlikely(priv_tx->expected_seq != seq)) {
                enum mlx5e_ktls_sync_retval ret =
                        mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
 
+               stats->tls_ooo++;
+
                switch (ret) {
                case MLX5E_KTLS_SYNC_DONE:
                        break;
                case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
+                       stats->tls_skip_no_sync_data++;
                        if (likely(!skb->decrypted))
                                goto out;
                        WARN_ON_ONCE(1);
-                       fallthrough;
+                       goto err_out;
                case MLX5E_KTLS_SYNC_FAIL:
+                       stats->tls_drop_no_sync_data++;
                        goto err_out;
                }
        }
@@ -505,3 +863,24 @@ err_out:
        dev_kfree_skb_any(skb);
        return false;
 }
+
+int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
+{
+       if (!mlx5e_is_ktls_tx(priv->mdev))
+               return 0;
+
+       priv->tls->tx_pool = mlx5e_tls_tx_pool_init(priv->mdev, &priv->tls->sw_stats);
+       if (!priv->tls->tx_pool)
+               return -ENOMEM;
+
+       return 0;
+}
+
+void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
+{
+       if (!mlx5e_is_ktls_tx(priv->mdev))
+               return;
+
+       mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool);
+       priv->tls->tx_pool = NULL;
+}
index 180b2f4..24ddd43 100644 (file)
@@ -3144,6 +3144,7 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
                mlx5e_mqprio_rl_free(priv->mqprio_rl);
                priv->mqprio_rl = NULL;
        }
+       mlx5e_accel_cleanup_tx(priv);
        mlx5e_destroy_tises(priv);
 }
 
@@ -5147,9 +5148,17 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
                return err;
        }
 
+       err = mlx5e_accel_init_tx(priv);
+       if (err)
+               goto err_destroy_tises;
+
        mlx5e_set_mqprio_rl(priv);
        mlx5e_dcbnl_initialize(priv);
        return 0;
+
+err_destroy_tises:
+       mlx5e_destroy_tises(priv);
+       return err;
 }
 
 static void mlx5e_nic_enable(struct mlx5e_priv *priv)
index 30a6c9f..6aa5804 100644 (file)
@@ -1300,20 +1300,19 @@ abort:
  */
 int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
 {
-       struct devlink *devlink;
        bool toggle_lag;
        int ret;
 
        if (!mlx5_esw_allowed(esw))
                return 0;
 
+       devl_assert_locked(priv_to_devlink(esw->dev));
+
        toggle_lag = !mlx5_esw_is_fdb_created(esw);
 
        if (toggle_lag)
                mlx5_lag_disable_change(esw->dev);
 
-       devlink = priv_to_devlink(esw->dev);
-       devl_lock(devlink);
        down_write(&esw->mode_lock);
        if (!mlx5_esw_is_fdb_created(esw)) {
                ret = mlx5_eswitch_enable_locked(esw, num_vfs);
@@ -1327,7 +1326,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
                        esw->esw_funcs.num_vfs = num_vfs;
        }
        up_write(&esw->mode_lock);
-       devl_unlock(devlink);
 
        if (toggle_lag)
                mlx5_lag_enable_change(esw->dev);
@@ -1338,13 +1336,10 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
 /* When disabling sriov, free driver level resources. */
 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
 {
-       struct devlink *devlink;
-
        if (!mlx5_esw_allowed(esw))
                return;
 
-       devlink = priv_to_devlink(esw->dev);
-       devl_lock(devlink);
+       devl_assert_locked(priv_to_devlink(esw->dev));
        down_write(&esw->mode_lock);
        /* If driver is unloaded, this function is called twice by remove_one()
         * and mlx5_unload(). Prevent the second call.
@@ -1373,7 +1368,6 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
 
 unlock:
        up_write(&esw->mode_lock);
-       devl_unlock(devlink);
 }
 
 /* Free resources for corresponding eswitch mode. It is called by devlink
@@ -1407,18 +1401,14 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
 
 void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
 {
-       struct devlink *devlink;
-
        if (!mlx5_esw_allowed(esw))
                return;
 
+       devl_assert_locked(priv_to_devlink(esw->dev));
        mlx5_lag_disable_change(esw->dev);
-       devlink = priv_to_devlink(esw->dev);
-       devl_lock(devlink);
        down_write(&esw->mode_lock);
        mlx5_eswitch_disable_locked(esw);
        up_write(&esw->mode_lock);
-       devl_unlock(devlink);
        mlx5_lag_enable_change(esw->dev);
 }
 
index 052af49..e8896f3 100644 (file)
@@ -149,6 +149,9 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
        if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
                complete(&fw_reset->done);
        } else {
+               mlx5_unload_one(dev);
+               if (mlx5_health_wait_pci_up(dev))
+                       mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
                mlx5_load_one(dev, false);
                devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
                                                        BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
@@ -183,15 +186,9 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
        struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
                                                      reset_reload_work);
        struct mlx5_core_dev *dev = fw_reset->dev;
-       int err;
 
        mlx5_sync_reset_clear_reset_requested(dev, false);
        mlx5_enter_error_state(dev, true);
-       mlx5_unload_one(dev);
-       err = mlx5_health_wait_pci_up(dev);
-       if (err)
-               mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
-       fw_reset->ret = err;
        mlx5_fw_reset_complete_reload(dev);
 }
 
@@ -395,7 +392,6 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
        }
 
        mlx5_enter_error_state(dev, true);
-       mlx5_unload_one(dev);
 done:
        fw_reset->ret = err;
        mlx5_fw_reset_complete_reload(dev);
index 659021c..2cf2c99 100644 (file)
@@ -666,16 +666,20 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
        struct mlx5_fw_reporter_ctx fw_reporter_ctx;
        struct mlx5_core_health *health;
        struct mlx5_core_dev *dev;
+       struct devlink *devlink;
        struct mlx5_priv *priv;
 
        health = container_of(work, struct mlx5_core_health, fatal_report_work);
        priv = container_of(health, struct mlx5_priv, health);
        dev = container_of(priv, struct mlx5_core_dev, priv);
+       devlink = priv_to_devlink(dev);
 
        enter_error_state(dev, false);
        if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
+               devl_lock(devlink);
                if (mlx5_health_try_recover(dev))
                        mlx5_core_err(dev, "health recovery failed\n");
+               devl_unlock(devlink);
                return;
        }
        fw_reporter_ctx.err_synd = health->synd;
index 8b621c1..1de9b39 100644 (file)
@@ -1304,8 +1304,10 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
 
 int mlx5_init_one(struct mlx5_core_dev *dev)
 {
+       struct devlink *devlink = priv_to_devlink(dev);
        int err = 0;
 
+       devl_lock(devlink);
        mutex_lock(&dev->intf_state_mutex);
        dev->state = MLX5_DEVICE_STATE_UP;
 
@@ -1334,6 +1336,7 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
                goto err_register;
 
        mutex_unlock(&dev->intf_state_mutex);
+       devl_unlock(devlink);
        return 0;
 
 err_register:
@@ -1348,11 +1351,15 @@ function_teardown:
 err_function:
        dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
        mutex_unlock(&dev->intf_state_mutex);
+       devl_unlock(devlink);
        return err;
 }
 
 void mlx5_uninit_one(struct mlx5_core_dev *dev)
 {
+       struct devlink *devlink = priv_to_devlink(dev);
+
+       devl_lock(devlink);
        mutex_lock(&dev->intf_state_mutex);
 
        mlx5_unregister_device(dev);
@@ -1371,13 +1378,15 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
        mlx5_function_teardown(dev, true);
 out:
        mutex_unlock(&dev->intf_state_mutex);
+       devl_unlock(devlink);
 }
 
-int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
+int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery)
 {
        int err = 0;
        u64 timeout;
 
+       devl_assert_locked(priv_to_devlink(dev));
        mutex_lock(&dev->intf_state_mutex);
        if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
                mlx5_core_warn(dev, "interface is up, NOP\n");
@@ -1419,8 +1428,20 @@ out:
        return err;
 }
 
-void mlx5_unload_one(struct mlx5_core_dev *dev)
+int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
+{
+       struct devlink *devlink = priv_to_devlink(dev);
+       int ret;
+
+       devl_lock(devlink);
+       ret = mlx5_load_one_devl_locked(dev, recovery);
+       devl_unlock(devlink);
+       return ret;
+}
+
+void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev)
 {
+       devl_assert_locked(priv_to_devlink(dev));
        mutex_lock(&dev->intf_state_mutex);
 
        mlx5_detach_device(dev);
@@ -1438,6 +1459,15 @@ out:
        mutex_unlock(&dev->intf_state_mutex);
 }
 
+void mlx5_unload_one(struct mlx5_core_dev *dev)
+{
+       struct devlink *devlink = priv_to_devlink(dev);
+
+       devl_lock(devlink);
+       mlx5_unload_one_devl_locked(dev);
+       devl_unlock(devlink);
+}
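/* Illustrative sketch of the *_devl_locked wrapper pattern introduced above
 * (demo-only code; the demo_* names are hypothetical): the _devl_locked
 * variant carries the real work and asserts that the caller already holds
 * the devlink instance lock, while the plain variant stays a convenience
 * wrapper that takes and drops the lock itself.  Paths that already hold
 * the lock (health recovery, SR-IOV) call the _devl_locked form directly.
 */
#include <pthread.h>

static pthread_mutex_t demo_devlink_lock = PTHREAD_MUTEX_INITIALIZER;

static void demo_unload_one_devl_locked(void)
{
	/* real teardown would run here, with demo_devlink_lock held */
}

static void demo_unload_one(void)
{
	pthread_mutex_lock(&demo_devlink_lock);
	demo_unload_one_devl_locked();
	pthread_mutex_unlock(&demo_devlink_lock);
}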
+
 static const int types[] = {
        MLX5_CAP_GENERAL,
        MLX5_CAP_GENERAL_2,
@@ -1902,7 +1932,7 @@ MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
 void mlx5_disable_device(struct mlx5_core_dev *dev)
 {
        mlx5_error_sw_reset(dev);
-       mlx5_unload_one(dev);
+       mlx5_unload_one_devl_locked(dev);
 }
 
 int mlx5_recover_device(struct mlx5_core_dev *dev)
@@ -1913,7 +1943,7 @@ int mlx5_recover_device(struct mlx5_core_dev *dev)
                        return -EIO;
        }
 
-       return mlx5_load_one(dev, true);
+       return mlx5_load_one_devl_locked(dev, true);
 }
 
 static struct pci_driver mlx5_core_driver = {
index 9cc7afe..ad61b86 100644 (file)
@@ -290,7 +290,9 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
 int mlx5_init_one(struct mlx5_core_dev *dev);
 void mlx5_uninit_one(struct mlx5_core_dev *dev);
 void mlx5_unload_one(struct mlx5_core_dev *dev);
+void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev);
 int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
+int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);
 
 int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out);
 
index 5757cd6..ee2e1b7 100644 (file)
@@ -154,13 +154,16 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
 static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
 {
        struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
+       struct devlink *devlink = priv_to_devlink(dev);
        int err;
 
+       devl_lock(devlink);
        err = mlx5_device_enable_sriov(dev, num_vfs);
+       devl_unlock(devlink);
        if (err) {
                mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
                return err;
        }
 
        err = pci_enable_sriov(pdev, num_vfs);
        if (err) {
@@ -173,10 +176,13 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
 void mlx5_sriov_disable(struct pci_dev *pdev)
 {
        struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
+       struct devlink *devlink = priv_to_devlink(dev);
        int num_vfs = pci_num_vf(dev->pdev);
 
        pci_disable_sriov(pdev);
+       devl_lock(devlink);
        mlx5_device_disable_sriov(dev, num_vfs, true);
+       devl_unlock(devlink);
 }
 
 int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
index a48f893..7c93bd0 100644 (file)
@@ -3335,6 +3335,24 @@ u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
 }
 EXPORT_SYMBOL(mlxsw_core_read_frc_l);
 
+u32 mlxsw_core_read_utc_sec(struct mlxsw_core *mlxsw_core)
+{
+       return mlxsw_core->bus->read_utc_sec(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_read_utc_sec);
+
+u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core)
+{
+       return mlxsw_core->bus->read_utc_nsec(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_read_utc_nsec);
+
+bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core)
+{
+       return mlxsw_core->driver->sdq_supports_cqe_v2;
+}
+EXPORT_SYMBOL(mlxsw_core_sdq_supports_cqe_v2);
+
 void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core)
 {
        mlxsw_core->emad.enable_string_tlv = true;
index 7213e45..02d9cc2 100644 (file)
@@ -427,6 +427,7 @@ struct mlxsw_driver {
 
        u8 txhdr_len;
        const struct mlxsw_config_profile *profile;
+       bool sdq_supports_cqe_v2;
 };
 
 int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
@@ -437,6 +438,11 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
 u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core);
 u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core);
 
+u32 mlxsw_core_read_utc_sec(struct mlxsw_core *mlxsw_core);
+u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core);
+
+bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core);
+
 void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core);
 
 bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
@@ -476,6 +482,8 @@ struct mlxsw_bus {
                        u8 *p_status);
        u32 (*read_frc_h)(void *bus_priv);
        u32 (*read_frc_l)(void *bus_priv);
+       u32 (*read_utc_sec)(void *bus_priv);
+       u32 (*read_utc_nsec)(void *bus_priv);
        u8 features;
 };
 
@@ -550,11 +558,17 @@ enum mlxsw_devlink_param_id {
        MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
 };
 
+struct mlxsw_cqe_ts {
+       u8 sec;
+       u32 nsec;
+};
+
 struct mlxsw_skb_cb {
        union {
                struct mlxsw_tx_info tx_info;
                struct mlxsw_rx_md_info rx_md_info;
        };
+       struct mlxsw_cqe_ts cqe_ts;
 };
 
 static inline struct mlxsw_skb_cb *mlxsw_skb_cb(struct sk_buff *skb)
index 49fee03..af37e65 100644 (file)
@@ -5,7 +5,6 @@
 #include <linux/module.h>
 #include <linux/err.h>
 #include <linux/types.h>
-#include <linux/err.h>
 #include <linux/auxiliary_bus.h>
 #include <linux/idr.h>
 #include <linux/gfp.h>
index 83659fb..50527ad 100644 (file)
@@ -103,6 +103,8 @@ struct mlxsw_pci {
        struct pci_dev *pdev;
        u8 __iomem *hw_addr;
        u64 free_running_clock_offset;
+       u64 utc_sec_offset;
+       u64 utc_nsec_offset;
        struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
        u32 doorbell_offset;
        struct mlxsw_core *core;
@@ -456,9 +458,9 @@ static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
 {
        q->u.cq.v = mlxsw_pci->max_cqe_ver;
 
-       /* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
        if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
-           q->num < mlxsw_pci->num_sdq_cqs)
+           q->num < mlxsw_pci->num_sdq_cqs &&
+           !mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core))
                q->u.cq.v = MLXSW_PCI_CQE_V1;
 }
 
@@ -511,9 +513,26 @@ static unsigned int mlxsw_pci_read32_off(struct mlxsw_pci *mlxsw_pci,
        return ioread32be(mlxsw_pci->hw_addr + off);
 }
 
+static void mlxsw_pci_skb_cb_ts_set(struct mlxsw_pci *mlxsw_pci,
+                                   struct sk_buff *skb,
+                                   enum mlxsw_pci_cqe_v cqe_v, char *cqe)
+{
+       if (cqe_v != MLXSW_PCI_CQE_V2)
+               return;
+
+       if (mlxsw_pci_cqe2_time_stamp_type_get(cqe) !=
+           MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC)
+               return;
+
+       mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe);
+       mlxsw_skb_cb(skb)->cqe_ts.nsec =
+               mlxsw_pci_cqe2_time_stamp_nsec_get(cqe);
+}
+
 static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
                                     struct mlxsw_pci_queue *q,
                                     u16 consumer_counter_limit,
+                                    enum mlxsw_pci_cqe_v cqe_v,
                                     char *cqe)
 {
        struct pci_dev *pdev = mlxsw_pci->pdev;
@@ -533,6 +552,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
 
        if (unlikely(!tx_info.is_emad &&
                     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+               mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
                mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
                                           tx_info.local_port);
                skb = NULL;
@@ -653,6 +673,8 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
                mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
        }
 
+       mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
+
        byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
        if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
                byte_count -= ETH_FCS_LEN;
@@ -704,7 +726,7 @@ static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
 
                        sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
                        mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
-                                                wqe_counter, ncqe);
+                                                wqe_counter, q->u.cq.v, ncqe);
                        q->u.cq.comp_sdq_count++;
                } else {
                        struct mlxsw_pci_queue *rdq;
@@ -1537,6 +1559,24 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
        mlxsw_pci->free_running_clock_offset =
                mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);
 
+       if (mlxsw_cmd_mbox_query_fw_utc_sec_bar_get(mbox) != 0) {
+               dev_err(&pdev->dev, "Unsupported UTC sec BAR queried from hw\n");
+               err = -EINVAL;
+               goto err_utc_sec_bar;
+       }
+
+       mlxsw_pci->utc_sec_offset =
+               mlxsw_cmd_mbox_query_fw_utc_sec_offset_get(mbox);
+
+       if (mlxsw_cmd_mbox_query_fw_utc_nsec_bar_get(mbox) != 0) {
+               dev_err(&pdev->dev, "Unsupported UTC nsec BAR queried from hw\n");
+               err = -EINVAL;
+               goto err_utc_nsec_bar;
+       }
+
+       mlxsw_pci->utc_nsec_offset =
+               mlxsw_cmd_mbox_query_fw_utc_nsec_offset_get(mbox);
+
        num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
        err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
        if (err)
@@ -1601,6 +1641,8 @@ err_query_resources:
 err_boardinfo:
        mlxsw_pci_fw_area_fini(mlxsw_pci);
 err_fw_area_init:
+err_utc_nsec_bar:
+err_utc_sec_bar:
 err_fr_rn_clk_bar:
 err_doorbell_page_bar:
 err_iface_rev:
@@ -1830,6 +1872,20 @@ static u32 mlxsw_pci_read_frc_l(void *bus_priv)
        return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_l);
 }
 
+static u32 mlxsw_pci_read_utc_sec(void *bus_priv)
+{
+       struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+       return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_sec_offset);
+}
+
+static u32 mlxsw_pci_read_utc_nsec(void *bus_priv)
+{
+       struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+       return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_nsec_offset);
+}
+
 static const struct mlxsw_bus mlxsw_pci_bus = {
        .kind                   = "pci",
        .init                   = mlxsw_pci_init,
@@ -1839,6 +1895,8 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
        .cmd_exec               = mlxsw_pci_cmd_exec,
        .read_frc_h             = mlxsw_pci_read_frc_h,
        .read_frc_l             = mlxsw_pci_read_frc_l,
+       .read_utc_sec           = mlxsw_pci_read_utc_sec,
+       .read_utc_nsec          = mlxsw_pci_read_utc_nsec,
        .features               = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
 };
 
index 6410780..1e240cd 100644 (file)
@@ -29,6 +29,7 @@
 #include <net/pkt_cls.h>
 #include <net/netevent.h>
 #include <net/addrconf.h>
+#include <linux/ptp_classify.h>
 
 #include "spectrum.h"
 #include "pci.h"
@@ -230,8 +231,8 @@ void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
                               counter_index);
 }
 
-static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
-                                    const struct mlxsw_tx_info *tx_info)
+void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+                             const struct mlxsw_tx_info *tx_info)
 {
        char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
 
@@ -246,6 +247,82 @@ static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
        mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
 }
 
+int
+mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
+                                 struct mlxsw_sp_port *mlxsw_sp_port,
+                                 struct sk_buff *skb,
+                                 const struct mlxsw_tx_info *tx_info)
+{
+       char *txhdr;
+       u16 max_fid;
+       int err;
+
+       if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+               err = -ENOMEM;
+               goto err_skb_cow_head;
+       }
+
+       if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
+               err = -EIO;
+               goto err_res_valid;
+       }
+       max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
+
+       txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+       memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+       mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+       mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+       mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
+       mlxsw_tx_hdr_fid_valid_set(txhdr, true);
+       mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
+       mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
+       return 0;
+
+err_res_valid:
+err_skb_cow_head:
+       this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+       dev_kfree_skb_any(skb);
+       return err;
+}
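/* Worked example for the FID arithmetic above (demo-only; the numbers are
 * made up and max_fid really comes from MLXSW_CORE_RES_GET(mlxsw_core,
 * FID)): the PTP data packet is steered through a per-port FID placed just
 * beyond the device's real FID range.
 */
static unsigned int demo_ptp_tx_fid(unsigned int max_fid,
				    unsigned int local_port)
{
	/* e.g. max_fid = 4000, local_port = 5  ->  FID 4004 */
	return max_fid + local_port - 1;
}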
+
+static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
+{
+       unsigned int type;
+
+       if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+               return false;
+
+       type = ptp_classify_raw(skb);
+       return !!ptp_parse_header(skb, type);
+}
+
+static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
+                                struct mlxsw_sp_port *mlxsw_sp_port,
+                                struct sk_buff *skb,
+                                const struct mlxsw_tx_info *tx_info)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+       /* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
+        * need special handling and cannot be transmitted as regular control
+        * packets.
+        */
+       if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
+               return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
+                                                         mlxsw_sp_port, skb,
+                                                         tx_info);
+
+       if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+               this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+               dev_kfree_skb_any(skb);
+               return -ENOMEM;
+       }
+
+       mlxsw_sp_txhdr_construct(skb, tx_info);
+       return 0;
+}
+
 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
 {
        switch (state) {
@@ -648,12 +725,6 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
        u64 len;
        int err;
 
-       if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
-               this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
-               dev_kfree_skb_any(skb);
-               return NETDEV_TX_OK;
-       }
-
        memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
 
        if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
@@ -664,7 +735,11 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
 
-       mlxsw_sp_txhdr_construct(skb, &tx_info);
+       err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
+                                   &tx_info);
+       if (err)
+               return NETDEV_TX_OK;
+
        /* TX header is consumed by HW on the way so we shouldn't count its
         * bytes as being sent.
         */
@@ -2666,6 +2741,7 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
        .get_stats_count = mlxsw_sp1_get_stats_count,
        .get_stats_strings = mlxsw_sp1_get_stats_strings,
        .get_stats      = mlxsw_sp1_get_stats,
+       .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
 };
 
 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
@@ -2682,6 +2758,24 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
        .get_stats_count = mlxsw_sp2_get_stats_count,
        .get_stats_strings = mlxsw_sp2_get_stats_strings,
        .get_stats      = mlxsw_sp2_get_stats,
+       .txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
+};
+
+static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
+       .clock_init     = mlxsw_sp2_ptp_clock_init,
+       .clock_fini     = mlxsw_sp2_ptp_clock_fini,
+       .init           = mlxsw_sp2_ptp_init,
+       .fini           = mlxsw_sp2_ptp_fini,
+       .receive        = mlxsw_sp2_ptp_receive,
+       .transmitted    = mlxsw_sp2_ptp_transmitted,
+       .hwtstamp_get   = mlxsw_sp2_ptp_hwtstamp_get,
+       .hwtstamp_set   = mlxsw_sp2_ptp_hwtstamp_set,
+       .shaper_work    = mlxsw_sp2_ptp_shaper_work,
+       .get_ts_info    = mlxsw_sp2_ptp_get_ts_info,
+       .get_stats_count = mlxsw_sp2_get_stats_count,
+       .get_stats_strings = mlxsw_sp2_get_stats_strings,
+       .get_stats      = mlxsw_sp2_get_stats,
+       .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
 };
 
 struct mlxsw_sp_sample_trigger_node {
@@ -3327,7 +3421,7 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
        mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
        mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
        mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
-       mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
+       mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
        mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
        mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
        mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
@@ -3831,6 +3925,7 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp1_config_profile,
+       .sdq_supports_cqe_v2            = false,
 };
 
 static struct mlxsw_driver mlxsw_sp2_driver = {
@@ -3869,6 +3964,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp2_config_profile,
+       .sdq_supports_cqe_v2            = true,
 };
 
 static struct mlxsw_driver mlxsw_sp3_driver = {
@@ -3907,6 +4003,7 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp2_config_profile,
+       .sdq_supports_cqe_v2            = true,
 };
 
 static struct mlxsw_driver mlxsw_sp4_driver = {
@@ -3943,6 +4040,7 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp2_config_profile,
+       .sdq_supports_cqe_v2            = true,
 };
 
 bool mlxsw_sp_port_dev_check(const struct net_device *dev)
index 50a9380..c8ff2a6 100644 (file)
@@ -243,6 +243,10 @@ struct mlxsw_sp_ptp_ops {
        void (*get_stats_strings)(u8 **p);
        void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
                          u64 *data, int data_index);
+       int (*txhdr_construct)(struct mlxsw_core *mlxsw_core,
+                              struct mlxsw_sp_port *mlxsw_sp_port,
+                              struct sk_buff *skb,
+                              const struct mlxsw_tx_info *tx_info);
 };
 
 static inline struct mlxsw_sp_upper *
@@ -700,6 +704,12 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
                                unsigned int *p_counter_index);
 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
                                unsigned int counter_index);
+void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+                             const struct mlxsw_tx_info *tx_info);
+int mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
+                                     struct mlxsw_sp_port *mlxsw_sp_port,
+                                     struct sk_buff *skb,
+                                     const struct mlxsw_tx_info *tx_info);
 bool mlxsw_sp_port_dev_check(const struct net_device *dev);
 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
index 5116d7e..2e0b704 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/net_tstamp.h>
+#include <linux/refcount.h>
 
 #include "spectrum.h"
 #include "spectrum_ptp.h"
@@ -39,6 +40,14 @@ struct mlxsw_sp1_ptp_state {
        u32 gc_cycle;
 };
 
+struct mlxsw_sp2_ptp_state {
+       struct mlxsw_sp_ptp_state common;
+       refcount_t ptp_port_enabled_ref; /* Number of ports with time stamping
+                                         * enabled.
+                                         */
+       struct hwtstamp_config config;
+};
+
 struct mlxsw_sp1_ptp_key {
        u16 local_port;
        u8 message_type;
@@ -85,6 +94,13 @@ mlxsw_sp1_ptp_state(struct mlxsw_sp *mlxsw_sp)
                            common);
 }
 
+static struct mlxsw_sp2_ptp_state *
+mlxsw_sp2_ptp_state(struct mlxsw_sp *mlxsw_sp)
+{
+       return container_of(mlxsw_sp->ptp_state, struct mlxsw_sp2_ptp_state,
+                           common);
+}
+
 static struct mlxsw_sp1_ptp_clock *
 mlxsw_sp1_ptp_clock(struct ptp_clock_info *ptp)
 {
@@ -328,6 +344,153 @@ void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock_common)
        kfree(clock);
 }
 
+static u64 mlxsw_sp2_ptp_read_utc(struct mlxsw_sp_ptp_clock *clock,
+                                 struct ptp_system_timestamp *sts)
+{
+       struct mlxsw_core *mlxsw_core = clock->core;
+       u32 utc_sec1, utc_sec2, utc_nsec;
+
+       utc_sec1 = mlxsw_core_read_utc_sec(mlxsw_core);
+       ptp_read_system_prets(sts);
+       utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
+       ptp_read_system_postts(sts);
+       utc_sec2 = mlxsw_core_read_utc_sec(mlxsw_core);
+
+       if (utc_sec1 != utc_sec2) {
+               /* Wrap around. */
+               ptp_read_system_prets(sts);
+               utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
+               ptp_read_system_postts(sts);
+       }
+
+       return (u64)utc_sec2 * NSEC_PER_SEC + utc_nsec;
+}
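/* Illustrative sketch of the read-retry pattern above (demo-only; the
 * demo_read_* helpers are hypothetical stand-ins for the UTC_SEC/UTC_NSEC
 * BAR reads): sample seconds, then nanoseconds, then seconds again, and
 * re-read the nanoseconds if a second boundary was crossed in between, so
 * the combined pair is always consistent.
 */
#include <stdint.h>

#define DEMO_NSEC_PER_SEC 1000000000ULL

static uint32_t demo_read_utc_sec(void)  { return 0; /* stand-in for HW read */ }
static uint32_t demo_read_utc_nsec(void) { return 0; /* stand-in for HW read */ }

static uint64_t demo_read_utc(void)
{
	uint32_t sec1, sec2, nsec;

	sec1 = demo_read_utc_sec();
	nsec = demo_read_utc_nsec();
	sec2 = demo_read_utc_sec();

	if (sec1 != sec2)	/* seconds advanced between the two reads */
		nsec = demo_read_utc_nsec();

	return (uint64_t)sec2 * DEMO_NSEC_PER_SEC + nsec;
}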
+
+static int
+mlxsw_sp2_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
+{
+       struct mlxsw_core *mlxsw_core = clock->core;
+       char mtutc_pl[MLXSW_REG_MTUTC_LEN];
+       u32 sec, nsec_rem;
+
+       sec = div_u64_rem(nsec, NSEC_PER_SEC, &nsec_rem);
+       mlxsw_reg_mtutc_pack(mtutc_pl,
+                            MLXSW_REG_MTUTC_OPERATION_SET_TIME_IMMEDIATE,
+                            0, sec, nsec_rem, 0);
+       return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
+}
+
+static int mlxsw_sp2_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+       struct mlxsw_sp_ptp_clock *clock =
+               container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+       s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
+
+       /* In Spectrum-2 and newer ASICs, the frequency adjustment in MTUTC is
+        * reversed, positive values mean to decrease the frequency. Adjust the
+        * sign of PPB to this behavior.
+        */
+       return mlxsw_sp_ptp_phc_adjfreq(clock, -ppb);
+}
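/* Worked example for the sign flip above (demo-only, not the kernel
 * scaled_ppm_to_ppb() helper): scaled_ppm carries ppm with a 16-bit binary
 * fraction, so 65536 scaled_ppm is 1 ppm, i.e. about 1000 ppb.  Because
 * MTUTC on Spectrum-2 and newer treats positive adjustments as "slow
 * down", the driver hands the negated value to mlxsw_sp_ptp_phc_adjfreq().
 */
static long demo_scaled_ppm_to_mtutc_ppb(long scaled_ppm)
{
	long ppb = (long)(((long long)scaled_ppm * 1000) / 65536);

	return -ppb;	/* e.g. scaled_ppm = 65536 -> program -1000 ppb */
}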
+
+static int mlxsw_sp2_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+       struct mlxsw_sp_ptp_clock *clock =
+               container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+       struct mlxsw_core *mlxsw_core = clock->core;
+       char mtutc_pl[MLXSW_REG_MTUTC_LEN];
+
+       /* HW time adjustment range is s16. If out of range, set time instead. */
+       if (delta < S16_MIN || delta > S16_MAX) {
+               u64 nsec;
+
+               nsec = mlxsw_sp2_ptp_read_utc(clock, NULL);
+               nsec += delta;
+
+               return mlxsw_sp2_ptp_phc_settime(clock, nsec);
+       }
+
+       mlxsw_reg_mtutc_pack(mtutc_pl,
+                            MLXSW_REG_MTUTC_OPERATION_ADJUST_TIME,
+                            0, 0, 0, delta);
+       return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
+}
+
+static int mlxsw_sp2_ptp_gettimex(struct ptp_clock_info *ptp,
+                                 struct timespec64 *ts,
+                                 struct ptp_system_timestamp *sts)
+{
+       struct mlxsw_sp_ptp_clock *clock =
+               container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+       u64 nsec;
+
+       nsec = mlxsw_sp2_ptp_read_utc(clock, sts);
+       *ts = ns_to_timespec64(nsec);
+
+       return 0;
+}
+
+static int mlxsw_sp2_ptp_settime(struct ptp_clock_info *ptp,
+                                const struct timespec64 *ts)
+{
+       struct mlxsw_sp_ptp_clock *clock =
+               container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+       u64 nsec = timespec64_to_ns(ts);
+
+       return mlxsw_sp2_ptp_phc_settime(clock, nsec);
+}
+
+static const struct ptp_clock_info mlxsw_sp2_ptp_clock_info = {
+       .owner          = THIS_MODULE,
+       .name           = "mlxsw_sp_clock",
+       .max_adj        = MLXSW_REG_MTUTC_MAX_FREQ_ADJ,
+       .adjfine        = mlxsw_sp2_ptp_adjfine,
+       .adjtime        = mlxsw_sp2_ptp_adjtime,
+       .gettimex64     = mlxsw_sp2_ptp_gettimex,
+       .settime64      = mlxsw_sp2_ptp_settime,
+};
+
+struct mlxsw_sp_ptp_clock *
+mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
+{
+       struct mlxsw_sp_ptp_clock *clock;
+       int err;
+
+       clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+       if (!clock)
+               return ERR_PTR(-ENOMEM);
+
+       clock->core = mlxsw_sp->core;
+
+       clock->ptp_info = mlxsw_sp2_ptp_clock_info;
+
+       err = mlxsw_sp2_ptp_phc_settime(clock, 0);
+       if (err) {
+               dev_err(dev, "setting UTC time failed %d\n", err);
+               goto err_ptp_phc_settime;
+       }
+
+       clock->ptp = ptp_clock_register(&clock->ptp_info, dev);
+       if (IS_ERR(clock->ptp)) {
+               err = PTR_ERR(clock->ptp);
+               dev_err(dev, "ptp_clock_register failed %d\n", err);
+               goto err_ptp_clock_register;
+       }
+
+       return clock;
+
+err_ptp_clock_register:
+err_ptp_phc_settime:
+       kfree(clock);
+       return ERR_PTR(err);
+}
+
+void mlxsw_sp2_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
+{
+       ptp_clock_unregister(clock->ptp);
+       kfree(clock);
+}
+
 static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
                              u8 *p_domain_number,
                              u8 *p_message_type,
@@ -835,10 +998,44 @@ static int mlxsw_sp1_ptp_shaper_params_set(struct mlxsw_sp *mlxsw_sp)
        return 0;
 }
 
+static int mlxsw_sp_ptp_traps_set(struct mlxsw_sp *mlxsw_sp)
+{
+       u16 event_message_type;
+       int err;
+
+       /* Deliver these message types as PTP0. */
+       event_message_type = BIT(PTP_MSGTYPE_SYNC) |
+                            BIT(PTP_MSGTYPE_DELAY_REQ) |
+                            BIT(PTP_MSGTYPE_PDELAY_REQ) |
+                            BIT(PTP_MSGTYPE_PDELAY_RESP);
+
+       err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
+                                     event_message_type);
+       if (err)
+               return err;
+
+       /* Everything else is PTP1. */
+       err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
+                                     ~event_message_type);
+       if (err)
+               goto err_mtptpt1_set;
+
+       return 0;
+
+err_mtptpt1_set:
+       mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+       return err;
+}
+
+static void mlxsw_sp_ptp_traps_unset(struct mlxsw_sp *mlxsw_sp)
+{
+       mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
+       mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+}
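/* Worked example for the trap split above (demo-only): with the IEEE 1588
 * event message types SYNC=0, DELAY_REQ=1, PDELAY_REQ=2 and PDELAY_RESP=3,
 * the bitmap delivered to the PTP0 trap is 0x000f and its complement,
 * 0xfff0, covers every general message routed to PTP1.
 */
#include <stdint.h>

static uint16_t demo_ptp0_message_types(void)
{
	return (1U << 0) | (1U << 1) | (1U << 2) | (1U << 3);	/* 0x000f */
}

static uint16_t demo_ptp1_message_types(void)
{
	return (uint16_t)~demo_ptp0_message_types();		/* 0xfff0 */
}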
+
 struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
 {
        struct mlxsw_sp1_ptp_state *ptp_state;
-       u16 message_type;
        int err;
 
        err = mlxsw_sp1_ptp_shaper_params_set(mlxsw_sp);
@@ -857,22 +1054,9 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
        if (err)
                goto err_hashtable_init;
 
-       /* Delive these message types as PTP0. */
-       message_type = BIT(PTP_MSGTYPE_SYNC) |
-                      BIT(PTP_MSGTYPE_DELAY_REQ) |
-                      BIT(PTP_MSGTYPE_PDELAY_REQ) |
-                      BIT(PTP_MSGTYPE_PDELAY_RESP);
-       err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
-                                     message_type);
+       err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
        if (err)
-               goto err_mtptpt_set;
-
-       /* Everything else is PTP1. */
-       message_type = ~message_type;
-       err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
-                                     message_type);
-       if (err)
-               goto err_mtptpt1_set;
+               goto err_ptp_traps_set;
 
        err = mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, true);
        if (err)
@@ -884,10 +1068,8 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
        return &ptp_state->common;
 
 err_fifo_clr:
-       mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
-err_mtptpt1_set:
-       mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
-err_mtptpt_set:
+       mlxsw_sp_ptp_traps_unset(mlxsw_sp);
+err_ptp_traps_set:
        rhltable_destroy(&ptp_state->unmatched_ht);
 err_hashtable_init:
        kfree(ptp_state);
@@ -904,8 +1086,7 @@ void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
        cancel_delayed_work_sync(&ptp_state->ht_gc_dw);
        mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp, 0, 0);
        mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false);
-       mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
-       mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+       mlxsw_sp_ptp_traps_unset(mlxsw_sp);
        rhltable_free_and_destroy(&ptp_state->unmatched_ht,
                                  &mlxsw_sp1_ptp_unmatched_free_fn, NULL);
        kfree(ptp_state);
@@ -1176,3 +1357,354 @@ void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                *data++ = *(u64 *)(stats + offset);
        }
 }
+
+struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
+{
+       struct mlxsw_sp2_ptp_state *ptp_state;
+       int err;
+
+       ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
+       if (!ptp_state)
+               return ERR_PTR(-ENOMEM);
+
+       ptp_state->common.mlxsw_sp = mlxsw_sp;
+
+       err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
+       if (err)
+               goto err_ptp_traps_set;
+
+       refcount_set(&ptp_state->ptp_port_enabled_ref, 0);
+       return &ptp_state->common;
+
+err_ptp_traps_set:
+       kfree(ptp_state);
+       return ERR_PTR(err);
+}
+
+void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
+{
+       struct mlxsw_sp *mlxsw_sp = ptp_state_common->mlxsw_sp;
+       struct mlxsw_sp2_ptp_state *ptp_state;
+
+       ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+
+       mlxsw_sp_ptp_traps_unset(mlxsw_sp);
+       kfree(ptp_state);
+}
+
+static u32 mlxsw_ptp_utc_time_stamp_sec_get(struct mlxsw_core *mlxsw_core,
+                                           u8 cqe_ts_sec)
+{
+       u32 utc_sec = mlxsw_core_read_utc_sec(mlxsw_core);
+
+       if (cqe_ts_sec > (utc_sec & 0xff))
+               /* Time stamp above the last bits of UTC (UTC & 0xff) means the
+                * latter has wrapped after the time stamp was collected.
+                */
+               utc_sec -= 256;
+
+       utc_sec &= ~0xff;
+       utc_sec |= cqe_ts_sec;
+
+       return utc_sec;
+}
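/* Worked example for the reconstruction above (demo-only): the CQE carries
 * only the low 8 bits of the UTC seconds, so the full value is rebuilt from
 * the current UTC seconds, stepping back one 256 s epoch when the counter
 * wrapped after the stamp was taken.
 */
#include <stdint.h>

static uint32_t demo_full_sec(uint32_t utc_sec_now, uint8_t cqe_ts_sec)
{
	if (cqe_ts_sec > (utc_sec_now & 0xff))
		utc_sec_now -= 256;	/* wrapped since the stamp was taken */

	return (utc_sec_now & ~0xffU) | cqe_ts_sec;
}

/* e.g. demo_full_sec(0x1003, 0xfe) == 0x0ffe:
 * 0xfe > 0x03, so step back to 0x0f03, then splice in the low byte 0xfe.
 */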
+
+static void mlxsw_sp2_ptp_hwtstamp_fill(struct mlxsw_core *mlxsw_core,
+                                       const struct mlxsw_skb_cb *cb,
+                                       struct skb_shared_hwtstamps *hwtstamps)
+{
+       u64 ts_sec, ts_nsec, nsec;
+
+       WARN_ON_ONCE(!cb->cqe_ts.sec && !cb->cqe_ts.nsec);
+
+       /* The time stamp in the CQE is represented by 38 bits, which is a short
+        * representation of UTC time. Software should create the full time
+        * stamp using the global UTC clock. The seconds have only 8 bits in the
+        * CQE, to create the full time stamp, use the current UTC time and fix
+        * the seconds according to the relation between UTC seconds and CQE
+        * seconds.
+        */
+       ts_sec = mlxsw_ptp_utc_time_stamp_sec_get(mlxsw_core, cb->cqe_ts.sec);
+       ts_nsec = cb->cqe_ts.nsec;
+
+       nsec = ts_sec * NSEC_PER_SEC + ts_nsec;
+
+       hwtstamps->hwtstamp = ns_to_ktime(nsec);
+}
+
+void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+                          u16 local_port)
+{
+       struct skb_shared_hwtstamps hwtstamps;
+
+       mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
+                                   &hwtstamps);
+       *skb_hwtstamps(skb) = hwtstamps;
+       mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
+}
+
+void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+                              struct sk_buff *skb, u16 local_port)
+{
+       struct skb_shared_hwtstamps hwtstamps;
+
+       mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
+                                   &hwtstamps);
+       skb_tstamp_tx(skb, &hwtstamps);
+       dev_kfree_skb_any(skb);
+}
+
+int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+                              struct hwtstamp_config *config)
+{
+       struct mlxsw_sp2_ptp_state *ptp_state;
+
+       ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+       *config = ptp_state->config;
+       return 0;
+}
+
+static int
+mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
+                               u16 *p_ing_types, u16 *p_egr_types,
+                               enum hwtstamp_rx_filters *p_rx_filter)
+{
+       enum hwtstamp_rx_filters rx_filter = config->rx_filter;
+       enum hwtstamp_tx_types tx_type = config->tx_type;
+       u16 ing_types = 0x00;
+       u16 egr_types = 0x00;
+
+       *p_rx_filter = rx_filter;
+
+       switch (rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               ing_types = 0x00;
+               break;
+       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_EVENT:
+               /* In Spectrum-2 and above, all packets get time stamp by
+                * default and the driver fills the time stamp only for event
+                * packets. Return all event types even if only specific types
+                * were required.
+                */
+               ing_types = 0x0f;
+               *p_rx_filter = HWTSTAMP_FILTER_SOME;
+               break;
+       case HWTSTAMP_FILTER_ALL:
+       case HWTSTAMP_FILTER_SOME:
+       case HWTSTAMP_FILTER_NTP_ALL:
+               return -ERANGE;
+       default:
+               return -EINVAL;
+       }
+
+       switch (tx_type) {
+       case HWTSTAMP_TX_OFF:
+               egr_types = 0x00;
+               break;
+       case HWTSTAMP_TX_ON:
+               egr_types = 0x0f;
+               break;
+       case HWTSTAMP_TX_ONESTEP_SYNC:
+       case HWTSTAMP_TX_ONESTEP_P2P:
+               return -ERANGE;
+       default:
+               return -EINVAL;
+       }
+
+       *p_ing_types = ing_types;
+       *p_egr_types = egr_types;
+       return 0;
+}
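/* Usage sketch for the mapping above (demo-only; the struct and its demo_*
 * name are hypothetical, the HWTSTAMP_* constants come from the UAPI
 * <linux/net_tstamp.h>): a SIOCSHWTSTAMP request with tx_type
 * HWTSTAMP_TX_ON and rx_filter HWTSTAMP_FILTER_PTP_V2_EVENT yields
 * ing_types = egr_types = 0x0f, and the filter reported back to user space
 * becomes HWTSTAMP_FILTER_SOME because the device stamps all four PTP
 * event types regardless of which one was requested.
 */
#include <linux/net_tstamp.h>

struct demo_ts_mapping {
	int tx_type;
	int rx_filter_in;
	unsigned short ing_types;
	unsigned short egr_types;
	int rx_filter_out;
};

static const struct demo_ts_mapping demo_v2_event_tx_on = {
	.tx_type	= HWTSTAMP_TX_ON,
	.rx_filter_in	= HWTSTAMP_FILTER_PTP_V2_EVENT,
	.ing_types	= 0x0f,
	.egr_types	= 0x0f,
	.rx_filter_out	= HWTSTAMP_FILTER_SOME,
};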
+
+static int mlxsw_sp2_ptp_mtpcpc_set(struct mlxsw_sp *mlxsw_sp, bool ptp_trap_en,
+                                   u16 ing_types, u16 egr_types)
+{
+       char mtpcpc_pl[MLXSW_REG_MTPCPC_LEN];
+
+       mlxsw_reg_mtpcpc_pack(mtpcpc_pl, false, 0, ptp_trap_en, ing_types,
+                             egr_types);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtpcpc), mtpcpc_pl);
+}
+
+static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
+                               u16 egr_types,
+                               struct hwtstamp_config new_config)
+{
+       struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+       int err;
+
+       err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, true, ing_types, egr_types);
+       if (err)
+               return err;
+
+       ptp_state->config = new_config;
+       return 0;
+}
+
+static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
+                                struct hwtstamp_config new_config)
+{
+       struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+       int err;
+
+       err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, false, 0, 0);
+       if (err)
+               return err;
+
+       ptp_state->config = new_config;
+       return 0;
+}
+
+static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
+                                       u16 ing_types, u16 egr_types,
+                                       struct hwtstamp_config new_config)
+{
+       struct mlxsw_sp2_ptp_state *ptp_state;
+       int err;
+
+       ASSERT_RTNL();
+
+       ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+       if (refcount_inc_not_zero(&ptp_state->ptp_port_enabled_ref))
+               return 0;
+
+       err = mlxsw_sp2_ptp_enable(mlxsw_sp_port->mlxsw_sp, ing_types,
+                                  egr_types, new_config);
+       if (err)
+               return err;
+
+       refcount_set(&ptp_state->ptp_port_enabled_ref, 1);
+
+       return 0;
+}
+
+static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
+                                         struct hwtstamp_config new_config)
+{
+       struct mlxsw_sp2_ptp_state *ptp_state;
+       int err;
+
+       ASSERT_RTNL();
+
+       ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+       if (!refcount_dec_and_test(&ptp_state->ptp_port_enabled_ref))
+               return 0;
+
+       err = mlxsw_sp2_ptp_disable(mlxsw_sp_port->mlxsw_sp, new_config);
+       if (err)
+               goto err_ptp_disable;
+
+       return 0;
+
+err_ptp_disable:
+       refcount_set(&ptp_state->ptp_port_enabled_ref, 1);
+       return err;
+}
+
+int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                              struct hwtstamp_config *config)
+{
+       enum hwtstamp_rx_filters rx_filter;
+       struct hwtstamp_config new_config;
+       u16 new_ing_types, new_egr_types;
+       bool ptp_enabled;
+       int err;
+
+       err = mlxsw_sp2_ptp_get_message_types(config, &new_ing_types,
+                                             &new_egr_types, &rx_filter);
+       if (err)
+               return err;
+
+       new_config.flags = config->flags;
+       new_config.tx_type = config->tx_type;
+       new_config.rx_filter = rx_filter;
+
+       ptp_enabled = mlxsw_sp_port->ptp.ing_types ||
+                     mlxsw_sp_port->ptp.egr_types;
+
+       if ((new_ing_types || new_egr_types) && !ptp_enabled) {
+               err = mlxsw_sp2_ptp_configure_port(mlxsw_sp_port, new_ing_types,
+                                                  new_egr_types, new_config);
+               if (err)
+                       return err;
+       } else if (!new_ing_types && !new_egr_types && ptp_enabled) {
+               err = mlxsw_sp2_ptp_deconfigure_port(mlxsw_sp_port, new_config);
+               if (err)
+                       return err;
+       }
+
+       mlxsw_sp_port->ptp.ing_types = new_ing_types;
+       mlxsw_sp_port->ptp.egr_types = new_egr_types;
+
+       /* Notify the ioctl caller what we are actually timestamping. */
+       config->rx_filter = rx_filter;
+
+       return 0;
+}
+
+int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+                             struct ethtool_ts_info *info)
+{
+       info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
+
+       info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+                               SOF_TIMESTAMPING_RX_HARDWARE |
+                               SOF_TIMESTAMPING_RAW_HARDWARE;
+
+       info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+                        BIT(HWTSTAMP_TX_ON);
+
+       info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+                          BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+       return 0;
+}
+
+int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+                                struct mlxsw_sp_port *mlxsw_sp_port,
+                                struct sk_buff *skb,
+                                const struct mlxsw_tx_info *tx_info)
+{
+       mlxsw_sp_txhdr_construct(skb, tx_info);
+       return 0;
+}
+
+int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+                                 struct mlxsw_sp_port *mlxsw_sp_port,
+                                 struct sk_buff *skb,
+                                 const struct mlxsw_tx_info *tx_info)
+{
+       /* In Spectrum-2 and Spectrum-3, in order for PTP event packets to have
+        * their correction field correctly set on the egress port they must be
+        * transmitted as data packets. Such packets ingress the ASIC via the
+        * CPU port and must have a VLAN tag, as the CPU port is not configured
+        * with a PVID. Push the default VLAN (4095), which is configured as
+        * egress untagged on all the ports.
+        */
+       if (!skb_vlan_tagged(skb)) {
+               skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
+                                               MLXSW_SP_DEFAULT_VID);
+               if (!skb) {
+                       this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+                       return -ENOMEM;
+               }
+       }
+
+       return mlxsw_sp_txhdr_ptp_data_construct(mlxsw_core, mlxsw_sp_port, skb,
+                                                tx_info);
+}
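For reference, the hwtstamp_set path above is normally exercised from user space through the SIOCSHWTSTAMP ioctl. A minimal, illustrative sketch follows (the interface name "eth0" and the chosen filter are placeholders, not part of this patch); as the comment in mlxsw_sp2_ptp_get_message_types() notes, the driver may report back a coarser filter such as HWTSTAMP_FILTER_SOME.

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

int main(void)
{
	struct hwtstamp_config cfg = { 0 };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	cfg.tx_type = HWTSTAMP_TX_ON;			/* -> egr_types = 0x0f */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;	/* -> ing_types = 0x0f */
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder netdev */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else	/* the kernel reports back the filter actually applied */
		printf("granted rx_filter: %d\n", cfg.rx_filter);
	return 0;
}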
index c06cd13..2d1628f 100644 (file)
@@ -57,6 +57,40 @@ void mlxsw_sp1_get_stats_strings(u8 **p);
 void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                         u64 *data, int data_index);
 
+int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+                                struct mlxsw_sp_port *mlxsw_sp_port,
+                                struct sk_buff *skb,
+                                const struct mlxsw_tx_info *tx_info);
+
+struct mlxsw_sp_ptp_clock *
+mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev);
+
+void mlxsw_sp2_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock);
+
+struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp);
+
+void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state);
+
+void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+                          u16 local_port);
+
+void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+                              struct sk_buff *skb, u16 local_port);
+
+int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+                              struct hwtstamp_config *config);
+
+int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                              struct hwtstamp_config *config);
+
+int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+                             struct ethtool_ts_info *info);
+
+int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+                                 struct mlxsw_sp_port *mlxsw_sp_port,
+                                 struct sk_buff *skb,
+                                 const struct mlxsw_tx_info *tx_info);
+
 #else
 
 static inline struct mlxsw_sp_ptp_clock *
@@ -136,7 +170,14 @@ static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                                       u64 *data, int data_index)
 {
 }
-#endif
+
+static inline int
+mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+                             struct mlxsw_sp_port *mlxsw_sp_port,
+                             struct sk_buff *skb,
+                             const struct mlxsw_tx_info *tx_info)
+{
+       return -EOPNOTSUPP;
+}
 
 static inline struct mlxsw_sp_ptp_clock *
 mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
@@ -184,16 +225,25 @@ mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
        return -EOPNOTSUPP;
 }
 
-static inline void mlxsw_sp2_ptp_shaper_work(struct work_struct *work)
-{
-}
-
 static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
                                            struct ethtool_ts_info *info)
 {
        return mlxsw_sp_ptp_get_ts_info_noptp(info);
 }
 
+static inline int
+mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+                              struct mlxsw_sp_port *mlxsw_sp_port,
+                              struct sk_buff *skb,
+                              const struct mlxsw_tx_info *tx_info)
+{
+       return -EOPNOTSUPP;
+}
+#endif
+
+static inline void mlxsw_sp2_ptp_shaper_work(struct work_struct *work)
+{
+}
+
 static inline int mlxsw_sp2_get_stats_count(void)
 {
        return 0;
index e31f8fb..df2ab5c 100644 (file)
@@ -4233,7 +4233,7 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
                        }
 
                        /* If the chain is ended by an load/store pair then this
-                        * could serve as the new head of the the next chain.
+                        * could serve as the new head of the next chain.
                         */
                        if (curr_pair_is_memcpy(meta1, meta2)) {
                                head_ld_meta = meta1;
index 4c75948..bb06fa2 100644 (file)
@@ -8,7 +8,8 @@ sfc-y                   += efx.o efx_common.o efx_channels.o nic.o \
                           ef100.o ef100_nic.o ef100_netdev.o \
                           ef100_ethtool.o ef100_rx.o ef100_tx.o
 sfc-$(CONFIG_SFC_MTD)  += mtd.o
-sfc-$(CONFIG_SFC_SRIOV)        += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o mae.o
+sfc-$(CONFIG_SFC_SRIOV)        += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
+                           mae.o tc.o
 
 obj-$(CONFIG_SFC)      += sfc.o
 
index ab979fd..ee734b6 100644 (file)
@@ -2538,23 +2538,33 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
 
        if (rc)
                return rc;
+       down_write(&efx->filter_sem);
        rc = efx_mcdi_filter_table_probe(efx, nic_data->workaround_26807);
 
        if (rc)
-               return rc;
+               goto out_unlock;
 
        list_for_each_entry(vlan, &nic_data->vlan_list, list) {
                rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
                if (rc)
                        goto fail_add_vlan;
        }
-       return 0;
+       goto out_unlock;
 
 fail_add_vlan:
        efx_mcdi_filter_table_remove(efx);
+out_unlock:
+       up_write(&efx->filter_sem);
        return rc;
 }
 
+static void efx_ef10_filter_table_remove(struct efx_nic *efx)
+{
+       down_write(&efx->filter_sem);
+       efx_mcdi_filter_table_remove(efx);
+       up_write(&efx->filter_sem);
+}
+
 /* This creates an entry in the RX descriptor queue */
 static inline void
 efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
@@ -3211,9 +3221,7 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
 
        efx_device_detach_sync(efx);
        efx_net_stop(efx->net_dev);
-       down_write(&efx->filter_sem);
-       efx_mcdi_filter_table_remove(efx);
-       up_write(&efx->filter_sem);
+       efx_ef10_filter_table_remove(efx);
 
        rc = efx_ef10_vadaptor_free(efx, efx->vport_id);
        if (rc)
@@ -3243,9 +3251,7 @@ restore_vadaptor:
        if (rc2)
                goto reset_nic;
 restore_filters:
-       down_write(&efx->filter_sem);
        rc2 = efx_ef10_filter_table_probe(efx);
-       up_write(&efx->filter_sem);
        if (rc2)
                goto reset_nic;
 
@@ -3275,8 +3281,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
        efx_net_stop(efx->net_dev);
 
        mutex_lock(&efx->mac_lock);
-       down_write(&efx->filter_sem);
-       efx_mcdi_filter_table_remove(efx);
+       efx_ef10_filter_table_remove(efx);
 
        ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
                        efx->net_dev->dev_addr);
@@ -3286,7 +3291,6 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
                                sizeof(inbuf), NULL, 0, NULL);
 
        efx_ef10_filter_table_probe(efx);
-       up_write(&efx->filter_sem);
        mutex_unlock(&efx->mac_lock);
 
        if (was_enabled)
@@ -4092,7 +4096,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
        .ev_test_generate = efx_ef10_ev_test_generate,
        .filter_table_probe = efx_ef10_filter_table_probe,
        .filter_table_restore = efx_mcdi_filter_table_restore,
-       .filter_table_remove = efx_mcdi_filter_table_remove,
+       .filter_table_remove = efx_ef10_filter_table_remove,
        .filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
        .filter_insert = efx_mcdi_filter_insert,
        .filter_remove_safe = efx_mcdi_filter_remove_safe,
index 425017f..71aab3d 100644 (file)
@@ -431,6 +431,9 @@ static void ef100_pci_remove(struct pci_dev *pci_dev)
 
        probe_data = container_of(efx, struct efx_probe_data, efx);
        ef100_remove_netdev(probe_data);
+#ifdef CONFIG_SFC_SRIOV
+       efx_fini_struct_tc(efx);
+#endif
 
        ef100_remove(efx);
        efx_fini_io(efx);
index 9e65de1..17b9d37 100644 (file)
@@ -329,6 +329,10 @@ void ef100_remove_netdev(struct efx_probe_data *probe_data)
 
        ef100_unregister_netdev(efx);
 
+#ifdef CONFIG_SFC_SRIOV
+       efx_fini_tc(efx);
+#endif
+
        down_write(&efx->filter_sem);
        efx_mcdi_filter_table_remove(efx);
        up_write(&efx->filter_sem);
index 4625d35..8061efd 100644 (file)
@@ -24,6 +24,8 @@
 #include "ef100_tx.h"
 #include "ef100_sriov.h"
 #include "ef100_netdev.h"
+#include "tc.h"
+#include "mae.h"
 #include "rx_common.h"
 
 #define EF100_MAX_VIS 4096
@@ -374,26 +376,46 @@ static int ef100_filter_table_up(struct efx_nic *efx)
 {
        int rc;
 
+       down_write(&efx->filter_sem);
        rc = efx_mcdi_filter_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
-       if (rc) {
-               efx_mcdi_filter_table_down(efx);
-               return rc;
-       }
+       if (rc)
+               goto fail_unspec;
 
        rc = efx_mcdi_filter_add_vlan(efx, 0);
-       if (rc) {
-               efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
-               efx_mcdi_filter_table_down(efx);
-       }
+       if (rc)
+               goto fail_vlan0;
+       /* Drop the lock: we've finished altering table existence, and
+        * filter insertion will need to take the lock for read.
+        */
+       up_write(&efx->filter_sem);
+#ifdef CONFIG_SFC_SRIOV
+       rc = efx_tc_insert_rep_filters(efx);
+       /* Rep filter failure is nonfatal */
+       if (rc)
+               netif_warn(efx, drv, efx->net_dev,
+                          "Failed to insert representor filters, rc %d\n",
+                          rc);
+#endif
+       return 0;
 
+fail_vlan0:
+       efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
+fail_unspec:
+       efx_mcdi_filter_table_down(efx);
+       up_write(&efx->filter_sem);
        return rc;
 }
 
 static void ef100_filter_table_down(struct efx_nic *efx)
 {
+#ifdef CONFIG_SFC_SRIOV
+       efx_tc_remove_rep_filters(efx);
+#endif
+       down_write(&efx->filter_sem);
        efx_mcdi_filter_del_vlan(efx, 0);
        efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
        efx_mcdi_filter_table_down(efx);
+       up_write(&efx->filter_sem);
 }
 
 /*     Other
@@ -704,6 +726,31 @@ static unsigned int efx_ef100_recycle_ring_size(const struct efx_nic *efx)
        return 10 * EFX_RECYCLE_RING_SIZE_10G;
 }
 
+#ifdef CONFIG_SFC_SRIOV
+static int efx_ef100_get_base_mport(struct efx_nic *efx)
+{
+       struct ef100_nic_data *nic_data = efx->nic_data;
+       u32 selector, id;
+       int rc;
+
+       /* Construct mport selector for "physical network port" */
+       efx_mae_mport_wire(efx, &selector);
+       /* Look up actual mport ID */
+       rc = efx_mae_lookup_mport(efx, selector, &id);
+       if (rc)
+               return rc;
+       /* The ID should always fit in 16 bits, because that's how wide the
+        * corresponding fields in the RX prefix & TX override descriptor are
+        */
+       if (id >> 16)
+               netif_warn(efx, probe, efx->net_dev, "Bad base m-port id %#x\n",
+                          id);
+       nic_data->base_mport = id;
+       nic_data->have_mport = true;
+       return 0;
+}
+#endif
+
 static int compare_versions(const char *a, const char *b)
 {
        int a_major, a_minor, a_point, a_patch;
@@ -1064,6 +1111,34 @@ int ef100_probe_netdev_pf(struct efx_nic *efx)
        eth_hw_addr_set(net_dev, net_dev->perm_addr);
        memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN);
 
+       if (!nic_data->grp_mae)
+               return 0;
+
+#ifdef CONFIG_SFC_SRIOV
+       rc = efx_init_struct_tc(efx);
+       if (rc)
+               return rc;
+
+       rc = efx_ef100_get_base_mport(efx);
+       if (rc) {
+               netif_warn(efx, probe, net_dev,
+                          "Failed to probe base mport rc %d; representors will not function\n",
+                          rc);
+       }
+
+       rc = efx_init_tc(efx);
+       if (rc) {
+               /* Either we don't have an MAE at all (i.e. legacy v-switching),
+                * or we do but we failed to probe it.  In the latter case, we
+                * may not have set up default rules, in which case we won't be
+                * able to pass any traffic.  However, we don't fail the probe,
+                * because the user might need to use the netdevice to apply
+                * configuration changes to fix whatever's wrong with the MAE.
+                */
+               netif_warn(efx, probe, net_dev, "Failed to probe MAE rc %d\n",
+                          rc);
+       }
+#endif
        return 0;
 
 fail:
index 40f84a2..0295933 100644 (file)
@@ -72,6 +72,8 @@ struct ef100_nic_data {
        u8 port_id[ETH_ALEN];
        DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS);
        u64 stats[EF100_STAT_COUNT];
+       u32 base_mport;
+       bool have_mport; /* base_mport was populated successfully */
        bool grp_mae; /* MAE Privilege */
        u16 tso_max_hdr_len;
        u16 tso_max_payload_num_segs;
index d07539f..73ae465 100644 (file)
 #include "ef100_netdev.h"
 #include "ef100_nic.h"
 #include "mae.h"
+#include "rx_common.h"
 
 #define EFX_EF100_REP_DRIVER   "efx_ef100_rep"
 
+#define EFX_REP_DEFAULT_PSEUDO_RING_SIZE       64
+
+static int efx_ef100_rep_poll(struct napi_struct *napi, int weight);
+
 static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
                                     unsigned int i)
 {
        efv->parent = efx;
        efv->idx = i;
        INIT_LIST_HEAD(&efv->list);
+       efv->dflt.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+       INIT_LIST_HEAD(&efv->dflt.acts.list);
+       INIT_LIST_HEAD(&efv->rx_list);
+       spin_lock_init(&efv->rx_lock);
        efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
                          NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
                          NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
@@ -29,6 +38,25 @@ static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
        return 0;
 }
 
+static int efx_ef100_rep_open(struct net_device *net_dev)
+{
+       struct efx_rep *efv = netdev_priv(net_dev);
+
+       netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll,
+                      NAPI_POLL_WEIGHT);
+       napi_enable(&efv->napi);
+       return 0;
+}
+
+static int efx_ef100_rep_close(struct net_device *net_dev)
+{
+       struct efx_rep *efv = netdev_priv(net_dev);
+
+       napi_disable(&efv->napi);
+       netif_napi_del(&efv->napi);
+       return 0;
+}
+
 static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
 {
@@ -79,10 +107,26 @@ static int efx_ef100_rep_get_phys_port_name(struct net_device *dev,
        return 0;
 }
 
+static void efx_ef100_rep_get_stats64(struct net_device *dev,
+                                     struct rtnl_link_stats64 *stats)
+{
+       struct efx_rep *efv = netdev_priv(dev);
+
+       stats->rx_packets = atomic64_read(&efv->stats.rx_packets);
+       stats->tx_packets = atomic64_read(&efv->stats.tx_packets);
+       stats->rx_bytes = atomic64_read(&efv->stats.rx_bytes);
+       stats->tx_bytes = atomic64_read(&efv->stats.tx_bytes);
+       stats->rx_dropped = atomic64_read(&efv->stats.rx_dropped);
+       stats->tx_errors = atomic64_read(&efv->stats.tx_errors);
+}
+
 static const struct net_device_ops efx_ef100_rep_netdev_ops = {
+       .ndo_open               = efx_ef100_rep_open,
+       .ndo_stop               = efx_ef100_rep_close,
        .ndo_start_xmit         = efx_ef100_rep_xmit,
        .ndo_get_port_parent_id = efx_ef100_rep_get_port_parent_id,
        .ndo_get_phys_port_name = efx_ef100_rep_get_phys_port_name,
+       .ndo_get_stats64        = efx_ef100_rep_get_stats64,
 };
 
 static void efx_ef100_rep_get_drvinfo(struct net_device *dev,
@@ -106,10 +150,37 @@ static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev,
        efv->msg_enable = msg_enable;
 }
 
+static void efx_ef100_rep_ethtool_get_ringparam(struct net_device *net_dev,
+                                               struct ethtool_ringparam *ring,
+                                               struct kernel_ethtool_ringparam *kring,
+                                               struct netlink_ext_ack *ext_ack)
+{
+       struct efx_rep *efv = netdev_priv(net_dev);
+
+       ring->rx_max_pending = U32_MAX;
+       ring->rx_pending = efv->rx_pring_size;
+}
+
+static int efx_ef100_rep_ethtool_set_ringparam(struct net_device *net_dev,
+                                              struct ethtool_ringparam *ring,
+                                              struct kernel_ethtool_ringparam *kring,
+                                              struct netlink_ext_ack *ext_ack)
+{
+       struct efx_rep *efv = netdev_priv(net_dev);
+
+       if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->tx_pending)
+               return -EINVAL;
+
+       efv->rx_pring_size = ring->rx_pending;
+       return 0;
+}
+
 static const struct ethtool_ops efx_ef100_rep_ethtool_ops = {
        .get_drvinfo            = efx_ef100_rep_get_drvinfo,
        .get_msglevel           = efx_ef100_rep_ethtool_get_msglevel,
        .set_msglevel           = efx_ef100_rep_ethtool_set_msglevel,
+       .get_ringparam          = efx_ef100_rep_ethtool_get_ringparam,
+       .set_ringparam          = efx_ef100_rep_ethtool_set_ringparam,
 };
 
 static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx,
@@ -159,6 +230,7 @@ static int efx_ef100_configure_rep(struct efx_rep *efv)
        u32 selector;
        int rc;
 
+       efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE;
        /* Construct mport selector for corresponding VF */
        efx_mae_mport_vf(efx, efv->idx, &selector);
        /* Look up actual mport ID */
@@ -169,7 +241,14 @@ static int efx_ef100_configure_rep(struct efx_rep *efv)
        /* mport label should fit in 16 bits */
        WARN_ON(efv->mport >> 16);
 
-       return 0;
+       return efx_tc_configure_default_rule_rep(efv);
+}
+
+static void efx_ef100_deconfigure_rep(struct efx_rep *efv)
+{
+       struct efx_nic *efx = efv->parent;
+
+       efx_tc_deconfigure_default_rule(efx, &efv->dflt);
 }
 
 static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)
@@ -181,6 +260,7 @@ static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)
        list_del(&efv->list);
        spin_unlock_bh(&efx->vf_reps_lock);
        rtnl_unlock();
+       synchronize_rcu();
        free_netdev(efv->net_dev);
 }
 
@@ -202,19 +282,21 @@ int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
                pci_err(efx->pci_dev,
                        "Failed to configure representor for VF %d, rc %d\n",
                        i, rc);
-               goto fail;
+               goto fail1;
        }
        rc = register_netdev(efv->net_dev);
        if (rc) {
                pci_err(efx->pci_dev,
                        "Failed to register representor for VF %d, rc %d\n",
                        i, rc);
-               goto fail;
+               goto fail2;
        }
        pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i,
                efv->net_dev->name);
        return 0;
-fail:
+fail2:
+       efx_ef100_deconfigure_rep(efv);
+fail1:
        efx_ef100_rep_destroy_netdev(efv);
        return rc;
 }
@@ -228,6 +310,7 @@ void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv)
                return;
        netif_dbg(efx, drv, rep_dev, "Removing VF representor\n");
        unregister_netdev(rep_dev);
+       efx_ef100_deconfigure_rep(efv);
        efx_ef100_rep_destroy_netdev(efv);
 }
 
@@ -242,3 +325,111 @@ void efx_ef100_fini_vfreps(struct efx_nic *efx)
        list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
                efx_ef100_vfrep_destroy(efx, efv);
 }
+
+static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
+{
+       struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
+       unsigned int read_index;
+       struct list_head head;
+       struct sk_buff *skb;
+       bool need_resched;
+       int spent = 0;
+
+       INIT_LIST_HEAD(&head);
+       /* Grab up to 'weight' pending SKBs */
+       spin_lock_bh(&efv->rx_lock);
+       read_index = efv->write_index;
+       while (spent < weight && !list_empty(&efv->rx_list)) {
+               skb = list_first_entry(&efv->rx_list, struct sk_buff, list);
+               list_del(&skb->list);
+               list_add_tail(&skb->list, &head);
+               spent++;
+       }
+       spin_unlock_bh(&efv->rx_lock);
+       /* Receive them */
+       netif_receive_skb_list(&head);
+       if (spent < weight)
+               if (napi_complete_done(napi, spent)) {
+                       spin_lock_bh(&efv->rx_lock);
+                       efv->read_index = read_index;
+                       /* If write_index advanced while we were doing the
+                        * RX, then storing our read_index won't re-prime the
+                        * fake-interrupt.  In that case, we need to schedule
+                        * NAPI again to consume the additional packet(s).
+                        */
+                       need_resched = efv->write_index != read_index;
+                       spin_unlock_bh(&efv->rx_lock);
+                       if (need_resched)
+                               napi_schedule(&efv->napi);
+               }
+       return spent;
+}
+
+void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf)
+{
+       u8 *eh = efx_rx_buf_va(rx_buf);
+       struct sk_buff *skb;
+       bool primed;
+
+       /* Don't allow too many queued SKBs to build up, as they consume
+        * GFP_ATOMIC memory.  If we overrun, just start dropping.
+        */
+       if (efv->write_index - READ_ONCE(efv->read_index) > efv->rx_pring_size) {
+               atomic64_inc(&efv->stats.rx_dropped);
+               if (net_ratelimit())
+                       netif_dbg(efv->parent, rx_err, efv->net_dev,
+                                 "nodesc-dropped packet of length %u\n",
+                                 rx_buf->len);
+               return;
+       }
+
+       skb = netdev_alloc_skb(efv->net_dev, rx_buf->len);
+       if (!skb) {
+               atomic64_inc(&efv->stats.rx_dropped);
+               if (net_ratelimit())
+                       netif_dbg(efv->parent, rx_err, efv->net_dev,
+                                 "noskb-dropped packet of length %u\n",
+                                 rx_buf->len);
+               return;
+       }
+       memcpy(skb->data, eh, rx_buf->len);
+       __skb_put(skb, rx_buf->len);
+
+       skb_record_rx_queue(skb, 0); /* rep is single-queue */
+
+       /* Move past the ethernet header */
+       skb->protocol = eth_type_trans(skb, efv->net_dev);
+
+       skb_checksum_none_assert(skb);
+
+       atomic64_inc(&efv->stats.rx_packets);
+       atomic64_add(rx_buf->len, &efv->stats.rx_bytes);
+
+       /* Add it to the rx list */
+       spin_lock_bh(&efv->rx_lock);
+       primed = efv->read_index == efv->write_index;
+       list_add_tail(&skb->list, &efv->rx_list);
+       efv->write_index++;
+       spin_unlock_bh(&efv->rx_lock);
+       /* Trigger rx work */
+       if (primed)
+               napi_schedule(&efv->napi);
+}
+
+struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport)
+{
+       struct efx_rep *efv, *out = NULL;
+
+       /* spinlock guards against list mutation while we're walking it;
+        * but caller must also hold rcu_read_lock() to ensure the netdev
+        * isn't freed after we drop the spinlock.
+        */
+       spin_lock_bh(&efx->vf_reps_lock);
+       list_for_each_entry(efv, &efx->vf_reps, list)
+               if (efv->mport == mport) {
+                       out = efv;
+                       break;
+               }
+       spin_unlock_bh(&efx->vf_reps_lock);
+       return out;
+}
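The representor receive path above implements a software "fake interrupt": the PF datapath appends SKBs to efv->rx_list and only schedules NAPI when the list was previously empty, while the poll routine re-schedules itself if the producer advanced during the drain. A standalone sketch of that read_index/write_index convention (plain C with illustrative names, not driver code):

#include <stdbool.h>
#include <stdio.h>

struct pseudo_ring {
	unsigned int write_index;	/* packets ever enqueued */
	unsigned int read_index;	/* packets ever consumed */
};

/* Producer: returns true when the consumer needs to be scheduled. */
static bool ring_produce(struct pseudo_ring *r)
{
	bool primed = (r->read_index == r->write_index);	/* was empty */

	r->write_index++;
	return primed;
}

/* Consumer: drain up to 'budget'; returns true if it must run again
 * because the producer advanced while we were draining.
 */
static bool ring_consume(struct pseudo_ring *r, unsigned int budget)
{
	unsigned int snapshot = r->write_index;
	unsigned int spent = 0;

	while (spent < budget && r->read_index + spent < snapshot)
		spent++;			/* "receive" one packet */
	r->read_index += spent;
	return r->read_index != r->write_index;
}

int main(void)
{
	struct pseudo_ring r = { 0, 0 };

	if (ring_produce(&r))
		printf("schedule consumer\n");
	ring_produce(&r);			/* already primed: no reschedule */
	if (ring_consume(&r, 64))
		printf("run consumer again\n");
	return 0;
}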
index d47fd8f..070f700 100644 (file)
@@ -14,6 +14,7 @@
 #define EF100_REP_H
 
 #include "net_driver.h"
+#include "tc.h"
 
 struct efx_rep_sw_stats {
        atomic64_t rx_packets, tx_packets;
@@ -29,7 +30,14 @@ struct efx_rep_sw_stats {
  * @msg_enable: log message enable flags
  * @mport: m-port ID of corresponding VF
  * @idx: VF index
+ * @write_index: number of packets enqueued to @rx_list
+ * @read_index: number of packets consumed from @rx_list
+ * @rx_pring_size: max length of RX list
+ * @dflt: default-rule for MAE switching
  * @list: entry on efx->vf_reps
+ * @rx_list: list of SKBs queued for receive in NAPI poll
+ * @rx_lock: protects @rx_list
+ * @napi: NAPI control structure
  * @stats: software traffic counters for netdev stats
  */
 struct efx_rep {
@@ -38,7 +46,13 @@ struct efx_rep {
        u32 msg_enable;
        u32 mport;
        unsigned int idx;
+       unsigned int write_index, read_index;
+       unsigned int rx_pring_size;
+       struct efx_tc_flow_rule dflt;
        struct list_head list;
+       struct list_head rx_list;
+       spinlock_t rx_lock;
+       struct napi_struct napi;
        struct efx_rep_sw_stats stats;
 };
 
@@ -46,4 +60,10 @@ int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i);
 void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv);
 void efx_ef100_fini_vfreps(struct efx_nic *efx);
 
+void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf);
+/* Returns the representor corresponding to a VF m-port, or NULL
+ * @mport is an m-port label, *not* an m-port ID!
+ * Caller must hold rcu_read_lock().
+ */
+struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport);
 #endif /* EF100_REP_H */
index 85207ac..65bbe37 100644 (file)
@@ -55,10 +55,14 @@ static bool ef100_has_fcs_error(struct efx_channel *channel, u32 *prefix)
 
 void __ef100_rx_packet(struct efx_channel *channel)
 {
-       struct efx_rx_buffer *rx_buf = efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
+       struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+       struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue,
+                                                    channel->rx_pkt_index);
        struct efx_nic *efx = channel->efx;
+       struct ef100_nic_data *nic_data;
        u8 *eh = efx_rx_buf_va(rx_buf);
        __wsum csum = 0;
+       u16 ing_port;
        u32 *prefix;
 
        prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN);
@@ -76,6 +80,37 @@ void __ef100_rx_packet(struct efx_channel *channel)
                goto out;
        }
 
+       ing_port = le16_to_cpu((__force __le16) PREFIX_FIELD(prefix, INGRESS_MPORT));
+
+       nic_data = efx->nic_data;
+
+       if (nic_data->have_mport && ing_port != nic_data->base_mport) {
+#ifdef CONFIG_SFC_SRIOV
+               struct efx_rep *efv;
+
+               rcu_read_lock();
+               efv = efx_ef100_find_rep_by_mport(efx, ing_port);
+               if (efv) {
+                       if (efv->net_dev->flags & IFF_UP)
+                               efx_ef100_rep_rx_packet(efv, rx_buf);
+                       rcu_read_unlock();
+                       /* Representor Rx doesn't care about PF Rx buffer
+                        * ownership; it just makes a copy. So we are done
+                        * with the Rx buffer from the PF's point of view and
+                        * should free it.
+                        */
+                       goto free_rx_buffer;
+               }
+               rcu_read_unlock();
+#endif
+               if (net_ratelimit())
+                       netif_warn(efx, drv, efx->net_dev,
+                                  "Unrecognised ing_port %04x (base %04x), dropping\n",
+                                  ing_port, nic_data->base_mport);
+               channel->n_rx_mport_bad++;
+               goto free_rx_buffer;
+       }
+
        if (likely(efx->net_dev->features & NETIF_F_RXCSUM)) {
                if (PREFIX_FIELD(prefix, NT_OR_INNER_L3_CLASS) == 1) {
                        ++channel->n_rx_ip_hdr_chksum_err;
@@ -87,17 +122,16 @@ void __ef100_rx_packet(struct efx_channel *channel)
        }
 
        if (channel->type->receive_skb) {
-               struct efx_rx_queue *rx_queue =
-                       efx_channel_get_rx_queue(channel);
-
                /* no support for special channels yet, so just discard */
                WARN_ON_ONCE(1);
-               efx_free_rx_buffers(rx_queue, rx_buf, 1);
-               goto out;
+               goto free_rx_buffer;
        }
 
        efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum);
+       goto out;
 
+free_rx_buffer:
+       efx_free_rx_buffers(rx_queue, rx_buf, 1);
 out:
        channel->rx_pkt_n_frags = 0;
 }
index 92550c7..9aae0d8 100644 (file)
@@ -501,14 +501,11 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
                efx_device_detach_sync(vf->efx);
                efx_net_stop(vf->efx->net_dev);
 
-               down_write(&vf->efx->filter_sem);
                vf->efx->type->filter_table_remove(vf->efx);
 
                rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
-               if (rc) {
-                       up_write(&vf->efx->filter_sem);
+               if (rc)
                        return rc;
-               }
        }
 
        rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);
@@ -539,12 +536,9 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
        if (vf->efx) {
                /* VF cannot use the vport_id that the PF created */
                rc = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
-               if (rc) {
-                       up_write(&vf->efx->filter_sem);
+               if (rc)
                        return rc;
-               }
                vf->efx->type->filter_table_probe(vf->efx);
-               up_write(&vf->efx->filter_sem);
                efx_net_open(vf->efx->net_dev);
                efx_device_attach_if_not_resetting(vf->efx);
        }
@@ -580,7 +574,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
                efx_net_stop(vf->efx->net_dev);
 
                mutex_lock(&vf->efx->mac_lock);
-               down_write(&vf->efx->filter_sem);
                vf->efx->type->filter_table_remove(vf->efx);
 
                rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
@@ -654,7 +647,6 @@ restore_filters:
                if (rc2)
                        goto reset_nic_up_write;
 
-               up_write(&vf->efx->filter_sem);
                mutex_unlock(&vf->efx->mac_lock);
 
                rc2 = efx_net_open(vf->efx->net_dev);
@@ -666,10 +658,8 @@ restore_filters:
        return rc;
 
 reset_nic_up_write:
-       if (vf->efx) {
-               up_write(&vf->efx->filter_sem);
+       if (vf->efx)
                mutex_unlock(&vf->efx->mac_lock);
-       }
 reset_nic:
        if (vf->efx) {
                netif_err(efx, drv, efx->net_dev,
index 58ad9d6..bc840ed 100644 (file)
@@ -91,6 +91,7 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
+       EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mport_bad),
 #ifdef CONFIG_RFS_ACCEL
        EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
index 40b2af8..4d92883 100644 (file)
@@ -88,6 +88,7 @@ enum efx_filter_priority {
  *     the automatic filter in its place.
  * @EFX_FILTER_FLAG_RX: Filter is for RX
  * @EFX_FILTER_FLAG_TX: Filter is for TX
+ * @EFX_FILTER_FLAG_VPORT_ID: Virtual port ID for adapter switching.
  */
 enum efx_filter_flags {
        EFX_FILTER_FLAG_RX_RSS = 0x01,
@@ -95,6 +96,7 @@ enum efx_filter_flags {
        EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
        EFX_FILTER_FLAG_RX = 0x08,
        EFX_FILTER_FLAG_TX = 0x10,
+       EFX_FILTER_FLAG_VPORT_ID = 0x20,
 };
 
 /** enum efx_encap_type - types of encapsulation
@@ -127,6 +129,9 @@ enum efx_encap_type {
  *     MCFW context_id.
  * @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
  *     an RX drop filter
+ * @vport_id: Virtual port ID associated with RX queue, for adapter switching,
+ *     if %EFX_FILTER_FLAG_VPORT_ID is set.  This is an MCFW vport_id, or on
+ *     EF100 an mport selector.
  * @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
  * @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
  * @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
@@ -156,6 +161,7 @@ struct efx_filter_spec {
        u32     priority:2;
        u32     flags:6;
        u32     dmaq_id:12;
+       u32     vport_id;
        u32     rss_context;
        __be16  outer_vid __aligned(4); /* allow jhash2() of match values */
        __be16  inner_vid;
@@ -292,6 +298,18 @@ static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
        return 0;
 }
 
+/**
+ * efx_filter_set_vport_id - override virtual port id relating to filter
+ * @spec: Specification to initialise
+ * @vport_id: firmware ID of the virtual port
+ */
+static inline void efx_filter_set_vport_id(struct efx_filter_spec *spec,
+                                          u32 vport_id)
+{
+       spec->flags |= EFX_FILTER_FLAG_VPORT_ID;
+       spec->vport_id = vport_id;
+}
+
 static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec,
                                             enum efx_encap_type encap_type)
 {
index 011ebd4..97627f5 100644 (file)
 
 #include "mae.h"
 #include "mcdi.h"
-#include "mcdi_pcol.h"
+#include "mcdi_pcol_mae.h"
+
+int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label)
+{
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LEN);
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_LEN);
+       size_t outlen;
+       int rc;
+
+       if (WARN_ON_ONCE(!id))
+               return -EINVAL;
+       if (WARN_ON_ONCE(!label))
+               return -EINVAL;
+
+       MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_TYPE,
+                      MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_MPORT_TYPE_ALIAS);
+       MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_DELIVER_MPORT,
+                      MAE_MPORT_SELECTOR_ASSIGNED);
+       rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_ALLOC, inbuf, sizeof(inbuf),
+                         outbuf, sizeof(outbuf), &outlen);
+       if (rc)
+               return rc;
+       if (outlen < sizeof(outbuf))
+               return -EIO;
+       *id = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_MPORT_ID);
+       *label = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_LABEL);
+       return 0;
+}
+
+int efx_mae_free_mport(struct efx_nic *efx, u32 id)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_FREE_IN_LEN);
+
+       BUILD_BUG_ON(MC_CMD_MAE_MPORT_FREE_OUT_LEN);
+       MCDI_SET_DWORD(inbuf, MAE_MPORT_FREE_IN_MPORT_ID, id);
+       return efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_FREE, inbuf, sizeof(inbuf),
+                           NULL, 0, NULL);
+}
+
+void efx_mae_mport_wire(struct efx_nic *efx, u32 *out)
+{
+       efx_dword_t mport;
+
+       EFX_POPULATE_DWORD_2(mport,
+                            MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_PPORT,
+                            MAE_MPORT_SELECTOR_PPORT_ID, efx->port_num);
+       *out = EFX_DWORD_VAL(mport);
+}
+
+void efx_mae_mport_uplink(struct efx_nic *efx __always_unused, u32 *out)
+{
+       efx_dword_t mport;
+
+       EFX_POPULATE_DWORD_3(mport,
+                            MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC,
+                            MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER,
+                            MAE_MPORT_SELECTOR_FUNC_VF_ID, MAE_MPORT_SELECTOR_FUNC_VF_ID_NULL);
+       *out = EFX_DWORD_VAL(mport);
+}
 
 void efx_mae_mport_vf(struct efx_nic *efx __always_unused, u32 vf_id, u32 *out)
 {
@@ -24,6 +82,17 @@ void efx_mae_mport_vf(struct efx_nic *efx __always_unused, u32 vf_id, u32 *out)
        *out = EFX_DWORD_VAL(mport);
 }
 
+/* Constructs an mport selector from an mport ID, because they're not the same */
+void efx_mae_mport_mport(struct efx_nic *efx __always_unused, u32 mport_id, u32 *out)
+{
+       efx_dword_t mport;
+
+       EFX_POPULATE_DWORD_2(mport,
+                            MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_MPORT_ID,
+                            MAE_MPORT_SELECTOR_MPORT_ID, mport_id);
+       *out = EFX_DWORD_VAL(mport);
+}
+
 /* id is really only 24 bits wide */
 int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id)
 {
@@ -42,3 +111,236 @@ int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id)
        *id = MCDI_DWORD(outbuf, MAE_MPORT_LOOKUP_OUT_MPORT_ID);
        return 0;
 }
+
+static bool efx_mae_asl_id(u32 id)
+{
+       return !!(id & BIT(31));
+}
+
+int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
+{
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN);
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN);
+       size_t outlen;
+       int rc;
+
+       MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
+                      MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
+       MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
+                      MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
+       MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID,
+                      MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL);
+       MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID,
+                      MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL);
+       MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID,
+                      MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL);
+       if (act->deliver)
+               MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DELIVER,
+                              act->dest_mport);
+       BUILD_BUG_ON(MAE_MPORT_SELECTOR_NULL);
+       rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_ALLOC, inbuf, sizeof(inbuf),
+                         outbuf, sizeof(outbuf), &outlen);
+       if (rc)
+               return rc;
+       if (outlen < sizeof(outbuf))
+               return -EIO;
+       act->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_ALLOC_OUT_AS_ID);
+       /* We rely on the high bit of AS IDs always being clear.
+        * The firmware API guarantees this, but let's check it ourselves.
+        */
+       if (WARN_ON_ONCE(efx_mae_asl_id(act->fw_id))) {
+               efx_mae_free_action_set(efx, act->fw_id);
+               return -EIO;
+       }
+       return 0;
+}
+
+int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id)
+{
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_FREE_OUT_LEN(1));
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_FREE_IN_LEN(1));
+       size_t outlen;
+       int rc;
+
+       MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_FREE_IN_AS_ID, fw_id);
+       rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_FREE, inbuf, sizeof(inbuf),
+                         outbuf, sizeof(outbuf), &outlen);
+       if (rc)
+               return rc;
+       if (outlen < sizeof(outbuf))
+               return -EIO;
+       /* FW freed a different ID than we asked for, should never happen.
+        * Warn because it means we've now got a different idea to the FW of
+        * what action-sets exist, which could cause mayhem later.
+        */
+       if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_SET_FREE_OUT_FREED_AS_ID) != fw_id))
+               return -EIO;
+       return 0;
+}
+
+int efx_mae_alloc_action_set_list(struct efx_nic *efx,
+                                 struct efx_tc_action_set_list *acts)
+{
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN);
+       struct efx_tc_action_set *act;
+       size_t inlen, outlen, i = 0;
+       efx_dword_t *inbuf;
+       int rc;
+
+       list_for_each_entry(act, &acts->list, list)
+               i++;
+       if (i == 0)
+               return -EINVAL;
+       if (i == 1) {
+               /* Don't wrap an ASL around a single AS, just use the AS_ID
+                * directly.  ASLs are a more limited resource.
+                */
+               act = list_first_entry(&acts->list, struct efx_tc_action_set, list);
+               acts->fw_id = act->fw_id;
+               return 0;
+       }
+       if (i > MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM_MCDI2)
+               return -EOPNOTSUPP; /* Too many actions */
+       inlen = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(i);
+       inbuf = kzalloc(inlen, GFP_KERNEL);
+       if (!inbuf)
+               return -ENOMEM;
+       i = 0;
+       list_for_each_entry(act, &acts->list, list) {
+               MCDI_SET_ARRAY_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS,
+                                    i, act->fw_id);
+               i++;
+       }
+       MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_COUNT, i);
+       rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_ALLOC, inbuf, inlen,
+                         outbuf, sizeof(outbuf), &outlen);
+       if (rc)
+               goto out_free;
+       if (outlen < sizeof(outbuf)) {
+               rc = -EIO;
+               goto out_free;
+       }
+       acts->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID);
+       /* We rely on the high bit of ASL IDs always being set.
+        * The firmware API guarantees this, but let's check it ourselves.
+        */
+       if (WARN_ON_ONCE(!efx_mae_asl_id(acts->fw_id))) {
+               efx_mae_free_action_set_list(efx, acts);
+               rc = -EIO;
+       }
+out_free:
+       kfree(inbuf);
+       return rc;
+}
+
+int efx_mae_free_action_set_list(struct efx_nic *efx,
+                                struct efx_tc_action_set_list *acts)
+{
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(1));
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(1));
+       size_t outlen;
+       int rc;
+
+       /* If this is just an AS_ID with no ASL wrapper, then there is
+        * nothing for us to free.  (The AS will be freed later.)
+        */
+       if (efx_mae_asl_id(acts->fw_id)) {
+               MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_FREE_IN_ASL_ID,
+                              acts->fw_id);
+               rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_FREE, inbuf,
+                                 sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+               if (rc)
+                       return rc;
+               if (outlen < sizeof(outbuf))
+                       return -EIO;
+               /* FW freed a different ID than we asked for, should never happen.
+                * Warn because it means we've now got a different idea to the FW of
+                * what action-set-lists exist, which could cause mayhem later.
+                */
+               if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID) != acts->fw_id))
+                       return -EIO;
+       }
+       /* We're probably about to free @acts, but let's just make sure its
+        * fw_id is overwritten with the NULL ID so that it won't look valid
+        * if it leaks out.
+        */
+       acts->fw_id = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL;
+       return 0;
+}
+
+static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
+                                          const struct efx_tc_match *match)
+{
+       if (match->mask.ingress_port) {
+               if (~match->mask.ingress_port)
+                       return -EOPNOTSUPP;
+               MCDI_STRUCT_SET_DWORD(match_crit,
+                                     MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR,
+                                     match->value.ingress_port);
+       }
+       MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK,
+                             match->mask.ingress_port);
+       return 0;
+}
+
+int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
+                       u32 prio, u32 acts_id, u32 *id)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_INSERT_IN_LEN(MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN));
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN);
+       MCDI_DECLARE_STRUCT_PTR(match_crit);
+       MCDI_DECLARE_STRUCT_PTR(response);
+       size_t outlen;
+       int rc;
+
+       if (!id)
+               return -EINVAL;
+
+       match_crit = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA);
+       response = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_RESPONSE);
+       if (efx_mae_asl_id(acts_id)) {
+               MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID, acts_id);
+               MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID,
+                                     MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL);
+       } else {
+               /* We only had one AS, so we didn't wrap it in an ASL */
+               MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID,
+                                     MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
+               MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID, acts_id);
+       }
+       MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_PRIO, prio);
+       rc = efx_mae_populate_match_criteria(match_crit, match);
+       if (rc)
+               return rc;
+
+       rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_INSERT, inbuf, sizeof(inbuf),
+                         outbuf, sizeof(outbuf), &outlen);
+       if (rc)
+               return rc;
+       if (outlen < sizeof(outbuf))
+               return -EIO;
+       *id = MCDI_DWORD(outbuf, MAE_ACTION_RULE_INSERT_OUT_AR_ID);
+       return 0;
+}
+
+int efx_mae_delete_rule(struct efx_nic *efx, u32 id)
+{
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LEN(1));
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_DELETE_IN_LEN(1));
+       size_t outlen;
+       int rc;
+
+       MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_DELETE_IN_AR_ID, id);
+       rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_DELETE, inbuf, sizeof(inbuf),
+                         outbuf, sizeof(outbuf), &outlen);
+       if (rc)
+               return rc;
+       if (outlen < sizeof(outbuf))
+               return -EIO;
+       /* FW freed a different ID than we asked for, should also never happen.
+        * Warn because it means we've now got a different idea to the FW of
+        * what rules exist, which could cause mayhem later.
+        */
+       if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID) != id))
+               return -EIO;
+       return 0;
+}
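efx_mae_asl_id() above depends on a firmware convention that action-set IDs always have bit 31 clear while action-set-list IDs always have it set, so a single u32 handle (acts->fw_id) can carry either kind. A small standalone illustration of that convention (the handle values are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit 31 distinguishes an action-set-list (ASL) handle from an
 * action-set (AS) handle, mirroring efx_mae_asl_id().
 */
static bool is_asl_id(uint32_t id)
{
	return id & (1u << 31);
}

int main(void)
{
	uint32_t as_id  = 0x00000007;	/* hypothetical AS handle */
	uint32_t asl_id = 0x80000002;	/* hypothetical ASL handle */

	printf("%#010x -> %s\n", as_id,  is_asl_id(as_id)  ? "ASL" : "AS");
	printf("%#010x -> %s\n", asl_id, is_asl_id(asl_id) ? "ASL" : "AS");
	return 0;
}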
index 27e69e8..0369be4 100644 (file)
 /* MCDI interface for the ef100 Match-Action Engine */
 
 #include "net_driver.h"
+#include "tc.h"
+#include "mcdi_pcol.h" /* needed for various MC_CMD_MAE_*_NULL defines */
 
+int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label);
+int efx_mae_free_mport(struct efx_nic *efx, u32 id);
+
+void efx_mae_mport_wire(struct efx_nic *efx, u32 *out);
+void efx_mae_mport_uplink(struct efx_nic *efx, u32 *out);
 void efx_mae_mport_vf(struct efx_nic *efx, u32 vf_id, u32 *out);
+void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out);
 
 int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
 
+int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act);
+int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id);
+
+int efx_mae_alloc_action_set_list(struct efx_nic *efx,
+                                 struct efx_tc_action_set_list *acts);
+int efx_mae_free_action_set_list(struct efx_nic *efx,
+                                struct efx_tc_action_set_list *acts);
+
+int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
+                       u32 prio, u32 acts_id, u32 *id);
+int efx_mae_delete_rule(struct efx_nic *efx, u32 id);
+
 #endif /* EF100_MAE_H */
index f74f6ce..26bc69f 100644 (file)
@@ -205,6 +205,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
        ((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
 #define _MCDI_DWORD(_buf, _field)                                      \
        ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+#define _MCDI_STRUCT_DWORD(_buf, _field)                               \
+       ((_buf) + (_MCDI_CHECK_ALIGN(_field ## _OFST, 4) >> 2))
 
 #define MCDI_BYTE(_buf, _field)                                                \
        ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1),       \
@@ -214,6 +216,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
         le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
 #define MCDI_SET_DWORD(_buf, _field, _value)                           \
        EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
+#define MCDI_STRUCT_SET_DWORD(_buf, _field, _value)                    \
+       EFX_POPULATE_DWORD_1(*_MCDI_STRUCT_DWORD(_buf, _field), EFX_DWORD_0, _value)
 #define MCDI_DWORD(_buf, _field)                                       \
        EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
 #define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1)           \
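The new _MCDI_STRUCT_DWORD/MCDI_STRUCT_SET_DWORD macros follow the usual MCDI pattern: a field is described by a byte offset relative to the start of a sub-structure, and a dword pointer into the buffer is formed by checking 4-byte alignment and shifting the offset right by two. A plain-C sketch of that pattern (the field name and offset are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_FIELD_OFST 8	/* hypothetical field at byte offset 8 */

/* Turn a byte offset into a dword pointer, as _MCDI_STRUCT_DWORD does
 * (the real macro also fails the build if the offset isn't 4-aligned).
 */
static uint32_t *struct_dword(uint32_t *buf, unsigned int ofst)
{
	return buf + (ofst >> 2);
}

int main(void)
{
	uint32_t buf[4] = { 0 };

	*struct_dword(buf, EXAMPLE_FIELD_OFST) = 0x1234;
	printf("buf[2] = %#x\n", buf[2]);	/* the field landed in dword 2 */
	return 0;
}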
index 1523be7..4ff6586 100644 (file)
@@ -221,7 +221,10 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
                efx_mcdi_filter_push_prep_set_match_fields(efx, spec, inbuf);
        }
 
-       MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id);
+       if (flags & EFX_FILTER_FLAG_VPORT_ID)
+               MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, spec->vport_id);
+       else
+               MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id);
        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
                       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
                       MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
@@ -488,6 +491,7 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
                        saved_spec->flags |= spec->flags;
                        saved_spec->rss_context = spec->rss_context;
                        saved_spec->dmaq_id = spec->dmaq_id;
+                       saved_spec->vport_id = spec->vport_id;
                }
        } else if (!replacing) {
                kfree(saved_spec);
index 06426aa..c0d6558 100644 (file)
@@ -89,6 +89,7 @@ struct efx_mcdi_filter_table {
         */
        bool mc_chaining;
        bool vlan_filter;
+       /* Entries on the vlan_list are added/removed under filter_sem */
        struct list_head vlan_list;
 };
 
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol_mae.h b/drivers/net/ethernet/sfc/mcdi_pcol_mae.h
new file mode 100644 (file)
index 0000000..ff6d80c
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2019-2022 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef MCDI_PCOL_MAE_H
+#define MCDI_PCOL_MAE_H
+/* MCDI definitions for Match-Action Engine functionality that are
+ * missing from the main mcdi_pcol.h
+ */
+
+/* MC_CMD_MAE_COUNTER_LIST_ALLOC is not (yet) a released API, but the
+ * following value is needed as an argument to MC_CMD_MAE_ACTION_SET_ALLOC.
+ */
+/* enum: A counter ID that is guaranteed never to represent a real counter */
+#define          MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL 0xffffffff
+
+#endif /* MCDI_PCOL_MAE_H */
index 4cde54c..7ef823d 100644 (file)
@@ -478,6 +478,8 @@ enum efx_sync_events_state {
  * @n_rx_xdp_bad_drops: Count of RX packets dropped due to XDP errors
  * @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP
  * @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
+ * @n_rx_mport_bad: Count of RX packets dropped because their ingress mport was
+ *     not recognised
  * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
  *     __efx_rx_packet(), or zero if there is none
  * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
@@ -540,6 +542,7 @@ struct efx_channel {
        unsigned int n_rx_xdp_bad_drops;
        unsigned int n_rx_xdp_tx;
        unsigned int n_rx_xdp_redirect;
+       unsigned int n_rx_mport_bad;
 
        unsigned int rx_pkt_n_frags;
        unsigned int rx_pkt_index;
@@ -975,6 +978,7 @@ enum efx_xdp_tx_queues_mode {
  * @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their
  *      xdp_rxq_info structures?
  * @netdev_notifier: Netdevice notifier.
+ * @tc: state for TC offload (EF100).
  * @mem_bar: The BAR that is mapped into membase.
  * @reg_base: Offset from the start of the bar to the function control window.
  * @monitor_work: Hardware monitor workitem
@@ -1158,6 +1162,7 @@ struct efx_nic {
        bool xdp_rxq_info_failed;
 
        struct notifier_block netdev_notifier;
+       struct efx_tc_state *tc;
 
        unsigned int mem_bar;
        u32 reg_base;
index 4625f85..10ad0b9 100644 (file)
@@ -1100,7 +1100,29 @@ static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
 
        tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type);
        if (tx_queue && tx_queue->timestamping) {
+               /* This code invokes normal driver TX code which is always
+                * protected from softirqs when called from generic TX code,
+                * which in turn disables preemption. Look at __dev_queue_xmit
+                * which uses rcu_read_lock_bh disabling preemption for RCU
+                * plus disabling softirqs. We do not need RCU reader
+                * protection here.
+                *
+                * Although it is theoretically safe for current PTP TX/RX code
+                * running without disabling softirqs, there are three good
+                * reasons for doing so:
+                *
+                *      1) The code invoked is mainly implemented for non-PTP
+                *         packets and it is always executed with softirqs
+                *         disabled.
+                *      2) This being a single PTP packet, better to not
+                *         interrupt its processing by softirqs which can lead
+                *         to high latencies.
+                *      3) netdev_xmit_more checks preemption is disabled and
+                *         triggers a BUG_ON if not.
+                */
+               local_bh_disable();
                efx_enqueue_skb(tx_queue, skb);
+               local_bh_enable();
        } else {
                WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
                dev_kfree_skb_any(skb);
index bd21d6a..4826e6a 100644 (file)
@@ -793,7 +793,6 @@ int efx_probe_filters(struct efx_nic *efx)
        int rc;
 
        mutex_lock(&efx->mac_lock);
-       down_write(&efx->filter_sem);
        rc = efx->type->filter_table_probe(efx);
        if (rc)
                goto out_unlock;
@@ -830,7 +829,6 @@ int efx_probe_filters(struct efx_nic *efx)
        }
 #endif
 out_unlock:
-       up_write(&efx->filter_sem);
        mutex_unlock(&efx->mac_lock);
        return rc;
 }
@@ -846,9 +844,7 @@ void efx_remove_filters(struct efx_nic *efx)
                channel->rps_flow_id = NULL;
        }
 #endif
-       down_write(&efx->filter_sem);
        efx->type->filter_table_remove(efx);
-       up_write(&efx->filter_sem);
 }
 
 #ifdef CONFIG_RFS_ACCEL
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
new file mode 100644 (file)
index 0000000..0c0aeb9
--- /dev/null
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "tc.h"
+#include "mae.h"
+#include "ef100_rep.h"
+#include "efx.h"
+
+static void efx_tc_free_action_set(struct efx_nic *efx,
+                                  struct efx_tc_action_set *act, bool in_hw)
+{
+       /* Failure paths calling this on the 'running action' set in_hw=false,
+        * because if the alloc had succeeded we'd've put it in acts.list and
+        * not still have it in act.
+        */
+       if (in_hw) {
+               efx_mae_free_action_set(efx, act->fw_id);
+               /* in_hw is true iff we are on an acts.list; make sure to
+                * remove ourselves from that list before we are freed.
+                */
+               list_del(&act->list);
+       }
+       kfree(act);
+}
+
+static void efx_tc_free_action_set_list(struct efx_nic *efx,
+                                       struct efx_tc_action_set_list *acts,
+                                       bool in_hw)
+{
+       struct efx_tc_action_set *act, *next;
+
+       /* Failure paths set in_hw=false, because usually the acts didn't get
+        * to efx_mae_alloc_action_set_list(); if they did, the failure tree
+        * has a separate efx_mae_free_action_set_list() before calling us.
+        */
+       if (in_hw)
+               efx_mae_free_action_set_list(efx, acts);
+       /* Any act that's on the list will be in_hw even if the list isn't */
+       list_for_each_entry_safe(act, next, &acts->list, list)
+               efx_tc_free_action_set(efx, act, true);
+       /* Don't kfree, as acts is embedded inside a struct efx_tc_flow_rule */
+}
+
+static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule)
+{
+       efx_mae_delete_rule(efx, rule->fw_id);
+
+       /* Release entries in subsidiary tables */
+       efx_tc_free_action_set_list(efx, &rule->acts, true);
+       rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+}
+
+static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port,
+                                        u32 eg_port, struct efx_tc_flow_rule *rule)
+{
+       struct efx_tc_action_set_list *acts = &rule->acts;
+       struct efx_tc_match *match = &rule->match;
+       struct efx_tc_action_set *act;
+       int rc;
+
+       match->value.ingress_port = ing_port;
+       match->mask.ingress_port = ~0;
+       act = kzalloc(sizeof(*act), GFP_KERNEL);
+       if (!act)
+               return -ENOMEM;
+       act->deliver = 1;
+       act->dest_mport = eg_port;
+       rc = efx_mae_alloc_action_set(efx, act);
+       if (rc)
+               goto fail1;
+       EFX_WARN_ON_PARANOID(!list_empty(&acts->list));
+       list_add_tail(&act->list, &acts->list);
+       rc = efx_mae_alloc_action_set_list(efx, acts);
+       if (rc)
+               goto fail2;
+       rc = efx_mae_insert_rule(efx, match, EFX_TC_PRIO_DFLT,
+                                acts->fw_id, &rule->fw_id);
+       if (rc)
+               goto fail3;
+       return 0;
+fail3:
+       efx_mae_free_action_set_list(efx, acts);
+fail2:
+       list_del(&act->list);
+       efx_mae_free_action_set(efx, act->fw_id);
+fail1:
+       kfree(act);
+       return rc;
+}
+
+static int efx_tc_configure_default_rule_pf(struct efx_nic *efx)
+{
+       struct efx_tc_flow_rule *rule = &efx->tc->dflt.pf;
+       u32 ing_port, eg_port;
+
+       efx_mae_mport_uplink(efx, &ing_port);
+       efx_mae_mport_wire(efx, &eg_port);
+       return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
+}
+
+static int efx_tc_configure_default_rule_wire(struct efx_nic *efx)
+{
+       struct efx_tc_flow_rule *rule = &efx->tc->dflt.wire;
+       u32 ing_port, eg_port;
+
+       efx_mae_mport_wire(efx, &ing_port);
+       efx_mae_mport_uplink(efx, &eg_port);
+       return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
+}
+
+int efx_tc_configure_default_rule_rep(struct efx_rep *efv)
+{
+       struct efx_tc_flow_rule *rule = &efv->dflt;
+       struct efx_nic *efx = efv->parent;
+       u32 ing_port, eg_port;
+
+       efx_mae_mport_mport(efx, efv->mport, &ing_port);
+       efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port);
+       return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
+}
+
+void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
+                                    struct efx_tc_flow_rule *rule)
+{
+       if (rule->fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL)
+               efx_tc_delete_rule(efx, rule);
+       rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+}
+
+static int efx_tc_configure_rep_mport(struct efx_nic *efx)
+{
+       u32 rep_mport_label;
+       int rc;
+
+       rc = efx_mae_allocate_mport(efx, &efx->tc->reps_mport_id, &rep_mport_label);
+       if (rc)
+               return rc;
+       pci_dbg(efx->pci_dev, "created rep mport 0x%08x (0x%04x)\n",
+               efx->tc->reps_mport_id, rep_mport_label);
+       /* Use mport *selector* as vport ID */
+       efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
+                           &efx->tc->reps_mport_vport_id);
+       return 0;
+}
+
+static void efx_tc_deconfigure_rep_mport(struct efx_nic *efx)
+{
+       efx_mae_free_mport(efx, efx->tc->reps_mport_id);
+       efx->tc->reps_mport_id = MAE_MPORT_SELECTOR_NULL;
+}
+
+int efx_tc_insert_rep_filters(struct efx_nic *efx)
+{
+       struct efx_filter_spec promisc, allmulti;
+       int rc;
+
+       if (efx->type->is_vf)
+               return 0;
+       if (!efx->tc)
+               return 0;
+       efx_filter_init_rx(&promisc, EFX_FILTER_PRI_REQUIRED, 0, 0);
+       efx_filter_set_uc_def(&promisc);
+       efx_filter_set_vport_id(&promisc, efx->tc->reps_mport_vport_id);
+       rc = efx_filter_insert_filter(efx, &promisc, false);
+       if (rc < 0)
+               return rc;
+       efx->tc->reps_filter_uc = rc;
+       efx_filter_init_rx(&allmulti, EFX_FILTER_PRI_REQUIRED, 0, 0);
+       efx_filter_set_mc_def(&allmulti);
+       efx_filter_set_vport_id(&allmulti, efx->tc->reps_mport_vport_id);
+       rc = efx_filter_insert_filter(efx, &allmulti, false);
+       if (rc < 0)
+               return rc;
+       efx->tc->reps_filter_mc = rc;
+       return 0;
+}
+
+void efx_tc_remove_rep_filters(struct efx_nic *efx)
+{
+       if (efx->type->is_vf)
+               return;
+       if (!efx->tc)
+               return;
+       if (efx->tc->reps_filter_mc >= 0)
+               efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_mc);
+       efx->tc->reps_filter_mc = -1;
+       if (efx->tc->reps_filter_uc >= 0)
+               efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_uc);
+       efx->tc->reps_filter_uc = -1;
+}
+
+int efx_init_tc(struct efx_nic *efx)
+{
+       int rc;
+
+       rc = efx_tc_configure_default_rule_pf(efx);
+       if (rc)
+               return rc;
+       rc = efx_tc_configure_default_rule_wire(efx);
+       if (rc)
+               return rc;
+       return efx_tc_configure_rep_mport(efx);
+}
+
+void efx_fini_tc(struct efx_nic *efx)
+{
+       /* We can get called even if efx_init_struct_tc() failed */
+       if (!efx->tc)
+               return;
+       efx_tc_deconfigure_rep_mport(efx);
+       efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf);
+       efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire);
+}
+
+int efx_init_struct_tc(struct efx_nic *efx)
+{
+       if (efx->type->is_vf)
+               return 0;
+
+       efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL);
+       if (!efx->tc)
+               return -ENOMEM;
+
+       efx->tc->reps_filter_uc = -1;
+       efx->tc->reps_filter_mc = -1;
+       INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
+       efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+       INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list);
+       efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+       return 0;
+}
+
+void efx_fini_struct_tc(struct efx_nic *efx)
+{
+       if (!efx->tc)
+               return;
+
+       EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id !=
+                            MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
+       EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id !=
+                            MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
+       kfree(efx->tc);
+       efx->tc = NULL;
+}
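
efx_tc_configure_default_rule() above unwinds on failure with the usual goto ladder: each label releases only what the step before the failing one had acquired, then falls through to the earlier labels. A stripped-down, self-contained sketch of that idiom follows (generic resources and hypothetical names, not the sfc MAE calls):

#include <errno.h>
#include <stdlib.h>

struct res { int dummy; };

static struct res *acquire(void) { return calloc(1, sizeof(struct res)); }
static void release(struct res *r) { free(r); }

/* Acquire three resources; on failure, unwind in reverse order. */
static int setup_three(struct res **a, struct res **b, struct res **c)
{
        int rc = -ENOMEM;

        *a = acquire();
        if (!*a)
                return rc;
        *b = acquire();
        if (!*b)
                goto fail_a;
        *c = acquire();
        if (!*c)
                goto fail_b;
        return 0;

fail_b:
        release(*b);
fail_a:
        release(*a);
        return rc;
}
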
diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h
new file mode 100644 (file)
index 0000000..309123c
--- /dev/null
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_TC_H
+#define EFX_TC_H
+#include "net_driver.h"
+
+struct efx_tc_action_set {
+       u16 deliver:1;
+       u32 dest_mport;
+       u32 fw_id; /* index of this entry in firmware actions table */
+       struct list_head list;
+};
+
+struct efx_tc_match_fields {
+       /* L1 */
+       u32 ingress_port;
+};
+
+struct efx_tc_match {
+       struct efx_tc_match_fields value;
+       struct efx_tc_match_fields mask;
+};
+
+struct efx_tc_action_set_list {
+       struct list_head list;
+       u32 fw_id;
+};
+
+struct efx_tc_flow_rule {
+       struct efx_tc_match match;
+       struct efx_tc_action_set_list acts;
+       u32 fw_id;
+};
+
+enum efx_tc_rule_prios {
+       EFX_TC_PRIO_DFLT, /* Default switch rule; one of efx_tc_default_rules */
+       EFX_TC_PRIO__NUM
+};
+
+/**
+ * struct efx_tc_state - control plane data for TC offload
+ *
+ * @reps_mport_id: MAE port allocated for representor RX
+ * @reps_filter_uc: VNIC filter for representor unicast RX (promisc)
+ * @reps_filter_mc: VNIC filter for representor multicast RX (allmulti)
+ * @reps_mport_vport_id: vport_id for representor RX filters
+ * @dflt: Match-action rules for default switching; at priority
+ *     %EFX_TC_PRIO_DFLT.  Named by *ingress* port
+ * @dflt.pf: rule for traffic ingressing from PF (egresses to wire)
+ * @dflt.wire: rule for traffic ingressing from wire (egresses to PF)
+ */
+struct efx_tc_state {
+       u32 reps_mport_id, reps_mport_vport_id;
+       s32 reps_filter_uc, reps_filter_mc;
+       struct {
+               struct efx_tc_flow_rule pf;
+               struct efx_tc_flow_rule wire;
+       } dflt;
+};
+
+struct efx_rep;
+
+int efx_tc_configure_default_rule_rep(struct efx_rep *efv);
+void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
+                                    struct efx_tc_flow_rule *rule);
+
+int efx_tc_insert_rep_filters(struct efx_nic *efx);
+void efx_tc_remove_rep_filters(struct efx_nic *efx);
+
+int efx_init_tc(struct efx_nic *efx);
+void efx_fini_tc(struct efx_nic *efx);
+
+int efx_init_struct_tc(struct efx_nic *efx);
+void efx_fini_struct_tc(struct efx_nic *efx);
+
+#endif /* EFX_TC_H */
index ca8ab29..d42e1af 100644 (file)
@@ -688,18 +688,19 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
 
        ret = mediatek_dwmac_clks_config(priv_plat, true);
        if (ret)
-               return ret;
+               goto err_remove_config_dt;
 
        ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
-       if (ret) {
-               stmmac_remove_config_dt(pdev, plat_dat);
+       if (ret)
                goto err_drv_probe;
-       }
 
        return 0;
 
 err_drv_probe:
        mediatek_dwmac_clks_config(priv_plat, false);
+err_remove_config_dt:
+       stmmac_remove_config_dt(pdev, plat_dat);
+
        return ret;
 }
 
index 2495a57..018d365 100644 (file)
@@ -815,6 +815,7 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
        fl4->saddr = info->key.u.ipv4.src;
        fl4->fl4_dport = dport;
        fl4->fl4_sport = sport;
+       fl4->flowi4_flags = info->key.flow_flags;
 
        tos = info->key.tos;
        if ((tos == 1) && !geneve->cfg.collect_md) {
index 3233d14..495e85a 100644 (file)
@@ -214,7 +214,7 @@ struct ipa_init_modem_driver_req {
 
 /* The response to a IPA_QMI_INIT_DRIVER request begins with a standard
  * QMI response, but contains other information as well.  Currently we
- * simply wait for the the INIT_DRIVER transaction to complete and
+ * simply wait for the INIT_DRIVER transaction to complete and
  * ignore any other data that might be returned.
  */
 struct ipa_init_modem_driver_rsp {
index c881e1b..f1683ce 100644 (file)
@@ -243,6 +243,7 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
 #define DEFAULT_SEND_SCI true
 #define DEFAULT_ENCRYPT false
 #define DEFAULT_ENCODING_SA 0
+#define MACSEC_XPN_MAX_REPLAY_WINDOW ((1 << 30) - 1)
 
 static bool send_sci(const struct macsec_secy *secy)
 {
@@ -1697,7 +1698,7 @@ static bool validate_add_rxsa(struct nlattr **attrs)
                return false;
 
        if (attrs[MACSEC_SA_ATTR_PN] &&
-           *(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0)
+           nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
                return false;
 
        if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -1753,7 +1754,8 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
        }
 
        pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
-       if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
+       if (tb_sa[MACSEC_SA_ATTR_PN] &&
+           nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
                pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
                          nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
                rtnl_unlock();
@@ -1769,7 +1771,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
                if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
                        pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
                                  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
-                                 MACSEC_SA_ATTR_SALT);
+                                 MACSEC_SALT_LEN);
                        rtnl_unlock();
                        return -EINVAL;
                }
@@ -1842,7 +1844,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
        return 0;
 
 cleanup:
-       kfree(rx_sa);
+       macsec_rxsa_put(rx_sa);
        rtnl_unlock();
        return err;
 }
@@ -1939,7 +1941,7 @@ static bool validate_add_txsa(struct nlattr **attrs)
        if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
                return false;
 
-       if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+       if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
                return false;
 
        if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -2011,7 +2013,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
                if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
                        pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
                                  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
-                                 MACSEC_SA_ATTR_SALT);
+                                 MACSEC_SALT_LEN);
                        rtnl_unlock();
                        return -EINVAL;
                }
@@ -2085,7 +2087,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
 
 cleanup:
        secy->operational = was_operational;
-       kfree(tx_sa);
+       macsec_txsa_put(tx_sa);
        rtnl_unlock();
        return err;
 }
@@ -2293,7 +2295,7 @@ static bool validate_upd_sa(struct nlattr **attrs)
        if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
                return false;
 
-       if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+       if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
                return false;
 
        if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -3745,9 +3747,6 @@ static int macsec_changelink_common(struct net_device *dev,
                secy->operational = tx_sa && tx_sa->active;
        }
 
-       if (data[IFLA_MACSEC_WINDOW])
-               secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
-
        if (data[IFLA_MACSEC_ENCRYPT])
                tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
 
@@ -3793,6 +3792,16 @@ static int macsec_changelink_common(struct net_device *dev,
                }
        }
 
+       if (data[IFLA_MACSEC_WINDOW]) {
+               secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
+
+               /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
+                * for XPN cipher suites */
+               if (secy->xpn &&
+                   secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
+                       return -EINVAL;
+       }
+
        return 0;
 }
 
@@ -3822,7 +3831,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
 
        ret = macsec_changelink_common(dev, data);
        if (ret)
-               return ret;
+               goto cleanup;
 
        /* If h/w offloading is available, propagate to the device */
        if (macsec_is_offloaded(macsec)) {
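
The IFLA_MACSEC_WINDOW handling above is moved after the cipher-suite update so the replay window can be validated against the XPN limit from IEEE 802.1AEbw-2013 before it takes effect. A minimal, self-contained sketch of just that bound check (hypothetical helper name, user-space types):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MACSEC_XPN_MAX_REPLAY_WINDOW ((1U << 30) - 1)

/* XPN cipher suites cap the replay window at 2^30 - 1; others take any u32. */
static int check_replay_window(bool xpn, uint32_t window)
{
        if (xpn && window > MACSEC_XPN_MAX_REPLAY_WINDOW)
                return -EINVAL;
        return 0;
}

int main(void)
{
        printf("%d\n", check_replay_window(true, 1U << 30));    /* rejected: -EINVAL */
        printf("%d\n", check_replay_window(false, UINT32_MAX)); /* accepted: 0 */
        return 0;
}
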
index a438202..5085426 100644 (file)
@@ -351,10 +351,12 @@ nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx)
 {
        struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
 
-       nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER);
+       nmap->entry[idx].key = kmalloc(offmap->map.key_size,
+                                      GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
        if (!nmap->entry[idx].key)
                return -ENOMEM;
-       nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER);
+       nmap->entry[idx].value = kmalloc(offmap->map.value_size,
+                                        GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
        if (!nmap->entry[idx].value) {
                kfree(nmap->entry[idx].key);
                nmap->entry[idx].key = NULL;
@@ -496,7 +498,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
        if (offmap->map.map_flags)
                return -EINVAL;
 
-       nmap = kzalloc(sizeof(*nmap), GFP_USER);
+       nmap = kzalloc(sizeof(*nmap), GFP_KERNEL_ACCOUNT);
        if (!nmap)
                return -ENOMEM;
 
index ab0af1d..70f88ea 100644 (file)
@@ -986,7 +986,7 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs,
         */
        ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS);
        if (ret < 0)
-               return false;
+               return ret;
 
        if (ret & DW_VR_MII_C37_ANSGM_SP_LNKSTS) {
                int speed_value;
index ff22b6b..36803d9 100644 (file)
@@ -450,6 +450,7 @@ static int bcm5421_init(struct mii_phy* phy)
                int can_low_power = 1;
                if (np == NULL || of_get_property(np, "no-autolowpower", NULL))
                        can_low_power = 0;
+               of_node_put(np);
                if (can_low_power) {
                        /* Enable automatic low-power */
                        sungem_phy_write(phy, 0x1c, 0x9002);
index 1e5c153..8438934 100644 (file)
@@ -8,13 +8,13 @@
  *
  *  Based on the work of
  *             Donald Becker
- * 
+ *
  *  Old chipset support added by Simon Evans <spse@secret.org.uk> 2002
  *    - adds support for Belkin F5U011
  */
 
 /*
- * 
+ *
  * Should you need to contact me, the author, you can do so either by
  * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
  * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
@@ -54,7 +54,7 @@ static const char driver_name[] = "catc";
 
 /*
  * Some defines.
- */ 
+ */
 
 #define STATS_UPDATE           (HZ)    /* Time between stats updates */
 #define TX_TIMEOUT             (5*HZ)  /* Max time the queue can be stopped */
@@ -332,7 +332,7 @@ static void catc_irq_done(struct urb *urb)
                                dev_err(&catc->usbdev->dev,
                                        "submit(rx_urb) status %d\n", res);
                        }
-               } 
+               }
        }
 resubmit:
        res = usb_submit_urb (urb, GFP_ATOMIC);
@@ -538,7 +538,7 @@ static int catc_ctrl_async(struct catc *catc, u8 dir, u8 request, u16 value,
        unsigned long flags;
 
        spin_lock_irqsave(&catc->ctrl_lock, flags);
-       
+
        q = catc->ctrl_queue + catc->ctrl_head;
 
        q->dir = dir;
@@ -639,7 +639,7 @@ static void catc_set_multicast_list(struct net_device *netdev)
        if (netdev->flags & IFF_PROMISC) {
                memset(catc->multicast, 0xff, 64);
                rx |= (!catc->is_f5u011) ? RxPromisc : AltRxPromisc;
-       } 
+       }
 
        if (netdev->flags & IFF_ALLMULTI) {
                memset(catc->multicast, 0xff, 64);
@@ -806,7 +806,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
        catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
        catc->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
        catc->irq_urb = usb_alloc_urb(0, GFP_KERNEL);
-       if ((!catc->ctrl_urb) || (!catc->tx_urb) || 
+       if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
            (!catc->rx_urb) || (!catc->irq_urb)) {
                dev_err(&intf->dev, "No free urbs available.\n");
                ret = -ENOMEM;
@@ -814,17 +814,17 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
        }
 
        /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
-       if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 && 
+       if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 &&
            le16_to_cpu(usbdev->descriptor.idProduct) == 0xa &&
            le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) {
                dev_dbg(dev, "Testing for f5u011\n");
-               catc->is_f5u011 = 1;            
+               catc->is_f5u011 = 1;
                atomic_set(&catc->recq_sz, 0);
                pktsz = RX_PKT_SZ;
        } else {
                pktsz = RX_MAX_BURST * (PKT_SZ + 2);
        }
-       
+
        usb_fill_control_urb(catc->ctrl_urb, usbdev, usb_sndctrlpipe(usbdev, 0),
                NULL, NULL, 0, catc_ctrl_done, catc);
 
@@ -854,7 +854,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                *buf = 0x87654321;
                catc_write_mem(catc, 0xfa80, buf, 4);
                catc_read_mem(catc, 0x7a80, buf, 4);
-         
+
                switch (*buf) {
                case 0x12345678:
                        catc_set_reg(catc, TxBufCount, 8);
@@ -873,32 +873,32 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                }
 
                kfree(buf);
-         
+
                dev_dbg(dev, "Getting MAC from SEEROM.\n");
-         
+
                catc_get_mac(catc, macbuf);
                eth_hw_addr_set(netdev, macbuf);
-               
+
                dev_dbg(dev, "Setting MAC into registers.\n");
-         
+
                for (i = 0; i < 6; i++)
                        catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]);
-               
+
                dev_dbg(dev, "Filling the multicast list.\n");
-         
+
                eth_broadcast_addr(broadcast);
                catc_multicast(broadcast, catc->multicast);
                catc_multicast(netdev->dev_addr, catc->multicast);
                catc_write_mem(catc, 0xfa80, catc->multicast, 64);
-               
+
                dev_dbg(dev, "Clearing error counters.\n");
-               
+
                for (i = 0; i < 8; i++)
                        catc_set_reg(catc, EthStats + i, 0);
                catc->last_stats = jiffies;
-               
+
                dev_dbg(dev, "Enabling.\n");
-               
+
                catc_set_reg(catc, MaxBurst, RX_MAX_BURST);
                catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits);
                catc_set_reg(catc, LEDCtrl, LEDLink);
@@ -908,7 +908,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                catc_reset(catc);
                catc_get_mac(catc, macbuf);
                eth_hw_addr_set(netdev, macbuf);
-               
+
                dev_dbg(dev, "Setting RX Mode\n");
                catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast;
                catc->rxmode[1] = 0;
index 32637df..f4a44f0 100644 (file)
@@ -120,7 +120,7 @@ static const struct driver_info     an2720_info = {
 
 #endif /* CONFIG_USB_AN2720 */
 
-\f
+
 #ifdef CONFIG_USB_BELKIN
 #define        HAVE_HARDWARE
 
@@ -140,7 +140,7 @@ static const struct driver_info     belkin_info = {
 #endif /* CONFIG_USB_BELKIN */
 
 
-\f
+
 #ifdef CONFIG_USB_EPSON2888
 #define        HAVE_HARDWARE
 
@@ -167,7 +167,7 @@ static const struct driver_info     epson2888_info = {
 
 #endif /* CONFIG_USB_EPSON2888 */
 
-\f
+
 /*-------------------------------------------------------------------------
  *
  * info from Jonathan McDowell <noodles@earth.li>
@@ -181,7 +181,7 @@ static const struct driver_info kc2190_info = {
 };
 #endif /* CONFIG_USB_KC2190 */
 
-\f
+
 #ifdef CONFIG_USB_ARMLINUX
 #define        HAVE_HARDWARE
 
@@ -222,7 +222,7 @@ static const struct driver_info     blob_info = {
 
 #endif /* CONFIG_USB_ARMLINUX */
 
-\f
+
 /*-------------------------------------------------------------------------*/
 
 #ifndef        HAVE_HARDWARE
index 9b2bc19..c9efb7d 100644 (file)
@@ -221,7 +221,7 @@ struct kaweth_device
        dma_addr_t rxbufferhandle;
        __u8 *rx_buf;
 
-       
+
        struct sk_buff *tx_skb;
 
        __u8 *firmware_buf;
index 17c9c63..2c82fbc 100644 (file)
@@ -18,7 +18,7 @@
 
 
 /*
- * Prolific PL-2301/PL-2302 driver ... http://www.prolific.com.tw/ 
+ * Prolific PL-2301/PL-2302 driver ... http://www.prolific.com.tw/
  *
  * The protocol and handshaking used here should be bug-compatible
  * with the Linux 2.2 "plusb" driver, by Deti Fliegl.
index e415465..aaa89b4 100644 (file)
@@ -381,7 +381,7 @@ insanity:
 }
 EXPORT_SYMBOL_GPL(usbnet_update_max_qlen);
 
-\f
+
 /*-------------------------------------------------------------------------
  *
  * Network Device Driver (peer link to "Host Device", from USB host)
index 356cf8d..ec8e1b3 100644 (file)
@@ -242,9 +242,15 @@ struct virtnet_info {
        /* Packet virtio header size */
        u8 hdr_len;
 
-       /* Work struct for refilling if we run low on memory. */
+       /* Work struct for delayed refilling if we run low on memory. */
        struct delayed_work refill;
 
+       /* Is delayed refill enabled? */
+       bool refill_enabled;
+
+       /* The lock to synchronize the access to refill_enabled */
+       spinlock_t refill_lock;
+
        /* Work struct for config space updates */
        struct work_struct config_work;
 
@@ -348,6 +354,20 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
        return p;
 }
 
+static void enable_delayed_refill(struct virtnet_info *vi)
+{
+       spin_lock_bh(&vi->refill_lock);
+       vi->refill_enabled = true;
+       spin_unlock_bh(&vi->refill_lock);
+}
+
+static void disable_delayed_refill(struct virtnet_info *vi)
+{
+       spin_lock_bh(&vi->refill_lock);
+       vi->refill_enabled = false;
+       spin_unlock_bh(&vi->refill_lock);
+}
+
 static void virtqueue_napi_schedule(struct napi_struct *napi,
                                    struct virtqueue *vq)
 {
@@ -1527,8 +1547,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
        }
 
        if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
-               if (!try_fill_recv(vi, rq, GFP_ATOMIC))
-                       schedule_delayed_work(&vi->refill, 0);
+               if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
+                       spin_lock(&vi->refill_lock);
+                       if (vi->refill_enabled)
+                               schedule_delayed_work(&vi->refill, 0);
+                       spin_unlock(&vi->refill_lock);
+               }
        }
 
        u64_stats_update_begin(&rq->stats.syncp);
@@ -1651,6 +1675,8 @@ static int virtnet_open(struct net_device *dev)
        struct virtnet_info *vi = netdev_priv(dev);
        int i, err;
 
+       enable_delayed_refill(vi);
+
        for (i = 0; i < vi->max_queue_pairs; i++) {
                if (i < vi->curr_queue_pairs)
                        /* Make sure we have some buffers: if oom use wq. */
@@ -2033,6 +2059,8 @@ static int virtnet_close(struct net_device *dev)
        struct virtnet_info *vi = netdev_priv(dev);
        int i;
 
+       /* Make sure NAPI doesn't schedule refill work */
+       disable_delayed_refill(vi);
        /* Make sure refill_work doesn't re-enable napi! */
        cancel_delayed_work_sync(&vi->refill);
 
@@ -2792,6 +2820,8 @@ static int virtnet_restore_up(struct virtio_device *vdev)
 
        virtio_device_ready(vdev);
 
+       enable_delayed_refill(vi);
+
        if (netif_running(vi->dev)) {
                err = virtnet_open(vi->dev);
                if (err)
@@ -3535,6 +3565,7 @@ static int virtnet_probe(struct virtio_device *vdev)
        vdev->priv = vi;
 
        INIT_WORK(&vi->config_work, virtnet_config_changed_work);
+       spin_lock_init(&vi->refill_lock);
 
        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
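
The virtio_net hunks above add a refill_enabled flag guarded by refill_lock so virtnet_close() can stop NAPI from queueing new refill work before it cancels the delayed work. A rough user-space analogue of that guard pattern (hypothetical names; a pthread spinlock stands in for the kernel's BH-disabling spin_lock_bh):

#include <pthread.h>
#include <stdbool.h>

struct refill_gate {
        pthread_spinlock_t lock;
        bool enabled;
};

static void refill_gate_init(struct refill_gate *g)
{
        pthread_spin_init(&g->lock, PTHREAD_PROCESS_PRIVATE);
        g->enabled = false;
}

/* Flip the gate under the lock (open on device open/restore, closed on close). */
static void refill_gate_set(struct refill_gate *g, bool on)
{
        pthread_spin_lock(&g->lock);
        g->enabled = on;
        pthread_spin_unlock(&g->lock);
}

/* The scheduling path re-checks the gate under the same lock, so no new
 * refill work can be queued once the gate has been closed.
 */
static bool refill_gate_may_schedule(struct refill_gate *g)
{
        bool ok;

        pthread_spin_lock(&g->lock);
        ok = g->enabled;
        pthread_spin_unlock(&g->lock);
        return ok;
}
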
index dd831ad..53b3b24 100644 (file)
@@ -2075,17 +2075,8 @@ vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
        rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
 
        if (rxd_done < budget) {
-               struct Vmxnet3_RxCompDesc *rcd;
-#ifdef __BIG_ENDIAN_BITFIELD
-               struct Vmxnet3_RxCompDesc rxComp;
-#endif
                napi_complete_done(napi, rxd_done);
                vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
-               /* after unmasking the interrupt, check if any descriptors were completed */
-               vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
-                                 &rxComp);
-               if (rcd->gen == rq->comp_ring.gen && napi_reschedule(napi))
-                       vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
        }
        return rxd_done;
 }
index 8b0710b..90811ab 100644 (file)
@@ -2243,7 +2243,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device
                                      struct vxlan_sock *sock4,
                                      struct sk_buff *skb, int oif, u8 tos,
                                      __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
-                                     struct dst_cache *dst_cache,
+                                     __u8 flow_flags, struct dst_cache *dst_cache,
                                      const struct ip_tunnel_info *info)
 {
        bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
@@ -2270,6 +2270,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device
        fl4.saddr = *saddr;
        fl4.fl4_dport = dport;
        fl4.fl4_sport = sport;
+       fl4.flowi4_flags = flow_flags;
 
        rt = ip_route_output_key(vxlan->net, &fl4);
        if (!IS_ERR(rt)) {
@@ -2459,7 +2460,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        unsigned int pkt_len = skb->len;
        __be16 src_port = 0, dst_port;
        struct dst_entry *ndst = NULL;
-       __u8 tos, ttl;
+       __u8 tos, ttl, flow_flags = 0;
        int ifindex;
        int err;
        u32 flags = vxlan->cfg.flags;
@@ -2525,6 +2526,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                }
                dst = &remote_ip;
                dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
+               flow_flags = info->key.flow_flags;
                vni = tunnel_id_to_key32(info->key.tun_id);
                ifindex = 0;
                dst_cache = &info->dst_cache;
@@ -2555,7 +2557,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos,
                                     dst->sin.sin_addr.s_addr,
                                     &local_ip.sin.sin_addr.s_addr,
-                                    dst_port, src_port,
+                                    dst_port, src_port, flow_flags,
                                     dst_cache, info);
                if (IS_ERR(rt)) {
                        err = PTR_ERR(rt);
@@ -3061,7 +3063,8 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
                rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
                                     info->key.u.ipv4.dst,
                                     &info->key.u.ipv4.src, dport, sport,
-                                    &info->dst_cache, info);
+                                    info->key.flow_flags, &info->dst_cache,
+                                    info);
                if (IS_ERR(rt))
                        return PTR_ERR(rt);
                ip_rt_put(rt);
index d7d33d5..c474147 100644 (file)
@@ -140,8 +140,53 @@ ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
        return ab->pci.msi.irqs[vector];
 }
 
+static inline u32
+ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset)
+{
+       u32 window_start = 0;
+
+       /* If offset lies within DP register range, use 1st window */
+       if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
+               window_start = ATH11K_PCI_WINDOW_START;
+       /* If offset lies within CE register range, use 2nd window */
+       else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
+                ATH11K_PCI_WINDOW_RANGE_MASK)
+               window_start = 2 * ATH11K_PCI_WINDOW_START;
+
+       return window_start;
+}
+
+static void
+ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value)
+{
+       u32 window_start;
+
+       /* WCN6750 uses static window based register access */
+       window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
+
+       iowrite32(value, ab->mem + window_start +
+                 (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+}
+
+static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset)
+{
+       u32 window_start;
+       u32 val;
+
+       /* WCN6750 uses static window based register access */
+       window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
+
+       val = ioread32(ab->mem + window_start +
+                      (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+       return val;
+}
+
 static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
+       .wakeup = NULL,
+       .release = NULL,
        .get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
+       .window_write32 = ath11k_ahb_window_write32_wcn6750,
+       .window_read32 = ath11k_ahb_window_read32_wcn6750,
 };
 
 static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
@@ -971,11 +1016,16 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
        }
 
        ab->hif.ops = hif_ops;
-       ab->pci.ops = pci_ops;
        ab->pdev = pdev;
        ab->hw_rev = hw_rev;
        platform_set_drvdata(pdev, ab);
 
+       ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
+       if (ret) {
+               ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+               goto err_core_free;
+       }
+
        ret = ath11k_core_pre_init(ab);
        if (ret)
                goto err_core_free;
index c8e0bc9..c3e9e4f 100644 (file)
@@ -54,9 +54,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .target_ce_count = 11,
                .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074,
                .svc_to_ce_map_len = 21,
-               .rfkill_pin = 0,
-               .rfkill_cfg = 0,
-               .rfkill_on_level = 0,
                .single_pdev_only = false,
                .rxdma1_enable = true,
                .num_rxmda_per_pdev = 1,
@@ -107,8 +104,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .fixed_mem_region = true,
                .static_window_map = false,
                .hybrid_bus_type = false,
-               .dp_window_idx = 0,
-               .ce_window_idx = 0,
                .fixed_fw_mem = false,
                .support_off_channel_tx = false,
        },
@@ -133,9 +128,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .target_ce_count = 11,
                .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018,
                .svc_to_ce_map_len = 19,
-               .rfkill_pin = 0,
-               .rfkill_cfg = 0,
-               .rfkill_on_level = 0,
                .single_pdev_only = false,
                .rxdma1_enable = true,
                .num_rxmda_per_pdev = 1,
@@ -183,8 +175,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .fixed_mem_region = true,
                .static_window_map = false,
                .hybrid_bus_type = false,
-               .dp_window_idx = 0,
-               .ce_window_idx = 0,
                .fixed_fw_mem = false,
                .support_off_channel_tx = false,
        },
@@ -209,9 +199,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .target_ce_count = 9,
                .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
                .svc_to_ce_map_len = 14,
-               .rfkill_pin = 48,
-               .rfkill_cfg = 0,
-               .rfkill_on_level = 1,
                .single_pdev_only = true,
                .rxdma1_enable = false,
                .num_rxmda_per_pdev = 2,
@@ -258,8 +245,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .fixed_mem_region = false,
                .static_window_map = false,
                .hybrid_bus_type = false,
-               .dp_window_idx = 0,
-               .ce_window_idx = 0,
                .fixed_fw_mem = false,
                .support_off_channel_tx = true,
        },
@@ -284,9 +269,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .target_ce_count = 9,
                .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074,
                .svc_to_ce_map_len = 18,
-               .rfkill_pin = 0,
-               .rfkill_cfg = 0,
-               .rfkill_on_level = 0,
                .rxdma1_enable = true,
                .num_rxmda_per_pdev = 1,
                .rx_mac_buf_ring = false,
@@ -333,8 +315,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .fixed_mem_region = false,
                .static_window_map = true,
                .hybrid_bus_type = false,
-               .dp_window_idx = 3,
-               .ce_window_idx = 2,
                .fixed_fw_mem = false,
                .support_off_channel_tx = false,
        },
@@ -359,9 +339,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .target_ce_count = 9,
                .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
                .svc_to_ce_map_len = 14,
-               .rfkill_pin = 0,
-               .rfkill_cfg = 0,
-               .rfkill_on_level = 0,
                .single_pdev_only = true,
                .rxdma1_enable = false,
                .num_rxmda_per_pdev = 2,
@@ -408,8 +385,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .fixed_mem_region = false,
                .static_window_map = false,
                .hybrid_bus_type = false,
-               .dp_window_idx = 0,
-               .ce_window_idx = 0,
                .fixed_fw_mem = false,
                .support_off_channel_tx = true,
        },
@@ -434,9 +409,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .target_ce_count = 9,
                .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
                .svc_to_ce_map_len = 14,
-               .rfkill_pin = 0,
-               .rfkill_cfg = 0,
-               .rfkill_on_level = 0,
                .single_pdev_only = true,
                .rxdma1_enable = false,
                .num_rxmda_per_pdev = 2,
@@ -482,8 +454,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .fixed_mem_region = false,
                .static_window_map = false,
                .hybrid_bus_type = false,
-               .dp_window_idx = 0,
-               .ce_window_idx = 0,
                .fixed_fw_mem = false,
                .support_off_channel_tx = true,
        },
@@ -508,9 +478,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .target_ce_count = 9,
                .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
                .svc_to_ce_map_len = 14,
-               .rfkill_pin = 0,
-               .rfkill_cfg = 0,
-               .rfkill_on_level = 0,
                .single_pdev_only = true,
                .rxdma1_enable = false,
                .num_rxmda_per_pdev = 1,
@@ -556,8 +523,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .fixed_mem_region = false,
                .static_window_map = true,
                .hybrid_bus_type = true,
-               .dp_window_idx = 1,
-               .ce_window_idx = 2,
                .fixed_fw_mem = true,
                .support_off_channel_tx = false,
        },
@@ -1402,27 +1367,6 @@ static int ath11k_core_start_firmware(struct ath11k_base *ab,
        return ret;
 }
 
-static int ath11k_core_rfkill_config(struct ath11k_base *ab)
-{
-       struct ath11k *ar;
-       int ret = 0, i;
-
-       if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
-               return 0;
-
-       for (i = 0; i < ab->num_radios; i++) {
-               ar = ab->pdevs[i].ar;
-
-               ret = ath11k_mac_rfkill_config(ar);
-               if (ret && ret != -EOPNOTSUPP) {
-                       ath11k_warn(ab, "failed to configure rfkill: %d", ret);
-                       return ret;
-               }
-       }
-
-       return ret;
-}
-
 int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
 {
        int ret;
@@ -1475,13 +1419,6 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
                goto err_core_stop;
        }
        ath11k_hif_irq_enable(ab);
-
-       ret = ath11k_core_rfkill_config(ab);
-       if (ret && ret != -EOPNOTSUPP) {
-               ath11k_err(ab, "failed to config rfkill: %d\n", ret);
-               goto err_core_stop;
-       }
-
        mutex_unlock(&ab->core_lock);
 
        return 0;
@@ -1550,7 +1487,6 @@ void ath11k_core_halt(struct ath11k *ar)
        cancel_delayed_work_sync(&ar->scan.timeout);
        cancel_work_sync(&ar->regd_update_work);
        cancel_work_sync(&ab->update_11d_work);
-       cancel_work_sync(&ab->rfkill_work);
 
        rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
        synchronize_rcu();
@@ -1558,28 +1494,6 @@ void ath11k_core_halt(struct ath11k *ar)
        idr_init(&ar->txmgmt_idr);
 }
 
-static void ath11k_rfkill_work(struct work_struct *work)
-{
-       struct ath11k_base *ab = container_of(work, struct ath11k_base, rfkill_work);
-       struct ath11k *ar;
-       bool rfkill_radio_on;
-       int i;
-
-       spin_lock_bh(&ab->base_lock);
-       rfkill_radio_on = ab->rfkill_radio_on;
-       spin_unlock_bh(&ab->base_lock);
-
-       for (i = 0; i < ab->num_radios; i++) {
-               ar = ab->pdevs[i].ar;
-               if (!ar)
-                       continue;
-
-               /* notify cfg80211 radio state change */
-               ath11k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
-               wiphy_rfkill_set_hw_state(ar->hw->wiphy, !rfkill_radio_on);
-       }
-}
-
 static void ath11k_update_11d(struct work_struct *work)
 {
        struct ath11k_base *ab = container_of(work, struct ath11k_base, update_11d_work);
@@ -1891,7 +1805,6 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
        init_waitqueue_head(&ab->qmi.cold_boot_waitq);
        INIT_WORK(&ab->restart_work, ath11k_core_restart);
        INIT_WORK(&ab->update_11d_work, ath11k_update_11d);
-       INIT_WORK(&ab->rfkill_work, ath11k_rfkill_work);
        INIT_WORK(&ab->reset_work, ath11k_core_reset);
        timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
        init_completion(&ab->htc_suspend);
index 2bd5eb9..afad8f5 100644 (file)
@@ -929,10 +929,6 @@ struct ath11k_base {
 
        struct ath11k_dbring_cap *db_caps;
        u32 num_db_cap;
-       struct work_struct rfkill_work;
-
-       /* true means radio is on */
-       bool rfkill_radio_on;
 
        /* To synchronize 11d scan vdev id */
        struct mutex vdev_id_11d_lock;
index 77dc5c8..bb5ac94 100644 (file)
@@ -153,9 +153,6 @@ struct ath11k_hw_params {
        u32 svc_to_ce_map_len;
 
        bool single_pdev_only;
-       u32 rfkill_pin;
-       u32 rfkill_cfg;
-       u32 rfkill_on_level;
 
        bool rxdma1_enable;
        int num_rxmda_per_pdev;
@@ -201,8 +198,6 @@ struct ath11k_hw_params {
        bool fixed_mem_region;
        bool static_window_map;
        bool hybrid_bus_type;
-       u8 dp_window_idx;
-       u8 ce_window_idx;
        bool fixed_fw_mem;
        bool support_off_channel_tx;
 };
index d83d3c9..7e91e34 100644 (file)
@@ -5611,63 +5611,6 @@ static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb,
        return 0;
 }
 
-int ath11k_mac_rfkill_config(struct ath11k *ar)
-{
-       struct ath11k_base *ab = ar->ab;
-       u32 param;
-       int ret;
-
-       if (ab->hw_params.rfkill_pin == 0)
-               return -EOPNOTSUPP;
-
-       ath11k_dbg(ab, ATH11K_DBG_MAC,
-                  "mac rfkill_pin %d rfkill_cfg %d rfkill_on_level %d",
-                  ab->hw_params.rfkill_pin, ab->hw_params.rfkill_cfg,
-                  ab->hw_params.rfkill_on_level);
-
-       param = FIELD_PREP(WMI_RFKILL_CFG_RADIO_LEVEL,
-                          ab->hw_params.rfkill_on_level) |
-               FIELD_PREP(WMI_RFKILL_CFG_GPIO_PIN_NUM,
-                          ab->hw_params.rfkill_pin) |
-               FIELD_PREP(WMI_RFKILL_CFG_PIN_AS_GPIO,
-                          ab->hw_params.rfkill_cfg);
-
-       ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_HW_RFKILL_CONFIG,
-                                       param, ar->pdev->pdev_id);
-       if (ret) {
-               ath11k_warn(ab,
-                           "failed to set rfkill config 0x%x: %d\n",
-                           param, ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-int ath11k_mac_rfkill_enable_radio(struct ath11k *ar, bool enable)
-{
-       enum wmi_rfkill_enable_radio param;
-       int ret;
-
-       if (enable)
-               param = WMI_RFKILL_ENABLE_RADIO_ON;
-       else
-               param = WMI_RFKILL_ENABLE_RADIO_OFF;
-
-       ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac %d rfkill enable %d",
-                  ar->pdev_idx, param);
-
-       ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RFKILL_ENABLE,
-                                       param, ar->pdev->pdev_id);
-       if (ret) {
-               ath11k_warn(ar->ab, "failed to set rfkill enable param %d: %d\n",
-                           param, ret);
-               return ret;
-       }
-
-       return 0;
-}
-
 static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
                             struct ieee80211_tx_control *control,
                             struct sk_buff *skb)
@@ -5922,7 +5865,6 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
        cancel_delayed_work_sync(&ar->scan.timeout);
        cancel_work_sync(&ar->regd_update_work);
        cancel_work_sync(&ar->ab->update_11d_work);
-       cancel_work_sync(&ar->ab->rfkill_work);
 
        if (ar->state_11d == ATH11K_11D_PREPARING) {
                ar->state_11d = ATH11K_11D_IDLE;
index 57ebfc5..2a0d3af 100644 (file)
@@ -148,8 +148,6 @@ u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
 
 void __ath11k_mac_scan_finish(struct ath11k *ar);
 void ath11k_mac_scan_finish(struct ath11k *ar);
-int ath11k_mac_rfkill_enable_radio(struct ath11k *ar, bool enable);
-int ath11k_mac_rfkill_config(struct ath11k *ar);
 
 struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id);
 struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
index 487a303..5bd34a6 100644 (file)
@@ -50,6 +50,22 @@ static void ath11k_pci_bus_release(struct ath11k_base *ab)
        mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
 }
 
+static u32 ath11k_pci_get_window_start(struct ath11k_base *ab, u32 offset)
+{
+       if (!ab->hw_params.static_window_map)
+               return ATH11K_PCI_WINDOW_START;
+
+       if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
+               /* if offset lies within DP register range, use 3rd window */
+               return 3 * ATH11K_PCI_WINDOW_START;
+       else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
+                ATH11K_PCI_WINDOW_RANGE_MASK)
+                /* if offset lies within CE register range, use 2nd window */
+               return 2 * ATH11K_PCI_WINDOW_START;
+       else
+               return ATH11K_PCI_WINDOW_START;
+}
+
 static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset)
 {
        struct ath11k_base *ab = ab_pci->ab;
@@ -70,26 +86,39 @@ static void
 ath11k_pci_window_write32(struct ath11k_base *ab, u32 offset, u32 value)
 {
        struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
-       u32 window_start = ATH11K_PCI_WINDOW_START;
+       u32 window_start;
 
-       spin_lock_bh(&ab_pci->window_lock);
-       ath11k_pci_select_window(ab_pci, offset);
-       iowrite32(value, ab->mem + window_start +
-                 (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
-       spin_unlock_bh(&ab_pci->window_lock);
+       window_start = ath11k_pci_get_window_start(ab, offset);
+
+       if (window_start == ATH11K_PCI_WINDOW_START) {
+               spin_lock_bh(&ab_pci->window_lock);
+               ath11k_pci_select_window(ab_pci, offset);
+               iowrite32(value, ab->mem + window_start +
+                         (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+               spin_unlock_bh(&ab_pci->window_lock);
+       } else {
+               iowrite32(value, ab->mem + window_start +
+                         (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+       }
 }
 
 static u32 ath11k_pci_window_read32(struct ath11k_base *ab, u32 offset)
 {
        struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
-       u32 window_start = ATH11K_PCI_WINDOW_START;
-       u32 val;
+       u32 window_start, val;
 
-       spin_lock_bh(&ab_pci->window_lock);
-       ath11k_pci_select_window(ab_pci, offset);
-       val = ioread32(ab->mem + window_start +
-                      (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
-       spin_unlock_bh(&ab_pci->window_lock);
+       window_start = ath11k_pci_get_window_start(ab, offset);
+
+       if (window_start == ATH11K_PCI_WINDOW_START) {
+               spin_lock_bh(&ab_pci->window_lock);
+               ath11k_pci_select_window(ab_pci, offset);
+               val = ioread32(ab->mem + window_start +
+                              (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+               spin_unlock_bh(&ab_pci->window_lock);
+       } else {
+               val = ioread32(ab->mem + window_start +
+                              (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+       }
 
        return val;
 }
@@ -110,6 +139,8 @@ static const struct ath11k_pci_ops ath11k_pci_ops_qca6390 = {
 };
 
 static const struct ath11k_pci_ops ath11k_pci_ops_qcn9074 = {
+       .wakeup = NULL,
+       .release = NULL,
        .get_msi_irq = ath11k_pci_get_msi_irq,
        .window_write32 = ath11k_pci_window_write32,
        .window_read32 = ath11k_pci_window_read32,
@@ -697,6 +728,7 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
        struct ath11k_base *ab;
        struct ath11k_pci *ab_pci;
        u32 soc_hw_version_major, soc_hw_version_minor, addr;
+       const struct ath11k_pci_ops *pci_ops;
        int ret;
 
        ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI);
@@ -754,10 +786,10 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
                        goto err_pci_free_region;
                }
 
-               ab->pci.ops = &ath11k_pci_ops_qca6390;
+               pci_ops = &ath11k_pci_ops_qca6390;
                break;
        case QCN9074_DEVICE_ID:
-               ab->pci.ops = &ath11k_pci_ops_qcn9074;
+               pci_ops = &ath11k_pci_ops_qcn9074;
                ab->hw_rev = ATH11K_HW_QCN9074_HW10;
                break;
        case WCN6855_DEVICE_ID:
@@ -787,7 +819,7 @@ unsupported_wcn6855_soc:
                        goto err_pci_free_region;
                }
 
-               ab->pci.ops = &ath11k_pci_ops_qca6390;
+               pci_ops = &ath11k_pci_ops_qca6390;
                break;
        default:
                dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
@@ -796,6 +828,12 @@ unsupported_wcn6855_soc:
                goto err_pci_free_region;
        }
 
+       ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
+       if (ret) {
+               ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+               goto err_pci_free_region;
+       }
+
        ret = ath11k_pcic_init_msi_config(ab);
        if (ret) {
                ath11k_err(ab, "failed to init msi config: %d\n", ret);
index cf12b98..1adf20e 100644 (file)
@@ -140,23 +140,8 @@ int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
 }
 EXPORT_SYMBOL(ath11k_pcic_init_msi_config);
 
-static inline u32 ath11k_pcic_get_window_start(struct ath11k_base *ab,
-                                              u32 offset)
-{
-       u32 window_start = 0;
-
-       if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
-               window_start = ab->hw_params.dp_window_idx * ATH11K_PCI_WINDOW_START;
-       else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
-                ATH11K_PCI_WINDOW_RANGE_MASK)
-               window_start = ab->hw_params.ce_window_idx * ATH11K_PCI_WINDOW_START;
-
-       return window_start;
-}
-
 void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
 {
-       u32 window_start;
        int ret = 0;
 
        /* for offset beyond BAR + 4K - 32, may
@@ -166,15 +151,10 @@ void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
            offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->wakeup)
                ret = ab->pci.ops->wakeup(ab);
 
-       if (offset < ATH11K_PCI_WINDOW_START) {
+       if (offset < ATH11K_PCI_WINDOW_START)
                iowrite32(value, ab->mem  + offset);
-       } else if (ab->hw_params.static_window_map) {
-               window_start = ath11k_pcic_get_window_start(ab, offset);
-               iowrite32(value, ab->mem + window_start +
-                         (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
-       } else if (ab->pci.ops->window_write32) {
+       else
                ab->pci.ops->window_write32(ab, offset, value);
-       }
 
        if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
            offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->release &&
@@ -185,9 +165,8 @@ EXPORT_SYMBOL(ath11k_pcic_write32);
 
 u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
 {
-       u32 val = 0;
-       u32 window_start;
        int ret = 0;
+       u32 val;
 
        /* for offset beyond BAR + 4K - 32, may
         * need to wakeup the device to access.
@@ -196,15 +175,10 @@ u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
            offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->wakeup)
                ret = ab->pci.ops->wakeup(ab);
 
-       if (offset < ATH11K_PCI_WINDOW_START) {
+       if (offset < ATH11K_PCI_WINDOW_START)
                val = ioread32(ab->mem + offset);
-       } else if (ab->hw_params.static_window_map) {
-               window_start = ath11k_pcic_get_window_start(ab, offset);
-               val = ioread32(ab->mem + window_start +
-                              (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
-       } else if (ab->pci.ops->window_read32) {
+       else
                val = ab->pci.ops->window_read32(ab, offset);
-       }
 
        if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
            offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->release &&
@@ -516,11 +490,6 @@ static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg)
 static int
 ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
 {
-       if (!ab->pci.ops->get_msi_irq) {
-               WARN_ONCE(1, "get_msi_irq pci op not defined");
-               return -EOPNOTSUPP;
-       }
-
        return ab->pci.ops->get_msi_irq(ab, vector);
 }
 
@@ -746,3 +715,19 @@ int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
        return 0;
 }
 EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe);
+
+int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
+                                const struct ath11k_pci_ops *pci_ops)
+{
+       if (!pci_ops)
+               return 0;
+
+       /* Return error if mandatory pci_ops callbacks are missing */
+       if (!pci_ops->get_msi_irq || !pci_ops->window_write32 ||
+           !pci_ops->window_read32)
+               return -EINVAL;
+
+       ab->pci.ops = pci_ops;
+       return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_register_pci_ops);
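
The registration helper added above centralizes the "mandatory callback" check at registration time, which is why the per-call WARN_ONCE guard could be dropped from ath11k_pcic_get_msi_irq(). A hedged sketch of the same pattern for a generic ops table; the structure and field names below are illustrative, not driver API.

#include <errno.h>

struct bus_ops {
        int  (*get_irq)(void *ctx, unsigned int vector);   /* mandatory */
        void (*wakeup)(void *ctx);                          /* optional */
        void (*release)(void *ctx);                         /* optional */
};

struct bus_dev {
        const struct bus_ops *ops;
        void *ctx;
};

/* Reject an ops table up front if a mandatory callback is missing, so the
 * hot paths can call it later without re-checking every time.
 */
static int bus_register_ops(struct bus_dev *dev, const struct bus_ops *ops)
{
        if (!ops)
                return 0;       /* nothing to register */

        if (!ops->get_irq)
                return -EINVAL;

        dev->ops = ops;
        return 0;
}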
index c53d862..0afbb34 100644 (file)
@@ -43,4 +43,6 @@ int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
 void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab);
 void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab);
 int ath11k_pcic_init_msi_config(struct ath11k_base *ab);
+int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
+                                const struct ath11k_pci_ops *pci_ops);
 #endif
index 5d9437e..88ee4f9 100644 (file)
@@ -129,8 +129,6 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
                = { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
        [WMI_TAG_STATS_EVENT]
                = { .min_len = sizeof(struct wmi_stats_event) },
-       [WMI_TAG_RFKILL_EVENT] = {
-               .min_len = sizeof(struct wmi_rfkill_state_change_ev) },
        [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
                = { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
        [WMI_TAG_HOST_SWFDA_EVENT] = {
@@ -533,8 +531,6 @@ static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab,
        cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
        cap->num_msdu_desc = ev->num_msdu_desc;
 
-       ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi sys cap info 0x%x\n", cap->sys_cap_info);
-
        return 0;
 }
 
@@ -7566,40 +7562,6 @@ exit:
        kfree(tb);
 }
 
-static void ath11k_rfkill_state_change_event(struct ath11k_base *ab,
-                                            struct sk_buff *skb)
-{
-       const struct wmi_rfkill_state_change_ev *ev;
-       const void **tb;
-       int ret;
-
-       tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
-       if (IS_ERR(tb)) {
-               ret = PTR_ERR(tb);
-               ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
-               return;
-       }
-
-       ev = tb[WMI_TAG_RFKILL_EVENT];
-       if (!ev) {
-               kfree(tb);
-               return;
-       }
-
-       ath11k_dbg(ab, ATH11K_DBG_MAC,
-                  "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
-                  ev->gpio_pin_num,
-                  ev->int_type,
-                  ev->radio_state);
-
-       spin_lock_bh(&ab->base_lock);
-       ab->rfkill_radio_on = (ev->radio_state == WMI_RFKILL_RADIO_STATE_ON);
-       spin_unlock_bh(&ab->base_lock);
-
-       queue_work(ab->workqueue, &ab->rfkill_work);
-       kfree(tb);
-}
-
 static void
 ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
                                  struct sk_buff *skb)
@@ -7995,9 +7957,6 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
        case WMI_11D_NEW_COUNTRY_EVENTID:
                ath11k_reg_11d_new_cc_event(ab, skb);
                break;
-       case WMI_RFKILL_STATE_CHANGE_EVENTID:
-               ath11k_rfkill_state_change_event(ab, skb);
-               break;
        case WMI_DIAG_EVENTID:
                ath11k_wmi_diag_event(ab, skb);
                break;
index b1fad47..4da248f 100644 (file)
@@ -5328,31 +5328,6 @@ struct target_resource_config {
        u32 twt_ap_sta_count;
 };
 
-enum wmi_sys_cap_info_flags {
-       WMI_SYS_CAP_INFO_RXTX_LED       = BIT(0),
-       WMI_SYS_CAP_INFO_RFKILL = BIT(1),
-};
-
-#define WMI_RFKILL_CFG_GPIO_PIN_NUM            GENMASK(5, 0)
-#define WMI_RFKILL_CFG_RADIO_LEVEL             BIT(6)
-#define WMI_RFKILL_CFG_PIN_AS_GPIO             GENMASK(10, 7)
-
-enum wmi_rfkill_enable_radio {
-       WMI_RFKILL_ENABLE_RADIO_ON      = 0,
-       WMI_RFKILL_ENABLE_RADIO_OFF     = 1,
-};
-
-enum wmi_rfkill_radio_state {
-       WMI_RFKILL_RADIO_STATE_OFF      = 1,
-       WMI_RFKILL_RADIO_STATE_ON       = 2,
-};
-
-struct wmi_rfkill_state_change_ev {
-       u32 gpio_pin_num;
-       u32 int_type;
-       u32 radio_state;
-} __packed;
-
 enum wmi_debug_log_param {
        WMI_DEBUG_LOG_PARAM_LOG_LEVEL = 0x1,
        WMI_DEBUG_LOG_PARAM_VDEV_ENABLE,
index 2741370..26bec79 100644 (file)
@@ -5,6 +5,7 @@ wcn36xx-y +=   main.o \
                txrx.o \
                smd.o \
                pmc.o \
-               debug.o
+               debug.o \
+               firmware.o
 
 wcn36xx-$(CONFIG_NL80211_TESTMODE) += testmode.o
index 6af306a..58b3c05 100644 (file)
@@ -21,6 +21,7 @@
 #include "wcn36xx.h"
 #include "debug.h"
 #include "pmc.h"
+#include "firmware.h"
 
 #ifdef CONFIG_WCN36XX_DEBUGFS
 
@@ -136,6 +137,42 @@ static const struct file_operations fops_wcn36xx_dump = {
        .write =       write_file_dump,
 };
 
+static ssize_t read_file_firmware_feature_caps(struct file *file,
+                                              char __user *user_buf,
+                                              size_t count, loff_t *ppos)
+{
+       struct wcn36xx *wcn = file->private_data;
+       size_t len = 0, buf_len = 2048;
+       char *buf;
+       int i;
+       int ret;
+
+       buf = kzalloc(buf_len, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       mutex_lock(&wcn->hal_mutex);
+       for (i = 0; i < MAX_FEATURE_SUPPORTED; i++) {
+               if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, i)) {
+                       len += scnprintf(buf + len, buf_len - len, "%s\n",
+                                        wcn36xx_firmware_get_cap_name(i));
+               }
+               if (len >= buf_len)
+                       break;
+       }
+       mutex_unlock(&wcn->hal_mutex);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       kfree(buf);
+
+       return ret;
+}
+
+static const struct file_operations fops_wcn36xx_firmware_feat_caps = {
+       .open = simple_open,
+       .read = read_file_firmware_feature_caps,
+};
+
 #define ADD_FILE(name, mode, fop, priv_data)           \
        do {                                                    \
                struct dentry *d;                               \
@@ -163,6 +200,8 @@ void wcn36xx_debugfs_init(struct wcn36xx *wcn)
 
        ADD_FILE(bmps_switcher, 0600, &fops_wcn36xx_bmps, wcn);
        ADD_FILE(dump, 0200, &fops_wcn36xx_dump, wcn);
+       ADD_FILE(firmware_feat_caps, 0200,
+                &fops_wcn36xx_firmware_feat_caps, wcn);
 }
 
 void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
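
The firmware_feat_caps file added above follows the usual debugfs read pattern: format the text into a temporary buffer while holding the driver lock, then let simple_read_from_buffer() handle the offset and count bookkeeping. A minimal sketch of that pattern with an assumed private struct (not wcn36xx's real one):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

struct demo_priv {
        struct mutex lock;
        int value;
};

static ssize_t demo_read(struct file *file, char __user *user_buf,
                         size_t count, loff_t *ppos)
{
        struct demo_priv *priv = file->private_data;
        char buf[32];
        int len;

        mutex_lock(&priv->lock);
        len = scnprintf(buf, sizeof(buf), "value: %d\n", priv->value);
        mutex_unlock(&priv->lock);

        /* Handles partial reads and EOF based on *ppos for us. */
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static const struct file_operations demo_fops = {
        .open = simple_open,
        .read = demo_read,
};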
index 46307aa..7116d96 100644 (file)
@@ -31,6 +31,7 @@ struct wcn36xx_dfs_entry {
        struct dentry *rootdir;
        struct wcn36xx_dfs_file file_bmps_switcher;
        struct wcn36xx_dfs_file file_dump;
+       struct wcn36xx_dfs_file file_firmware_feat_caps;
 };
 
 void wcn36xx_debugfs_init(struct wcn36xx *wcn);
diff --git a/drivers/net/wireless/ath/wcn36xx/firmware.c b/drivers/net/wireless/ath/wcn36xx/firmware.c
new file mode 100644 (file)
index 0000000..4b7f439
--- /dev/null
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "wcn36xx.h"
+#include "firmware.h"
+
+#define DEFINE(s) [s] = #s
+
+static const char * const wcn36xx_firmware_caps_names[] = {
+       DEFINE(MCC),
+       DEFINE(P2P),
+       DEFINE(DOT11AC),
+       DEFINE(SLM_SESSIONIZATION),
+       DEFINE(DOT11AC_OPMODE),
+       DEFINE(SAP32STA),
+       DEFINE(TDLS),
+       DEFINE(P2P_GO_NOA_DECOUPLE_INIT_SCAN),
+       DEFINE(WLANACTIVE_OFFLOAD),
+       DEFINE(BEACON_OFFLOAD),
+       DEFINE(SCAN_OFFLOAD),
+       DEFINE(ROAM_OFFLOAD),
+       DEFINE(BCN_MISS_OFFLOAD),
+       DEFINE(STA_POWERSAVE),
+       DEFINE(STA_ADVANCED_PWRSAVE),
+       DEFINE(AP_UAPSD),
+       DEFINE(AP_DFS),
+       DEFINE(BLOCKACK),
+       DEFINE(PHY_ERR),
+       DEFINE(BCN_FILTER),
+       DEFINE(RTT),
+       DEFINE(RATECTRL),
+       DEFINE(WOW),
+       DEFINE(WLAN_ROAM_SCAN_OFFLOAD),
+       DEFINE(SPECULATIVE_PS_POLL),
+       DEFINE(SCAN_SCH),
+       DEFINE(IBSS_HEARTBEAT_OFFLOAD),
+       DEFINE(WLAN_SCAN_OFFLOAD),
+       DEFINE(WLAN_PERIODIC_TX_PTRN),
+       DEFINE(ADVANCE_TDLS),
+       DEFINE(BATCH_SCAN),
+       DEFINE(FW_IN_TX_PATH),
+       DEFINE(EXTENDED_NSOFFLOAD_SLOT),
+       DEFINE(CH_SWITCH_V1),
+       DEFINE(HT40_OBSS_SCAN),
+       DEFINE(UPDATE_CHANNEL_LIST),
+       DEFINE(WLAN_MCADDR_FLT),
+       DEFINE(WLAN_CH144),
+       DEFINE(NAN),
+       DEFINE(TDLS_SCAN_COEXISTENCE),
+       DEFINE(LINK_LAYER_STATS_MEAS),
+       DEFINE(MU_MIMO),
+       DEFINE(EXTENDED_SCAN),
+       DEFINE(DYNAMIC_WMM_PS),
+       DEFINE(MAC_SPOOFED_SCAN),
+       DEFINE(BMU_ERROR_GENERIC_RECOVERY),
+       DEFINE(DISA),
+       DEFINE(FW_STATS),
+       DEFINE(WPS_PRBRSP_TMPL),
+       DEFINE(BCN_IE_FLT_DELTA),
+       DEFINE(TDLS_OFF_CHANNEL),
+       DEFINE(RTT3),
+       DEFINE(MGMT_FRAME_LOGGING),
+       DEFINE(ENHANCED_TXBD_COMPLETION),
+       DEFINE(LOGGING_ENHANCEMENT),
+       DEFINE(EXT_SCAN_ENHANCED),
+       DEFINE(MEMORY_DUMP_SUPPORTED),
+       DEFINE(PER_PKT_STATS_SUPPORTED),
+       DEFINE(EXT_LL_STAT),
+       DEFINE(WIFI_CONFIG),
+       DEFINE(ANTENNA_DIVERSITY_SELECTION),
+};
+
+#undef DEFINE
+
+const char *wcn36xx_firmware_get_cap_name(enum wcn36xx_firmware_feat_caps x)
+{
+       if (x >= ARRAY_SIZE(wcn36xx_firmware_caps_names))
+               return "UNKNOWN";
+       return wcn36xx_firmware_caps_names[x];
+}
+
+void wcn36xx_firmware_set_feat_caps(u32 *bitmap,
+                                   enum wcn36xx_firmware_feat_caps cap)
+{
+       int arr_idx, bit_idx;
+
+       if (cap < 0 || cap > 127) {
+               wcn36xx_warn("error cap idx %d\n", cap);
+               return;
+       }
+
+       arr_idx = cap / 32;
+       bit_idx = cap % 32;
+       bitmap[arr_idx] |= (1 << bit_idx);
+}
+
+int wcn36xx_firmware_get_feat_caps(u32 *bitmap,
+                                  enum wcn36xx_firmware_feat_caps cap)
+{
+       int arr_idx, bit_idx;
+
+       if (cap < 0 || cap > 127) {
+               wcn36xx_warn("error cap idx %d\n", cap);
+               return -EINVAL;
+       }
+
+       arr_idx = cap / 32;
+       bit_idx = cap % 32;
+
+       return (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
+}
+
+void wcn36xx_firmware_clear_feat_caps(u32 *bitmap,
+                                     enum wcn36xx_firmware_feat_caps cap)
+{
+       int arr_idx, bit_idx;
+
+       if (cap < 0 || cap > 127) {
+               wcn36xx_warn("error cap idx %d\n", cap);
+               return;
+       }
+
+       arr_idx = cap / 32;
+       bit_idx = cap % 32;
+       bitmap[arr_idx] &= ~(1 << bit_idx);
+}
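
The helpers moved into firmware.c above all reduce to the same word/bit split: capability N lives in 32-bit word N/32 at bit N%32 of the bitmap. A standalone sketch of that arithmetic in plain user-space C, independent of the driver:

#include <stdint.h>
#include <stdio.h>

#define MAX_CAPS  128
#define CAP_WORDS (MAX_CAPS / 32)

static void cap_set(uint32_t *bitmap, unsigned int cap)
{
        if (cap >= MAX_CAPS)
                return;
        bitmap[cap / 32] |= 1u << (cap % 32);
}

static int cap_test(const uint32_t *bitmap, unsigned int cap)
{
        if (cap >= MAX_CAPS)
                return 0;
        return !!(bitmap[cap / 32] & (1u << (cap % 32)));
}

int main(void)
{
        uint32_t caps[CAP_WORDS] = { 0 };

        cap_set(caps, 37);      /* e.g. WLAN_CH144 in the enum above */
        printf("cap 37 set: %d, cap 38 set: %d\n",
               cap_test(caps, 37), cap_test(caps, 38));
        return 0;
}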
diff --git a/drivers/net/wireless/ath/wcn36xx/firmware.h b/drivers/net/wireless/ath/wcn36xx/firmware.h
new file mode 100644 (file)
index 0000000..f991cf9
--- /dev/null
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _FIRMWARE_H_
+#define _FIRMWARE_H_
+
+/* Capability bitmap exchange definitions and macros starts */
+
+enum wcn36xx_firmware_feat_caps {
+       MCC = 0,
+       P2P = 1,
+       DOT11AC = 2,
+       SLM_SESSIONIZATION = 3,
+       DOT11AC_OPMODE = 4,
+       SAP32STA = 5,
+       TDLS = 6,
+       P2P_GO_NOA_DECOUPLE_INIT_SCAN = 7,
+       WLANACTIVE_OFFLOAD = 8,
+       BEACON_OFFLOAD = 9,
+       SCAN_OFFLOAD = 10,
+       ROAM_OFFLOAD = 11,
+       BCN_MISS_OFFLOAD = 12,
+       STA_POWERSAVE = 13,
+       STA_ADVANCED_PWRSAVE = 14,
+       AP_UAPSD = 15,
+       AP_DFS = 16,
+       BLOCKACK = 17,
+       PHY_ERR = 18,
+       BCN_FILTER = 19,
+       RTT = 20,
+       RATECTRL = 21,
+       WOW = 22,
+       WLAN_ROAM_SCAN_OFFLOAD = 23,
+       SPECULATIVE_PS_POLL = 24,
+       SCAN_SCH = 25,
+       IBSS_HEARTBEAT_OFFLOAD = 26,
+       WLAN_SCAN_OFFLOAD = 27,
+       WLAN_PERIODIC_TX_PTRN = 28,
+       ADVANCE_TDLS = 29,
+       BATCH_SCAN = 30,
+       FW_IN_TX_PATH = 31,
+       EXTENDED_NSOFFLOAD_SLOT = 32,
+       CH_SWITCH_V1 = 33,
+       HT40_OBSS_SCAN = 34,
+       UPDATE_CHANNEL_LIST = 35,
+       WLAN_MCADDR_FLT = 36,
+       WLAN_CH144 = 37,
+       NAN = 38,
+       TDLS_SCAN_COEXISTENCE = 39,
+       LINK_LAYER_STATS_MEAS = 40,
+       MU_MIMO = 41,
+       EXTENDED_SCAN = 42,
+       DYNAMIC_WMM_PS = 43,
+       MAC_SPOOFED_SCAN = 44,
+       BMU_ERROR_GENERIC_RECOVERY = 45,
+       DISA = 46,
+       FW_STATS = 47,
+       WPS_PRBRSP_TMPL = 48,
+       BCN_IE_FLT_DELTA = 49,
+       TDLS_OFF_CHANNEL = 51,
+       RTT3 = 52,
+       MGMT_FRAME_LOGGING = 53,
+       ENHANCED_TXBD_COMPLETION = 54,
+       LOGGING_ENHANCEMENT = 55,
+       EXT_SCAN_ENHANCED = 56,
+       MEMORY_DUMP_SUPPORTED = 57,
+       PER_PKT_STATS_SUPPORTED = 58,
+       EXT_LL_STAT = 60,
+       WIFI_CONFIG = 61,
+       ANTENNA_DIVERSITY_SELECTION = 62,
+
+       MAX_FEATURE_SUPPORTED = 128,
+};
+
+void wcn36xx_firmware_set_feat_caps(u32 *bitmap,
+                                   enum wcn36xx_firmware_feat_caps cap);
+int wcn36xx_firmware_get_feat_caps(u32 *bitmap,
+                                  enum wcn36xx_firmware_feat_caps cap);
+void wcn36xx_firmware_clear_feat_caps(u32 *bitmap,
+                                     enum wcn36xx_firmware_feat_caps cap);
+
+const char *wcn36xx_firmware_get_cap_name(enum wcn36xx_firmware_feat_caps x);
+
+#endif /* _FIRMWARE_H_ */
+
index a1afe1f..f1a43fd 100644 (file)
@@ -4758,74 +4758,6 @@ struct wcn36xx_hal_set_power_params_resp {
        u32 status;
 } __packed;
 
-/* Capability bitmap exchange definitions and macros starts */
-
-enum place_holder_in_cap_bitmap {
-       MCC = 0,
-       P2P = 1,
-       DOT11AC = 2,
-       SLM_SESSIONIZATION = 3,
-       DOT11AC_OPMODE = 4,
-       SAP32STA = 5,
-       TDLS = 6,
-       P2P_GO_NOA_DECOUPLE_INIT_SCAN = 7,
-       WLANACTIVE_OFFLOAD = 8,
-       BEACON_OFFLOAD = 9,
-       SCAN_OFFLOAD = 10,
-       ROAM_OFFLOAD = 11,
-       BCN_MISS_OFFLOAD = 12,
-       STA_POWERSAVE = 13,
-       STA_ADVANCED_PWRSAVE = 14,
-       AP_UAPSD = 15,
-       AP_DFS = 16,
-       BLOCKACK = 17,
-       PHY_ERR = 18,
-       BCN_FILTER = 19,
-       RTT = 20,
-       RATECTRL = 21,
-       WOW = 22,
-       WLAN_ROAM_SCAN_OFFLOAD = 23,
-       SPECULATIVE_PS_POLL = 24,
-       SCAN_SCH = 25,
-       IBSS_HEARTBEAT_OFFLOAD = 26,
-       WLAN_SCAN_OFFLOAD = 27,
-       WLAN_PERIODIC_TX_PTRN = 28,
-       ADVANCE_TDLS = 29,
-       BATCH_SCAN = 30,
-       FW_IN_TX_PATH = 31,
-       EXTENDED_NSOFFLOAD_SLOT = 32,
-       CH_SWITCH_V1 = 33,
-       HT40_OBSS_SCAN = 34,
-       UPDATE_CHANNEL_LIST = 35,
-       WLAN_MCADDR_FLT = 36,
-       WLAN_CH144 = 37,
-       NAN = 38,
-       TDLS_SCAN_COEXISTENCE = 39,
-       LINK_LAYER_STATS_MEAS = 40,
-       MU_MIMO = 41,
-       EXTENDED_SCAN = 42,
-       DYNAMIC_WMM_PS = 43,
-       MAC_SPOOFED_SCAN = 44,
-       BMU_ERROR_GENERIC_RECOVERY = 45,
-       DISA = 46,
-       FW_STATS = 47,
-       WPS_PRBRSP_TMPL = 48,
-       BCN_IE_FLT_DELTA = 49,
-       TDLS_OFF_CHANNEL = 51,
-       RTT3 = 52,
-       MGMT_FRAME_LOGGING = 53,
-       ENHANCED_TXBD_COMPLETION = 54,
-       LOGGING_ENHANCEMENT = 55,
-       EXT_SCAN_ENHANCED = 56,
-       MEMORY_DUMP_SUPPORTED = 57,
-       PER_PKT_STATS_SUPPORTED = 58,
-       EXT_LL_STAT = 60,
-       WIFI_CONFIG = 61,
-       ANTENNA_DIVERSITY_SELECTION = 62,
-
-       MAX_FEATURE_SUPPORTED = 128,
-};
-
 #define WCN36XX_HAL_CAPS_SIZE 4
 
 struct wcn36xx_hal_feat_caps_msg {
index dc59caf..6b8d288 100644 (file)
@@ -28,6 +28,7 @@
 #include <net/ipv6.h>
 #include "wcn36xx.h"
 #include "testmode.h"
+#include "firmware.h"
 
 unsigned int wcn36xx_dbg_mask;
 module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644);
@@ -192,88 +193,15 @@ static inline u8 get_sta_index(struct ieee80211_vif *vif,
               sta_priv->sta_index;
 }
 
-#define DEFINE(s) [s] = #s
-
-static const char * const wcn36xx_caps_names[] = {
-       DEFINE(MCC),
-       DEFINE(P2P),
-       DEFINE(DOT11AC),
-       DEFINE(SLM_SESSIONIZATION),
-       DEFINE(DOT11AC_OPMODE),
-       DEFINE(SAP32STA),
-       DEFINE(TDLS),
-       DEFINE(P2P_GO_NOA_DECOUPLE_INIT_SCAN),
-       DEFINE(WLANACTIVE_OFFLOAD),
-       DEFINE(BEACON_OFFLOAD),
-       DEFINE(SCAN_OFFLOAD),
-       DEFINE(ROAM_OFFLOAD),
-       DEFINE(BCN_MISS_OFFLOAD),
-       DEFINE(STA_POWERSAVE),
-       DEFINE(STA_ADVANCED_PWRSAVE),
-       DEFINE(AP_UAPSD),
-       DEFINE(AP_DFS),
-       DEFINE(BLOCKACK),
-       DEFINE(PHY_ERR),
-       DEFINE(BCN_FILTER),
-       DEFINE(RTT),
-       DEFINE(RATECTRL),
-       DEFINE(WOW),
-       DEFINE(WLAN_ROAM_SCAN_OFFLOAD),
-       DEFINE(SPECULATIVE_PS_POLL),
-       DEFINE(SCAN_SCH),
-       DEFINE(IBSS_HEARTBEAT_OFFLOAD),
-       DEFINE(WLAN_SCAN_OFFLOAD),
-       DEFINE(WLAN_PERIODIC_TX_PTRN),
-       DEFINE(ADVANCE_TDLS),
-       DEFINE(BATCH_SCAN),
-       DEFINE(FW_IN_TX_PATH),
-       DEFINE(EXTENDED_NSOFFLOAD_SLOT),
-       DEFINE(CH_SWITCH_V1),
-       DEFINE(HT40_OBSS_SCAN),
-       DEFINE(UPDATE_CHANNEL_LIST),
-       DEFINE(WLAN_MCADDR_FLT),
-       DEFINE(WLAN_CH144),
-       DEFINE(NAN),
-       DEFINE(TDLS_SCAN_COEXISTENCE),
-       DEFINE(LINK_LAYER_STATS_MEAS),
-       DEFINE(MU_MIMO),
-       DEFINE(EXTENDED_SCAN),
-       DEFINE(DYNAMIC_WMM_PS),
-       DEFINE(MAC_SPOOFED_SCAN),
-       DEFINE(BMU_ERROR_GENERIC_RECOVERY),
-       DEFINE(DISA),
-       DEFINE(FW_STATS),
-       DEFINE(WPS_PRBRSP_TMPL),
-       DEFINE(BCN_IE_FLT_DELTA),
-       DEFINE(TDLS_OFF_CHANNEL),
-       DEFINE(RTT3),
-       DEFINE(MGMT_FRAME_LOGGING),
-       DEFINE(ENHANCED_TXBD_COMPLETION),
-       DEFINE(LOGGING_ENHANCEMENT),
-       DEFINE(EXT_SCAN_ENHANCED),
-       DEFINE(MEMORY_DUMP_SUPPORTED),
-       DEFINE(PER_PKT_STATS_SUPPORTED),
-       DEFINE(EXT_LL_STAT),
-       DEFINE(WIFI_CONFIG),
-       DEFINE(ANTENNA_DIVERSITY_SELECTION),
-};
-
-#undef DEFINE
-
-static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
-{
-       if (x >= ARRAY_SIZE(wcn36xx_caps_names))
-               return "UNKNOWN";
-       return wcn36xx_caps_names[x];
-}
-
 static void wcn36xx_feat_caps_info(struct wcn36xx *wcn)
 {
        int i;
 
        for (i = 0; i < MAX_FEATURE_SUPPORTED; i++) {
-               if (get_feat_caps(wcn->fw_feat_caps, i))
-                       wcn36xx_dbg(WCN36XX_DBG_MAC, "FW Cap %s\n", wcn36xx_get_cap_name(i));
+               if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, i)) {
+                       wcn36xx_dbg(WCN36XX_DBG_MAC, "FW Cap %s\n",
+                                   wcn36xx_firmware_get_cap_name(i));
+               }
        }
 }
 
@@ -705,7 +633,7 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
 {
        struct wcn36xx *wcn = hw->priv;
 
-       if (!get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
+       if (!wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
                /* fallback to mac80211 software scan */
                return 1;
        }
@@ -743,7 +671,7 @@ static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
        wcn->scan_aborted = true;
        mutex_unlock(&wcn->scan_lock);
 
-       if (get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
+       if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
                /* ieee80211_scan_completed will be called on FW scan
                 * indication */
                wcn36xx_smd_stop_hw_scan(wcn);
index 46ab218..566f0b9 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/bitops.h>
 #include <linux/rpmsg.h>
 #include "smd.h"
+#include "firmware.h"
 
 struct wcn36xx_cfg_val {
        u32 cfg_id;
@@ -295,7 +296,7 @@ static void wcn36xx_smd_set_sta_vht_params(struct wcn36xx *wcn,
                sta_params->vht_capable = sta->deflink.vht_cap.vht_supported;
                sta_params->vht_ldpc_enabled =
                        is_cap_supported(caps, IEEE80211_VHT_CAP_RXLDPC);
-               if (get_feat_caps(wcn->fw_feat_caps, MU_MIMO)) {
+               if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, MU_MIMO)) {
                        sta_params->vht_tx_mu_beamformee_capable =
                                is_cap_supported(caps, IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
                        if (sta_params->vht_tx_mu_beamformee_capable)
@@ -2431,49 +2432,6 @@ out:
        return ret;
 }
 
-void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
-{
-       int arr_idx, bit_idx;
-
-       if (cap < 0 || cap > 127) {
-               wcn36xx_warn("error cap idx %d\n", cap);
-               return;
-       }
-
-       arr_idx = cap / 32;
-       bit_idx = cap % 32;
-       bitmap[arr_idx] |= (1 << bit_idx);
-}
-
-int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
-{
-       int arr_idx, bit_idx;
-
-       if (cap < 0 || cap > 127) {
-               wcn36xx_warn("error cap idx %d\n", cap);
-               return -EINVAL;
-       }
-
-       arr_idx = cap / 32;
-       bit_idx = cap % 32;
-
-       return (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
-}
-
-void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
-{
-       int arr_idx, bit_idx;
-
-       if (cap < 0 || cap > 127) {
-               wcn36xx_warn("error cap idx %d\n", cap);
-               return;
-       }
-
-       arr_idx = cap / 32;
-       bit_idx = cap % 32;
-       bitmap[arr_idx] &= ~(1 << bit_idx);
-}
-
 int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
 {
        struct wcn36xx_hal_feat_caps_msg msg_body, *rsp;
@@ -2482,11 +2440,12 @@ int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
        mutex_lock(&wcn->hal_mutex);
        INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
 
-       set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
+       wcn36xx_firmware_set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
        if (wcn->rf_id == RF_IRIS_WCN3680) {
-               set_feat_caps(msg_body.feat_caps, DOT11AC);
-               set_feat_caps(msg_body.feat_caps, WLAN_CH144);
-               set_feat_caps(msg_body.feat_caps, ANTENNA_DIVERSITY_SELECTION);
+               wcn36xx_firmware_set_feat_caps(msg_body.feat_caps, DOT11AC);
+               wcn36xx_firmware_set_feat_caps(msg_body.feat_caps, WLAN_CH144);
+               wcn36xx_firmware_set_feat_caps(msg_body.feat_caps,
+                                              ANTENNA_DIVERSITY_SELECTION);
        }
 
        PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -3300,7 +3259,7 @@ int wcn36xx_smd_add_beacon_filter(struct wcn36xx *wcn,
        size_t payload_size;
        int ret;
 
-       if (!get_feat_caps(wcn->fw_feat_caps, BCN_FILTER))
+       if (!wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, BCN_FILTER))
                return -EOPNOTSUPP;
 
        mutex_lock(&wcn->hal_mutex);
index 3fd598a..cf15cde 100644 (file)
@@ -125,9 +125,6 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
 int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
                             u32 arg3, u32 arg4, u32 arg5);
 int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn);
-void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
-int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
-void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
 
 int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
                struct ieee80211_sta *sta,
index fe84362..04d1aa0 100644 (file)
@@ -1010,7 +1010,7 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
        void *cmd;
        int cmdlen = len - sizeof(struct wmi_cmd_hdr);
        u16 cmdid;
-       int rc, rc1;
+       int rc1;
 
        if (cmdlen < 0 || *ppos != 0)
                return -EINVAL;
@@ -1027,7 +1027,7 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
 
        wil_info(wil, "0x%04x[%d] -> %d\n", cmdid, cmdlen, rc1);
 
-       return rc;
+       return len;
 }
 
 static const struct file_operations fops_wmi = {
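
The wil6210 fix above matters because a debugfs .write handler must return the number of bytes it consumed (or a negative errno); returning an uninitialized local can make user space retry or fail spuriously. A hedged sketch of the convention, with a made-up handler:

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t demo_write(struct file *file, const char __user *buf,
                          size_t len, loff_t *ppos)
{
        char kbuf[16];

        if (len == 0 || len >= sizeof(kbuf) || *ppos != 0)
                return -EINVAL;

        if (copy_from_user(kbuf, buf, len))
                return -EFAULT;
        kbuf[len] = '\0';

        /* ... act on kbuf ... */

        /* Tell user space the whole buffer was consumed. */
        return len;
}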
index 008ee1f..b2539a9 100644 (file)
@@ -105,7 +105,7 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT;
 module_param_named(verbose, b43_modparam_verbose, int, 0644);
 MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
 
-static int b43_modparam_pio = 0;
+static int b43_modparam_pio;
 module_param_named(pio, b43_modparam_pio, int, 0644);
 MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
 
index cbc21c7..4022c54 100644 (file)
@@ -2944,7 +2944,7 @@ static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev)
                        dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num]));
        }
 
-b43legacy_mac_suspend(dev);
+       b43legacy_mac_suspend(dev);
        free_irq(dev->dev->irq, dev);
        b43legacydbg(wl, "Wireless interface stopped\n");
 }
index 9c598ea..d639bb8 100644 (file)
@@ -784,9 +784,11 @@ void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
        sdiodev->txglomsz = sdiodev->settings->bus.sdio.txglomsz;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
 {
+       if (!IS_ENABLED(CONFIG_PM_SLEEP))
+               return 0;
+
        sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
        if (!sdiodev->freezer)
                return -ENOMEM;
@@ -802,6 +804,7 @@ static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
        if (sdiodev->freezer) {
                WARN_ON(atomic_read(&sdiodev->freezer->freezing));
                kfree(sdiodev->freezer);
+               sdiodev->freezer = NULL;
        }
 }
 
@@ -833,7 +836,8 @@ static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
 
 bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
 {
-       return atomic_read(&sdiodev->freezer->freezing);
+       return IS_ENABLED(CONFIG_PM_SLEEP) &&
+               atomic_read(&sdiodev->freezer->freezing);
 }
 
 void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
@@ -847,23 +851,15 @@ void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
 
 void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
 {
-       atomic_inc(&sdiodev->freezer->thread_count);
+       if (IS_ENABLED(CONFIG_PM_SLEEP))
+               atomic_inc(&sdiodev->freezer->thread_count);
 }
 
 void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
 {
-       atomic_dec(&sdiodev->freezer->thread_count);
-}
-#else
-static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
-{
-       return 0;
-}
-
-static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
-{
+       if (IS_ENABLED(CONFIG_PM_SLEEP))
+               atomic_dec(&sdiodev->freezer->thread_count);
 }
-#endif /* CONFIG_PM_SLEEP */
 
 int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
 {
@@ -875,13 +871,9 @@ int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
 
        brcmf_sdiod_freezer_detach(sdiodev);
 
-       /* Disable Function 2 */
-       sdio_claim_host(sdiodev->func2);
-       sdio_disable_func(sdiodev->func2);
-       sdio_release_host(sdiodev->func2);
-
-       /* Disable Function 1 */
+       /* Disable functions 2 then 1. */
        sdio_claim_host(sdiodev->func1);
+       sdio_disable_func(sdiodev->func2);
        sdio_disable_func(sdiodev->func1);
        sdio_release_host(sdiodev->func1);
 
@@ -911,7 +903,7 @@ int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
        if (ret) {
                brcmf_err("Failed to set F1 blocksize\n");
                sdio_release_host(sdiodev->func1);
-               goto out;
+               return ret;
        }
        switch (sdiodev->func2->device) {
        case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373:
@@ -933,7 +925,7 @@ int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
        if (ret) {
                brcmf_err("Failed to set F2 blocksize\n");
                sdio_release_host(sdiodev->func1);
-               goto out;
+               return ret;
        } else {
                brcmf_dbg(SDIO, "set F2 blocksize to %d\n", f2_blksz);
        }
@@ -1136,7 +1128,6 @@ notsup:
        brcmf_dbg(SDIO, "WOWL not supported\n");
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int brcmf_ops_sdio_suspend(struct device *dev)
 {
        struct sdio_func *func;
@@ -1204,11 +1195,9 @@ static int brcmf_ops_sdio_resume(struct device *dev)
        return ret;
 }
 
-static const struct dev_pm_ops brcmf_sdio_pm_ops = {
-       .suspend        = brcmf_ops_sdio_suspend,
-       .resume         = brcmf_ops_sdio_resume,
-};
-#endif /* CONFIG_PM_SLEEP */
+static DEFINE_SIMPLE_DEV_PM_OPS(brcmf_sdio_pm_ops,
+                               brcmf_ops_sdio_suspend,
+                               brcmf_ops_sdio_resume);
 
 static struct sdio_driver brcmf_sdmmc_driver = {
        .probe = brcmf_ops_sdio_probe,
@@ -1217,9 +1206,7 @@ static struct sdio_driver brcmf_sdmmc_driver = {
        .id_table = brcmf_sdmmc_ids,
        .drv = {
                .owner = THIS_MODULE,
-#ifdef CONFIG_PM_SLEEP
-               .pm = &brcmf_sdio_pm_ops,
-#endif /* CONFIG_PM_SLEEP */
+               .pm = pm_sleep_ptr(&brcmf_sdio_pm_ops),
                .coredump = brcmf_dev_coredump,
        },
 };
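
The brcmfmac hunks above trade #ifdef CONFIG_PM_SLEEP blocks for IS_ENABLED() checks plus DEFINE_SIMPLE_DEV_PM_OPS()/pm_sleep_ptr(), which keeps the PM code compile-tested in every configuration while still letting the compiler drop it when sleep support is off. A small sketch of the same shape for a hypothetical driver (the demo_* names are illustrative):

#include <linux/device.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev)
{
        /* Only reachable when CONFIG_PM_SLEEP=y, but always compiled. */
        return 0;
}

static int demo_resume(struct device *dev)
{
        return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static void demo_setup(struct device_driver *drv)
{
        /* pm_sleep_ptr() evaluates to NULL when sleep support is disabled. */
        drv->pm = pm_sleep_ptr(&demo_pm_ops);

        if (IS_ENABLED(CONFIG_PM_SLEEP))
                pr_info("sleep support built in\n");
}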
index 3ae6779..db45da3 100644 (file)
@@ -7481,6 +7481,9 @@ int brcmf_cfg80211_wait_vif_event(struct brcmf_cfg80211_info *cfg,
 
 static bool brmcf_use_iso3166_ccode_fallback(struct brcmf_pub *drvr)
 {
+       if (drvr->settings->trivial_ccode_map)
+               return true;
+
        switch (drvr->bus_if->chip) {
        case BRCM_CC_4345_CHIP_ID:
        case BRCM_CC_43602_CHIP_ID:
index fe01da9..7485e78 100644 (file)
@@ -190,6 +190,31 @@ done:
        return err;
 }
 
+int brcmf_c_set_cur_etheraddr(struct brcmf_if *ifp, const u8 *addr)
+{
+       s32 err;
+
+       err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", addr, ETH_ALEN);
+       if (err < 0)
+               bphy_err(ifp->drvr, "Setting cur_etheraddr failed, %d\n", err);
+
+       return err;
+}
+
+/* On some boards there is no eeprom to hold the nvram, in this case instead
+ * a board specific nvram is loaded from /lib/firmware. On most boards the
+ * macaddr setting in the /lib/firmware nvram file is ignored because the
+ * wifibt chip has a unique MAC programmed into the chip itself.
+ * But in some cases the actual MAC from the /lib/firmware nvram file gets
+ * used, leading to MAC conflicts.
+ * The MAC addresses in the troublesome nvram files seem to all come from
+ * the same nvram file template, so we only need to check for 1 known
+ * address to detect this.
+ */
+static const u8 brcmf_default_mac_address[ETH_ALEN] = {
+       0x00, 0x90, 0x4c, 0xc5, 0x12, 0x38
+};
+
 int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
 {
        struct brcmf_pub *drvr = ifp->drvr;
@@ -204,12 +229,9 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
 
        if (is_valid_ether_addr(ifp->mac_addr)) {
                /* set mac address */
-               err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
-                                              ETH_ALEN);
-               if (err < 0) {
-                       bphy_err(ifp->drvr, "Setting cur_etheraddr failed, %d\n", err);
+               err = brcmf_c_set_cur_etheraddr(ifp, ifp->mac_addr);
+               if (err < 0)
                        goto done;
-               }
        } else {
                /* retrieve mac address */
                err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
@@ -218,6 +240,15 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
                        bphy_err(drvr, "Retrieving cur_etheraddr failed, %d\n", err);
                        goto done;
                }
+
+               if (ether_addr_equal_unaligned(ifp->mac_addr, brcmf_default_mac_address)) {
+                       bphy_err(drvr, "Default MAC is used, replacing with random MAC to avoid conflicts\n");
+                       eth_random_addr(ifp->mac_addr);
+                       ifp->ndev->addr_assign_type = NET_ADDR_RANDOM;
+                       err = brcmf_c_set_cur_etheraddr(ifp, ifp->mac_addr);
+                       if (err < 0)
+                               goto done;
+               }
        }
 
        memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac));
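
To illustrate the default-MAC workaround above: the address read back from firmware is compared against one known template value and, on a match, replaced with a locally administered random address so two boards with the same nvram template no longer collide. A hedged sketch of the two helpers involved; the template bytes are taken from the patch, the surrounding function is illustrative.

#include <linux/etherdevice.h>

static const u8 template_mac[ETH_ALEN] = {
        0x00, 0x90, 0x4c, 0xc5, 0x12, 0x38
};

static void demo_fixup_mac(struct net_device *ndev, u8 *mac)
{
        if (!ether_addr_equal_unaligned(mac, template_mac))
                return;

        /* Generate a random, locally administered unicast address. */
        eth_random_addr(mac);
        ndev->addr_assign_type = NET_ADDR_RANDOM;
}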
index 15accc8..6c5a22a 100644 (file)
@@ -38,6 +38,7 @@ extern struct brcmf_mp_global_t brcmf_mp_global;
  * @fcmode: FWS flow control.
  * @roamoff: Firmware roaming off?
  * @ignore_probe_fail: Ignore probe failure.
+ * @trivial_ccode_map: Assume firmware uses ISO3166 country codes with rev 0
  * @country_codes: If available, pointer to struct for translating country codes
  * @bus: Bus specific platform data. Only SDIO at the moment.
  */
@@ -48,6 +49,7 @@ struct brcmf_mp_device {
        bool            roamoff;
        bool            iapp;
        bool            ignore_probe_fail;
+       bool            trivial_ccode_map;
        struct brcmfmac_pd_cc *country_codes;
        const char      *board_type;
        unsigned char   mac[ETH_ALEN];
@@ -65,6 +67,7 @@ void brcmf_release_module_param(struct brcmf_mp_device *module_param);
 
 /* Sets dongle media info (drv_version, mac address). */
 int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+int brcmf_c_set_cur_etheraddr(struct brcmf_if *ifp, const u8 *addr);
 
 #ifdef CONFIG_DMI
 void brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev);
index 87aef21..bd164a0 100644 (file)
@@ -233,16 +233,12 @@ static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
 {
        struct brcmf_if *ifp = netdev_priv(ndev);
        struct sockaddr *sa = (struct sockaddr *)addr;
-       struct brcmf_pub *drvr = ifp->drvr;
        int err;
 
        brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
 
-       err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", sa->sa_data,
-                                      ETH_ALEN);
-       if (err < 0) {
-               bphy_err(drvr, "Setting cur_etheraddr failed, %d\n", err);
-       } else {
+       err = brcmf_c_set_cur_etheraddr(ifp, sa->sa_data);
+       if (err >= 0) {
                brcmf_dbg(TRACE, "updated to %pM\n", sa->sa_data);
                memcpy(ifp->mac_addr, sa->sa_data, ETH_ALEN);
                eth_hw_addr_set(ifp->ndev, ifp->mac_addr);
index 083ac58..79388d4 100644 (file)
@@ -24,6 +24,12 @@ static int brcmf_of_get_country_codes(struct device *dev,
 
        count = of_property_count_strings(np, "brcm,ccode-map");
        if (count < 0) {
+               /* If no explicit country code map is specified, check whether
+                * the trivial map should be used.
+                */
+               settings->trivial_ccode_map =
+                       of_property_read_bool(np, "brcm,ccode-map-trivial");
+
                /* The property is optional, so return success if it doesn't
                 * exist. Otherwise propagate the error code.
                 */
@@ -72,7 +78,6 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
        /* Set board-type to the first string of the machine compatible prop */
        root = of_find_node_by_path("/");
        if (root) {
-               int i;
                char *board_type;
                const char *tmp;
 
@@ -84,10 +89,7 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
                        of_node_put(root);
                        return;
                }
-               for (i = 0; i < board_type[i]; i++) {
-                       if (board_type[i] == '/')
-                               board_type[i] = '-';
-               }
+               strreplace(board_type, '/', '-');
                settings->board_type = board_type;
 
                of_node_put(root);
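
Two small helpers do the work in the of.c hunks above: of_property_read_bool() turns the mere presence of "brcm,ccode-map-trivial" into a flag, and strreplace() collapses the hand-rolled loop that turned '/' into '-' in the board-type string. A hedged sketch (the property name is from the patch; the function around it is illustrative):

#include <linux/of.h>
#include <linux/string.h>

static void demo_parse(struct device_node *np, bool *trivial_map,
                       char *board_type)
{
        /* True when the boolean DT property is present, false otherwise. */
        *trivial_map = of_property_read_bool(np, "brcm,ccode-map-trivial");

        /* "vendor/board" -> "vendor-board", edited in place. */
        strreplace(board_type, '/', '-');
}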
index 2136c3c..8968809 100644 (file)
@@ -4020,15 +4020,14 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
         */
        brcmf_sdiod_sgtable_alloc(sdiodev);
 
-#ifdef CONFIG_PM_SLEEP
        /* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ
         * is true or when platform data OOB irq is true).
         */
-       if ((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_KEEP_POWER) &&
+       if (IS_ENABLED(CONFIG_PM_SLEEP) &&
+           (sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_KEEP_POWER) &&
            ((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_WAKE_SDIO_IRQ) ||
             (sdiodev->settings->bus.sdio.oob_irq_supported)))
                sdiodev->bus_if->wowl_supported = true;
-#endif
 
        if (brcmf_sdio_kso_init(bus)) {
                brcmf_err("error enabling KSO\n");
@@ -4152,7 +4151,6 @@ int brcmf_sdio_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
 
 static int brcmf_sdio_bus_reset(struct device *dev)
 {
-       int ret = 0;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
 
@@ -4169,14 +4167,7 @@ static int brcmf_sdio_bus_reset(struct device *dev)
        sdio_release_host(sdiodev->func1);
 
        brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
-
-       ret = brcmf_sdiod_probe(sdiodev);
-       if (ret) {
-               brcmf_err("Failed to probe after sdio device reset: ret %d\n",
-                         ret);
-       }
-
-       return ret;
+       return 0;
 }
 
 static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
index 15d2c02..47351ff 100644 (file)
@@ -346,26 +346,10 @@ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func);
 void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev);
 void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
                              enum brcmf_sdiod_state state);
-#ifdef CONFIG_PM_SLEEP
 bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev);
 void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev);
 void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev);
 void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev);
-#else
-static inline bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
-{
-       return false;
-}
-static inline void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
-{
-}
-static inline void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
-{
-}
-static inline void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
-{
-}
-#endif /* CONFIG_PM_SLEEP */
 
 int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev);
 int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev);
index 777c568..8c5b97f 100644 (file)
@@ -1105,10 +1105,10 @@ static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
        IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
        IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr);
        IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time);
-       IWL_DEBUG_INFO(mvm, "\tburst index: %hhu\n", res->ftm.burst_index);
+       IWL_DEBUG_INFO(mvm, "\tburst index: %d\n", res->ftm.burst_index);
        IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
        IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg);
-       IWL_DEBUG_INFO(mvm, "\trssi spread: %hhu\n", res->ftm.rssi_spread);
+       IWL_DEBUG_INFO(mvm, "\trssi spread: %d\n", res->ftm.rssi_spread);
        IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg);
        IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance);
        IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread);
index b296f49..ff0d3b3 100644 (file)
@@ -1861,6 +1861,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
                        iwl_mvm_txq_from_mac80211(sta->txq[i]);
 
                mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+               list_del_init(&mvmtxq->list);
        }
 }
 
index 5d6dc1d..32fdc41 100644 (file)
@@ -287,6 +287,7 @@ static int if_usb_probe(struct usb_interface *intf,
        return 0;
 
 err_get_fw:
+       usb_put_dev(udev);
        lbs_remove_card(priv);
 err_add_card:
        if_usb_reset_device(cardp);
index d5fb294..43bdcbc 100644 (file)
@@ -3373,7 +3373,7 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
        } else {
                mwifiex_dbg(adapter, INFO,
                            "%s(): calling free_irq()\n", __func__);
-              free_irq(card->dev->irq, &card->share_irq_ctx);
+               free_irq(card->dev->irq, &card->share_irq_ctx);
 
                if (card->msi_enable)
                        pci_disable_msi(pdev);
index 76004bd..9b91580 100644 (file)
@@ -1549,7 +1549,7 @@ done:
 /*
  * This function decode sdio aggreation pkt.
  *
- * Based on the the data block size and pkt_len,
+ * Based on the data block size and pkt_len,
  * skb data will be decoded to few packets.
  */
 static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
index c1bf899..4dc7e2e 100644 (file)
@@ -1880,7 +1880,7 @@ static inline void mwl8k_tx_count_packet(struct ieee80211_sta *sta, u8 tid)
         * packets ever exceeds the ampdu_min_traffic threshold, we will allow
         * an ampdu stream to be started.
         */
-       if (jiffies - tx_stats->start_time > HZ) {
+       if (time_after(jiffies, (unsigned long)tx_stats->start_time + HZ)) {
                tx_stats->pkts = 0;
                tx_stats->start_time = 0;
        } else
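
The mwl8k change above replaces an open-coded "jiffies - start > HZ" with time_after(), which uses a signed-difference comparison and therefore stays correct across jiffies wrap-around. A minimal sketch of the comparison it performs (the macro comes from linux/jiffies.h):

#include <linux/jiffies.h>

static bool demo_window_expired(unsigned long start_time)
{
        /* True once at least one second has passed, wrap-safe. */
        return time_after(jiffies, start_time + HZ);
}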
index 5c2c7f1..3ac373d 100644 (file)
@@ -1312,12 +1312,11 @@ static int dump_station(struct wiphy *wiphy, struct net_device *dev,
        if (idx != 0)
                return -ENOENT;
 
-       sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
-
        ret = wilc_get_rssi(vif, &sinfo->signal);
        if (ret)
                return ret;
 
+       sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
        memcpy(mac, vif->priv.associated_bss, ETH_ALEN);
        return 0;
 }
index 021e0db..b89519a 100644 (file)
@@ -635,7 +635,7 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
        conn_info->req_ies_len = 0;
 }
 
-static inline void host_int_handle_disconnect(struct wilc_vif *vif)
+inline void wilc_handle_disconnect(struct wilc_vif *vif)
 {
        struct host_if_drv *hif_drv = vif->hif_drv;
 
@@ -647,8 +647,6 @@ static inline void host_int_handle_disconnect(struct wilc_vif *vif)
        if (hif_drv->conn_info.conn_result)
                hif_drv->conn_info.conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF,
                                               0, hif_drv->conn_info.arg);
-       else
-               netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
 
        eth_zero_addr(hif_drv->assoc_bssid);
 
@@ -684,7 +682,7 @@ static void handle_rcvd_gnrl_async_info(struct work_struct *work)
                host_int_parse_assoc_resp_info(vif, mac_info->status);
        } else if (mac_info->status == WILC_MAC_STATUS_DISCONNECTED) {
                if (hif_drv->hif_state == HOST_IF_CONNECTED) {
-                       host_int_handle_disconnect(vif);
+                       wilc_handle_disconnect(vif);
                } else if (hif_drv->usr_scan_req.scan_result) {
                        del_timer(&hif_drv->scan_timer);
                        handle_scan_done(vif, SCAN_EVENT_ABORTED);
index d8dd94d..69ba1d4 100644 (file)
@@ -215,4 +215,5 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length);
 void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
                                struct cfg80211_crypto_settings *crypto);
 int wilc_set_default_mgmt_key_index(struct wilc_vif *vif, u8 index);
+inline void wilc_handle_disconnect(struct wilc_vif *vif);
 #endif
index fcc4e61..9b319a4 100644 (file)
@@ -97,12 +97,12 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
        struct ieee80211_hdr *h = (struct ieee80211_hdr *)mac_header;
 
        list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
-               if (vif->mode == WILC_STATION_MODE)
+               if (vif->iftype == WILC_STATION_MODE)
                        if (ether_addr_equal_unaligned(h->addr2, vif->bssid)) {
                                ndev = vif->ndev;
                                goto out;
                        }
-               if (vif->mode == WILC_AP_MODE)
+               if (vif->iftype == WILC_AP_MODE)
                        if (ether_addr_equal_unaligned(h->addr1, vif->bssid)) {
                                ndev = vif->ndev;
                                goto out;
@@ -122,7 +122,7 @@ void wilc_wlan_set_bssid(struct net_device *wilc_netdev, const u8 *bssid,
        else
                eth_zero_addr(vif->bssid);
 
-       vif->mode = mode;
+       vif->iftype = mode;
 }
 
 int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
@@ -472,7 +472,7 @@ static int wlan_initialize_threads(struct net_device *dev)
                                       "%s-tx", dev->name);
        if (IS_ERR(wilc->txq_thread)) {
                netdev_err(dev, "couldn't create TXQ thread\n");
-               wilc->close = 0;
+               wilc->close = 1;
                return PTR_ERR(wilc->txq_thread);
        }
        wait_for_completion(&wilc->txq_thread_started);
@@ -780,6 +780,7 @@ static int wilc_mac_close(struct net_device *ndev)
        if (vif->ndev) {
                netif_stop_queue(vif->ndev);
 
+               wilc_handle_disconnect(vif);
                wilc_deinit_host_int(vif->ndev);
        }
 
index 822e65d..43c085c 100644 (file)
@@ -177,7 +177,6 @@ struct wilc_vif {
        u8 bssid[ETH_ALEN];
        struct host_if_drv *hif_drv;
        struct net_device *ndev;
-       u8 mode;
        struct timer_list during_ip_timer;
        struct timer_list periodic_rssi;
        struct rf_info periodic_stat;
index 7962c11..600cc57 100644 (file)
@@ -26,6 +26,7 @@ static const struct sdio_device_id wilc_sdio_ids[] = {
 struct wilc_sdio {
        bool irq_gpio;
        u32 block_size;
+       bool isinit;
        int has_thrpt_enh3;
 };
 
@@ -193,6 +194,13 @@ static int wilc_sdio_reset(struct wilc *wilc)
        return 0;
 }
 
+static bool wilc_sdio_is_init(struct wilc *wilc)
+{
+       struct wilc_sdio *sdio_priv = wilc->bus_data;
+
+       return sdio_priv->isinit;
+}
+
 static int wilc_sdio_suspend(struct device *dev)
 {
        struct sdio_func *func = dev_to_sdio_func(dev);
@@ -581,6 +589,9 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
 
 static int wilc_sdio_deinit(struct wilc *wilc)
 {
+       struct wilc_sdio *sdio_priv = wilc->bus_data;
+
+       sdio_priv->isinit = false;
        return 0;
 }
 
@@ -700,6 +711,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
                         sdio_priv->has_thrpt_enh3);
        }
 
+       sdio_priv->isinit = true;
        return 0;
 }
 
@@ -981,6 +993,7 @@ static const struct wilc_hif_func wilc_hif_sdio = {
        .enable_interrupt = wilc_sdio_enable_interrupt,
        .disable_interrupt = wilc_sdio_disable_interrupt,
        .hif_reset = wilc_sdio_reset,
+       .hif_is_init = wilc_sdio_is_init,
 };
 
 static int wilc_sdio_resume(struct device *dev)
index 2ae8dd3..b0fc5e6 100644 (file)
@@ -1029,6 +1029,13 @@ static int wilc_spi_reset(struct wilc *wilc)
        return result;
 }
 
+static bool wilc_spi_is_init(struct wilc *wilc)
+{
+       struct wilc_spi *spi_priv = wilc->bus_data;
+
+       return spi_priv->isinit;
+}
+
 static int wilc_spi_deinit(struct wilc *wilc)
 {
        struct wilc_spi *spi_priv = wilc->bus_data;
@@ -1250,4 +1257,5 @@ static const struct wilc_hif_func wilc_hif_spi = {
        .hif_block_rx_ext = wilc_spi_read,
        .hif_sync_ext = wilc_spi_sync_ext,
        .hif_reset = wilc_spi_reset,
+       .hif_is_init = wilc_spi_is_init,
 };
index f3f504d..947d9a0 100644 (file)
@@ -1481,9 +1481,12 @@ int wilc_wlan_init(struct net_device *dev)
 
        wilc->quit = 0;
 
-       if (wilc->hif_func->hif_init(wilc, false)) {
-               ret = -EIO;
-               goto fail;
+       if (!wilc->hif_func->hif_is_init(wilc)) {
+               acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
+               ret = wilc->hif_func->hif_init(wilc, false);
+               release_bus(wilc, WILC_BUS_RELEASE_ONLY);
+               if (ret)
+                       goto fail;
        }
 
        if (!wilc->tx_buffer)
index b45e727..a72cd5c 100644 (file)
@@ -373,6 +373,7 @@ struct wilc_hif_func {
        int (*enable_interrupt)(struct wilc *nic);
        void (*disable_interrupt)(struct wilc *nic);
        int (*hif_reset)(struct wilc *wilc);
+       bool (*hif_is_init)(struct wilc *wilc);
 };
 
 #define WILC_MAX_CFG_FRAME_SIZE                1468
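
The new hif_is_init callback above lets the common wilc_wlan_init() path ask the bus layer whether the hardware interface has already been brought up instead of unconditionally re-running hif_init(). A hedged sketch of the guard, with a simplified ops/device pair standing in for the driver's real structures:

#include <stdbool.h>

struct demo_hif_ops {
        int  (*init)(void *bus);
        bool (*is_init)(void *bus);
};

struct demo_dev {
        const struct demo_hif_ops *ops;
        void *bus;
};

static int demo_bring_up(struct demo_dev *dev)
{
        /* Skip re-initialization if the bus layer reports it is done. */
        if (dev->ops->is_init(dev->bus))
                return 0;

        return dev->ops->init(dev->bus);
}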
index dba3013..1313888 100644 (file)
@@ -22,6 +22,7 @@ static const struct wilc_cfg_byte g_cfg_byte[] = {
        {WID_STATUS, 0},
        {WID_RSSI, 0},
        {WID_LINKSPEED, 0},
+       {WID_TX_POWER, 0},
        {WID_WOWLAN_TRIGGER, 0},
        {WID_NIL, 0}
 };
@@ -180,9 +181,10 @@ static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size)
                                i++;
 
                        if (cfg->s[i].id == wid)
-                               memcpy(cfg->s[i].str, &info[2], info[2] + 2);
+                               memcpy(cfg->s[i].str, &info[2],
+                                      get_unaligned_le16(&info[2]) + 2);
 
-                       len = 2 + info[2];
+                       len = 2 + get_unaligned_le16(&info[2]);
                        break;
 
                default:
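
The cfg response parser above previously used only info[2], i.e. the low byte of a little-endian 16-bit length; the fix reads both bytes with get_unaligned_le16(), which also tolerates the field not being naturally aligned inside the packed frame. A minimal sketch of walking such a record, under an assumed layout of 2-byte id, 2-byte little-endian length, then payload:

#include <asm/unaligned.h>
#include <linux/types.h>

/* Assumed record layout: __le16 id, __le16 len, then len bytes of data. */
static const u8 *demo_next_record(const u8 *rec, u16 *id, u16 *len)
{
        *id  = get_unaligned_le16(rec);
        *len = get_unaligned_le16(rec + 2);

        return rec + 4 + *len;  /* start of the following record */
}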
index 8519cf0..39e54b3 100644 (file)
@@ -562,7 +562,7 @@ static void sta_queue_cleanup_timer_callb(struct timer_list *t)
                if (tx->station[sidx].flag & STATION_HEARTBEAT_FLAG) {
                        tx->station[sidx].flag ^= STATION_HEARTBEAT_FLAG;
                } else {
-                       memset(tx->station[sidx].mac, 0, ETH_ALEN);
+                       eth_zero_addr(tx->station[sidx].mac);
                        tx->station[sidx].flag = 0;
                }
        }
index 33a1d91..c66f072 100644 (file)
@@ -6658,7 +6658,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
        if (!hw) {
                ret = -ENOMEM;
                priv = NULL;
-               goto exit;
+               goto err_put_dev;
        }
 
        priv = hw->priv;
@@ -6680,24 +6680,24 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
 
        ret = rtl8xxxu_parse_usb(priv, interface);
        if (ret)
-               goto exit;
+               goto err_set_intfdata;
 
        ret = rtl8xxxu_identify_chip(priv);
        if (ret) {
                dev_err(&udev->dev, "Fatal - failed to identify chip\n");
-               goto exit;
+               goto err_set_intfdata;
        }
 
        ret = rtl8xxxu_read_efuse(priv);
        if (ret) {
                dev_err(&udev->dev, "Fatal - failed to read EFuse\n");
-               goto exit;
+               goto err_set_intfdata;
        }
 
        ret = priv->fops->parse_efuse(priv);
        if (ret) {
                dev_err(&udev->dev, "Fatal - failed to parse EFuse\n");
-               goto exit;
+               goto err_set_intfdata;
        }
 
        rtl8xxxu_print_chipinfo(priv);
@@ -6705,12 +6705,12 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
        ret = priv->fops->load_firmware(priv);
        if (ret) {
                dev_err(&udev->dev, "Fatal - failed to load firmware\n");
-               goto exit;
+               goto err_set_intfdata;
        }
 
        ret = rtl8xxxu_init_device(hw);
        if (ret)
-               goto exit;
+               goto err_set_intfdata;
 
        hw->wiphy->max_scan_ssids = 1;
        hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
@@ -6760,12 +6760,12 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
        if (ret) {
                dev_err(&udev->dev, "%s: Failed to register: %i\n",
                        __func__, ret);
-               goto exit;
+               goto err_set_intfdata;
        }
 
        return 0;
 
-exit:
+err_set_intfdata:
        usb_set_intfdata(interface, NULL);
 
        if (priv) {
@@ -6773,9 +6773,10 @@ exit:
                mutex_destroy(&priv->usb_buf_mutex);
                mutex_destroy(&priv->h2c_mutex);
        }
-       usb_put_dev(udev);
 
        ieee80211_free_hw(hw);
+err_put_dev:
+       usb_put_dev(udev);
 
        return ret;
 }
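
Editor's note: the rtl8xxxu_probe rework above splits the single "exit" label into descending labels so each failure point jumps to the label that undoes exactly what has been acquired so far, and usb_put_dev() now runs last (and also on the early allocation failure). A minimal sketch of that label-ordering pattern follows; dev_get/dev_put/hw_alloc/hw_init are stand-in helpers invented for the example, not the real USB or mac80211 API.

#include <stdio.h>
#include <stdlib.h>

struct dev { int refs; };
struct hw  { struct dev *parent; };

static struct dev *dev_get(struct dev *d)          { d->refs++; return d; }
static void        dev_put(struct dev *d)          { d->refs--; }
static struct hw  *hw_alloc(int fail)              { return fail ? NULL : malloc(sizeof(struct hw)); }
static void        hw_free(struct hw *h)           { free(h); }
static int         hw_init(struct hw *h, int fail) { (void)h; return fail ? -1 : 0; }

static int probe(struct dev *d, int fail_alloc, int fail_init)
{
	struct hw *h;
	int ret;

	dev_get(d);                     /* first resource: device reference  */

	h = hw_alloc(fail_alloc);       /* second resource: hardware context */
	if (!h) {
		ret = -1;               /* allocation failure                */
		goto err_put_dev;       /* nothing else to undo yet          */
	}

	ret = hw_init(h, fail_init);
	if (ret)
		goto err_free_hw;       /* unwind in reverse acquisition order */

	return 0;

err_free_hw:
	hw_free(h);
err_put_dev:
	dev_put(d);                     /* always balanced with dev_get()    */
	return ret;
}

int main(void)
{
	struct dev d = { .refs = 0 };

	probe(&d, 1, 0);                        /* allocation failure path */
	probe(&d, 0, 1);                        /* init failure path       */
	printf("leaked refs: %d\n", d.refs);    /* expect 0                */
	return 0;
}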
index 837febe..ca01270 100644
@@ -1703,7 +1703,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                rtlpriv->sec.key_len[key_idx] = 0;
                eth_zero_addr(mac_addr);
                /*
-                *mac80211 will delete entrys one by one,
+                *mac80211 will delete entries one by one,
                 *so don't use rtl_cam_reset_all_entry
                 *or clear all entry here.
                 */
index 985ee36..76dc9da 100644
@@ -1992,6 +1992,10 @@ int rtw_core_init(struct rtw_dev *rtwdev)
        timer_setup(&rtwdev->tx_report.purge_timer,
                    rtw_tx_report_purge_timer, 0);
        rtwdev->tx_wq = alloc_workqueue("rtw_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
+       if (!rtwdev->tx_wq) {
+               rtw_warn(rtwdev, "alloc_workqueue rtw_tx_wq failed\n");
+               return -ENOMEM;
+       }
 
        INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work);
        INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work);
index 73b3b7e..c68fec9 100644
@@ -3111,7 +3111,7 @@ void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev)
        rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0;
 
        if (rtwpci->under_recovery) {
-               rtwpci->intrs[0] = 0;
+               rtwpci->intrs[0] = B_AX_HS0ISR_IND_INT_EN;
                rtwpci->intrs[1] = 0;
        } else {
                rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
index 99479bb..320bcd4 100644
@@ -1281,7 +1281,6 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x018, 0x00011124},
        {0x000, 0x00033C00},
        {0x01A, 0x00040004},
-       {0x0FE, 0x00000000},
        {0x055, 0x00080000},
        {0x056, 0x0008FFF0},
        {0x057, 0x0000C485},
@@ -20496,7 +20495,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x000001FF},
+       {0x03F, 0x000001FB},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20516,7 +20515,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x000001FF},
+       {0x03F, 0x000001FB},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20542,7 +20541,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x000001FF},
+       {0x03F, 0x000001FB},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20562,7 +20561,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x000001FF},
+       {0x03F, 0x000001FB},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20588,7 +20587,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x000001FF},
+       {0x03F, 0x000001FB},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20608,7 +20607,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x000001FF},
+       {0x03F, 0x000001FB},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20622,17 +20621,17 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0xB0000000, 0x00000000},
        {0x033, 0x0000002E},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20644,15 +20643,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20664,21 +20663,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x0000002F},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20690,15 +20689,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20710,21 +20709,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000030},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20736,15 +20735,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20756,21 +20755,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000031},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20782,15 +20781,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20802,21 +20801,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000032},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20828,15 +20827,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20848,21 +20847,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000033},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20874,15 +20873,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20894,21 +20893,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000034},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20920,15 +20919,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20940,21 +20939,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000035},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20966,15 +20965,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20986,21 +20985,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000036},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21012,15 +21011,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21032,21 +21031,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000037},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21058,15 +21057,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21078,21 +21077,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000038},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21104,15 +21103,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21124,21 +21123,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000039},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21150,15 +21149,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21170,21 +21169,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x0000003A},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21196,15 +21195,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21216,21 +21215,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x0000003B},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21242,15 +21241,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21262,21 +21261,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x0000003C},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21288,15 +21287,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21308,21 +21307,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x0000003D},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21334,15 +21333,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21354,21 +21353,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x0000003E},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21380,15 +21379,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21400,21 +21399,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x0000003F},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21426,15 +21425,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21446,7 +21445,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x0EF, 0x00000000},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
@@ -21596,8 +21595,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
        {0x087, 0x00000427},
        {0xB0000000, 0x00000000},
        {0x002, 0x00000000},
-       {0x067, 0x00000052},
-
+       {0x067, 0x00000056},
 };
 
 static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
@@ -21671,7 +21669,6 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x018, 0x00011124},
        {0x000, 0x00033C00},
        {0x01A, 0x00040004},
-       {0x0FE, 0x00000000},
        {0x055, 0x00080000},
        {0x056, 0x0008FFF0},
        {0x057, 0x0000C485},
@@ -41142,7 +41139,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x000001FF},
+       {0x03F, 0x000001FB},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41162,7 +41159,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x000001FF},
+       {0x03F, 0x000001FB},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41188,7 +41185,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x000001FF},
+       {0x03F, 0x000001FB},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41208,7 +41205,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x000001FF},
+       {0x03F, 0x000001FB},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41234,7 +41231,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x000001FF},
+       {0x03F, 0x000001FB},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41254,7 +41251,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x000001FF},
+       {0x03F, 0x000001FB},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x000001FF},
        {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41268,17 +41265,17 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0xB0000000, 0x00000000},
        {0x033, 0x0000002E},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41290,15 +41287,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41310,21 +41307,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x0000002F},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41336,15 +41333,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41356,21 +41353,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000030},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41382,15 +41379,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41402,21 +41399,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000031},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41428,15 +41425,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41448,21 +41445,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000032},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41474,15 +41471,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41494,21 +41491,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000033},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41520,15 +41517,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41540,21 +41537,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000034},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41566,15 +41563,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41586,21 +41583,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000035},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41612,15 +41609,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41632,21 +41629,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000036},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41658,15 +41655,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41678,21 +41675,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000037},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41704,15 +41701,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41724,21 +41721,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000038},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41750,15 +41747,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41770,21 +41767,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x00000039},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41796,15 +41793,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41816,21 +41813,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x0000003A},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41842,15 +41839,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41862,21 +41859,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x0000003B},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41888,15 +41885,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41908,21 +41905,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x0000003C},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41934,15 +41931,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41954,21 +41951,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x0000003D},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41980,15 +41977,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -42000,21 +41997,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x0000003E},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -42026,15 +42023,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -42046,21 +42043,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x033, 0x0000003F},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -42072,15 +42069,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003B},
        {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -42092,7 +42089,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
        {0x03F, 0x0000003F},
        {0xA0000000, 0x00000000},
-       {0x03F, 0x0000003F},
+       {0x03F, 0x000000EB},
        {0xB0000000, 0x00000000},
        {0x0EF, 0x00000000},
        {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
@@ -42243,8 +42240,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
        {0x087, 0x00000427},
        {0xB0000000, 0x00000000},
        {0x002, 0x00000000},
-       {0x067, 0x00000052},
-
+       {0x067, 0x00000056},
 };
 
 static const struct rtw89_reg2_def rtw89_8852a_phy_nctl_regs[] = {
index c6da0cf..d06a2c4 100644
@@ -1924,13 +1924,10 @@ static int wl12xx_remove(struct platform_device *pdev)
        struct wl1271 *wl = platform_get_drvdata(pdev);
        struct wl12xx_priv *priv;
 
-       if (!wl)
-               goto out;
        priv = wl->priv;
 
        kfree(priv->rx_mem_addr);
 
-out:
        return wlcore_remove(pdev);
 }
 
index f52960d..bff144c 100644
@@ -32,7 +32,7 @@ config DEBUG_PINCTRL
          Say Y here to add some extra checks and diagnostics to PINCTRL calls.
 
 config PINCTRL_AMD
-       tristate "AMD GPIO pin control"
+       bool "AMD GPIO pin control"
        depends on HAS_IOMEM
        depends on ACPI || COMPILE_TEST
        select GPIOLIB
index a140b6b..bcde042 100644
@@ -102,7 +102,7 @@ struct armada_37xx_pinctrl {
        struct device                   *dev;
        struct gpio_chip                gpio_chip;
        struct irq_chip                 irq_chip;
-       spinlock_t                      irq_lock;
+       raw_spinlock_t                  irq_lock;
        struct pinctrl_desc             pctl;
        struct pinctrl_dev              *pctl_dev;
        struct armada_37xx_pin_group    *groups;
@@ -523,9 +523,9 @@ static void armada_37xx_irq_ack(struct irq_data *d)
        unsigned long flags;
 
        armada_37xx_irq_update_reg(&reg, d);
-       spin_lock_irqsave(&info->irq_lock, flags);
+       raw_spin_lock_irqsave(&info->irq_lock, flags);
        writel(d->mask, info->base + reg);
-       spin_unlock_irqrestore(&info->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
 }
 
 static void armada_37xx_irq_mask(struct irq_data *d)
@@ -536,10 +536,10 @@ static void armada_37xx_irq_mask(struct irq_data *d)
        unsigned long flags;
 
        armada_37xx_irq_update_reg(&reg, d);
-       spin_lock_irqsave(&info->irq_lock, flags);
+       raw_spin_lock_irqsave(&info->irq_lock, flags);
        val = readl(info->base + reg);
        writel(val & ~d->mask, info->base + reg);
-       spin_unlock_irqrestore(&info->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
 }
 
 static void armada_37xx_irq_unmask(struct irq_data *d)
@@ -550,10 +550,10 @@ static void armada_37xx_irq_unmask(struct irq_data *d)
        unsigned long flags;
 
        armada_37xx_irq_update_reg(&reg, d);
-       spin_lock_irqsave(&info->irq_lock, flags);
+       raw_spin_lock_irqsave(&info->irq_lock, flags);
        val = readl(info->base + reg);
        writel(val | d->mask, info->base + reg);
-       spin_unlock_irqrestore(&info->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
 }
 
 static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on)
@@ -564,14 +564,14 @@ static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on)
        unsigned long flags;
 
        armada_37xx_irq_update_reg(&reg, d);
-       spin_lock_irqsave(&info->irq_lock, flags);
+       raw_spin_lock_irqsave(&info->irq_lock, flags);
        val = readl(info->base + reg);
        if (on)
                val |= (BIT(d->hwirq % GPIO_PER_REG));
        else
                val &= ~(BIT(d->hwirq % GPIO_PER_REG));
        writel(val, info->base + reg);
-       spin_unlock_irqrestore(&info->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
 
        return 0;
 }
@@ -583,7 +583,7 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
        u32 val, reg = IRQ_POL;
        unsigned long flags;
 
-       spin_lock_irqsave(&info->irq_lock, flags);
+       raw_spin_lock_irqsave(&info->irq_lock, flags);
        armada_37xx_irq_update_reg(&reg, d);
        val = readl(info->base + reg);
        switch (type) {
@@ -607,11 +607,11 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
                break;
        }
        default:
-               spin_unlock_irqrestore(&info->irq_lock, flags);
+               raw_spin_unlock_irqrestore(&info->irq_lock, flags);
                return -EINVAL;
        }
        writel(val, info->base + reg);
-       spin_unlock_irqrestore(&info->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
 
        return 0;
 }
@@ -626,7 +626,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
 
        regmap_read(info->regmap, INPUT_VAL + 4*reg_idx, &l);
 
-       spin_lock_irqsave(&info->irq_lock, flags);
+       raw_spin_lock_irqsave(&info->irq_lock, flags);
        p = readl(info->base + IRQ_POL + 4 * reg_idx);
        if ((p ^ l) & (1 << bit_num)) {
                /*
@@ -647,7 +647,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
                ret = -1;
        }
 
-       spin_unlock_irqrestore(&info->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
        return ret;
 }
 
@@ -664,11 +664,11 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
                u32 status;
                unsigned long flags;
 
-               spin_lock_irqsave(&info->irq_lock, flags);
+               raw_spin_lock_irqsave(&info->irq_lock, flags);
                status = readl_relaxed(info->base + IRQ_STATUS + 4 * i);
                /* Manage only the interrupt that was enabled */
                status &= readl_relaxed(info->base + IRQ_EN + 4 * i);
-               spin_unlock_irqrestore(&info->irq_lock, flags);
+               raw_spin_unlock_irqrestore(&info->irq_lock, flags);
                while (status) {
                        u32 hwirq = ffs(status) - 1;
                        u32 virq = irq_find_mapping(d, hwirq +
@@ -695,12 +695,12 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
 
 update_status:
                        /* Update status in case a new IRQ appears */
-                       spin_lock_irqsave(&info->irq_lock, flags);
+                       raw_spin_lock_irqsave(&info->irq_lock, flags);
                        status = readl_relaxed(info->base +
                                               IRQ_STATUS + 4 * i);
                        /* Manage only the interrupt that was enabled */
                        status &= readl_relaxed(info->base + IRQ_EN + 4 * i);
-                       spin_unlock_irqrestore(&info->irq_lock, flags);
+                       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
                }
        }
        chained_irq_exit(chip, desc);
@@ -731,7 +731,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
        struct device *dev = &pdev->dev;
        unsigned int i, nr_irq_parent;
 
-       spin_lock_init(&info->irq_lock);
+       raw_spin_lock_init(&info->irq_lock);
 
        nr_irq_parent = of_irq_count(np);
        if (!nr_irq_parent) {
@@ -1107,25 +1107,40 @@ static const struct of_device_id armada_37xx_pinctrl_of_match[] = {
        { },
 };
 
+static const struct regmap_config armada_37xx_pinctrl_regmap_config = {
+       .reg_bits = 32,
+       .val_bits = 32,
+       .reg_stride = 4,
+       .use_raw_spinlock = true,
+};
+
 static int __init armada_37xx_pinctrl_probe(struct platform_device *pdev)
 {
        struct armada_37xx_pinctrl *info;
        struct device *dev = &pdev->dev;
-       struct device_node *np = dev->of_node;
        struct regmap *regmap;
+       void __iomem *base;
        int ret;
 
+       base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+       if (IS_ERR(base)) {
+               dev_err(dev, "failed to ioremap base address: %pe\n", base);
+               return PTR_ERR(base);
+       }
+
+       regmap = devm_regmap_init_mmio(dev, base,
+                                      &armada_37xx_pinctrl_regmap_config);
+       if (IS_ERR(regmap)) {
+               dev_err(dev, "failed to create regmap: %pe\n", regmap);
+               return PTR_ERR(regmap);
+       }
+
        info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
        info->dev = dev;
-
-       regmap = syscon_node_to_regmap(np);
-       if (IS_ERR(regmap))
-               return dev_err_probe(dev, PTR_ERR(regmap), "cannot get regmap\n");
        info->regmap = regmap;
-
        info->data = of_device_get_match_data(dev);
 
        ret = armada_37xx_pinctrl_register(pdev, info);
index 5f4a8c5..dfc8ea9 100644
 #define ocelot_clrsetbits(addr, clear, set) \
        writel((readl(addr) & ~(clear)) | (set), (addr))
 
-/* PINCONFIG bits (sparx5 only) */
 enum {
        PINCONF_BIAS,
        PINCONF_SCHMITT,
        PINCONF_DRIVE_STRENGTH,
 };
 
-#define BIAS_PD_BIT BIT(4)
-#define BIAS_PU_BIT BIT(3)
-#define BIAS_BITS   (BIAS_PD_BIT|BIAS_PU_BIT)
-#define SCHMITT_BIT BIT(2)
-#define DRIVE_BITS  GENMASK(1, 0)
-
 /* GPIO standard registers */
 #define OCELOT_GPIO_OUT_SET    0x0
 #define OCELOT_GPIO_OUT_CLR    0x4
@@ -321,6 +314,13 @@ struct ocelot_pin_caps {
        unsigned char a_functions[OCELOT_FUNC_PER_PIN]; /* Additional functions */
 };
 
+struct ocelot_pincfg_data {
+       u8 pd_bit;
+       u8 pu_bit;
+       u8 drive_bits;
+       u8 schmitt_bit;
+};
+
 struct ocelot_pinctrl {
        struct device *dev;
        struct pinctrl_dev *pctl;
@@ -328,10 +328,16 @@ struct ocelot_pinctrl {
        struct regmap *map;
        struct regmap *pincfg;
        struct pinctrl_desc *desc;
+       const struct ocelot_pincfg_data *pincfg_data;
        struct ocelot_pmx_func func[FUNC_MAX];
        u8 stride;
 };
 
+struct ocelot_match_data {
+       struct pinctrl_desc desc;
+       struct ocelot_pincfg_data pincfg_data;
+};
+
 #define LUTON_P(p, f0, f1)                                             \
 static struct ocelot_pin_caps luton_pin_##p = {                                \
        .pin = p,                                                       \
@@ -1325,24 +1331,27 @@ static int ocelot_hw_get_value(struct ocelot_pinctrl *info,
        int ret = -EOPNOTSUPP;
 
        if (info->pincfg) {
+               const struct ocelot_pincfg_data *opd = info->pincfg_data;
                u32 regcfg;
 
-               ret = regmap_read(info->pincfg, pin, &regcfg);
+               ret = regmap_read(info->pincfg,
+                                 pin * regmap_get_reg_stride(info->pincfg),
+                                 &regcfg);
                if (ret)
                        return ret;
 
                ret = 0;
                switch (reg) {
                case PINCONF_BIAS:
-                       *val = regcfg & BIAS_BITS;
+                       *val = regcfg & (opd->pd_bit | opd->pu_bit);
                        break;
 
                case PINCONF_SCHMITT:
-                       *val = regcfg & SCHMITT_BIT;
+                       *val = regcfg & opd->schmitt_bit;
                        break;
 
                case PINCONF_DRIVE_STRENGTH:
-                       *val = regcfg & DRIVE_BITS;
+                       *val = regcfg & opd->drive_bits;
                        break;
 
                default:
@@ -1359,14 +1368,18 @@ static int ocelot_pincfg_clrsetbits(struct ocelot_pinctrl *info, u32 regaddr,
        u32 val;
        int ret;
 
-       ret = regmap_read(info->pincfg, regaddr, &val);
+       ret = regmap_read(info->pincfg,
+                         regaddr * regmap_get_reg_stride(info->pincfg),
+                         &val);
        if (ret)
                return ret;
 
        val &= ~clrbits;
        val |= setbits;
 
-       ret = regmap_write(info->pincfg, regaddr, val);
+       ret = regmap_write(info->pincfg,
+                          regaddr * regmap_get_reg_stride(info->pincfg),
+                          val);
 
        return ret;
 }
@@ -1379,23 +1392,27 @@ static int ocelot_hw_set_value(struct ocelot_pinctrl *info,
        int ret = -EOPNOTSUPP;
 
        if (info->pincfg) {
+               const struct ocelot_pincfg_data *opd = info->pincfg_data;
 
                ret = 0;
                switch (reg) {
                case PINCONF_BIAS:
-                       ret = ocelot_pincfg_clrsetbits(info, pin, BIAS_BITS,
+                       ret = ocelot_pincfg_clrsetbits(info, pin,
+                                                      opd->pd_bit | opd->pu_bit,
                                                       val);
                        break;
 
                case PINCONF_SCHMITT:
-                       ret = ocelot_pincfg_clrsetbits(info, pin, SCHMITT_BIT,
+                       ret = ocelot_pincfg_clrsetbits(info, pin,
+                                                      opd->schmitt_bit,
                                                       val);
                        break;
 
                case PINCONF_DRIVE_STRENGTH:
                        if (val <= 3)
                                ret = ocelot_pincfg_clrsetbits(info, pin,
-                                                              DRIVE_BITS, val);
+                                                              opd->drive_bits,
+                                                              val);
                        else
                                ret = -EINVAL;
                        break;
@@ -1425,17 +1442,20 @@ static int ocelot_pinconf_get(struct pinctrl_dev *pctldev,
                if (param == PIN_CONFIG_BIAS_DISABLE)
                        val = (val == 0);
                else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
-                       val = (val & BIAS_PD_BIT ? true : false);
+                       val = !!(val & info->pincfg_data->pd_bit);
                else    /* PIN_CONFIG_BIAS_PULL_UP */
-                       val = (val & BIAS_PU_BIT ? true : false);
+                       val = !!(val & info->pincfg_data->pu_bit);
                break;
 
        case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+               if (!info->pincfg_data->schmitt_bit)
+                       return -EOPNOTSUPP;
+
                err = ocelot_hw_get_value(info, pin, PINCONF_SCHMITT, &val);
                if (err)
                        return err;
 
-               val = (val & SCHMITT_BIT ? true : false);
+               val = !!(val & info->pincfg_data->schmitt_bit);
                break;
 
        case PIN_CONFIG_DRIVE_STRENGTH:
@@ -1479,6 +1499,7 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                              unsigned long *configs, unsigned int num_configs)
 {
        struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+       const struct ocelot_pincfg_data *opd = info->pincfg_data;
        u32 param, arg, p;
        int cfg, err = 0;
 
@@ -1491,8 +1512,8 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                case PIN_CONFIG_BIAS_PULL_UP:
                case PIN_CONFIG_BIAS_PULL_DOWN:
                        arg = (param == PIN_CONFIG_BIAS_DISABLE) ? 0 :
-                       (param == PIN_CONFIG_BIAS_PULL_UP) ? BIAS_PU_BIT :
-                       BIAS_PD_BIT;
+                             (param == PIN_CONFIG_BIAS_PULL_UP) ?
+                               opd->pu_bit : opd->pd_bit;
 
                        err = ocelot_hw_set_value(info, pin, PINCONF_BIAS, arg);
                        if (err)
@@ -1501,7 +1522,10 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                        break;
 
                case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
-                       arg = arg ? SCHMITT_BIT : 0;
+                       if (!opd->schmitt_bit)
+                               return -EOPNOTSUPP;
+
+                       arg = arg ? opd->schmitt_bit : 0;
                        err = ocelot_hw_set_value(info, pin, PINCONF_SCHMITT,
                                                  arg);
                        if (err)
@@ -1562,69 +1586,94 @@ static const struct pinctrl_ops ocelot_pctl_ops = {
        .dt_free_map = pinconf_generic_dt_free_map,
 };
 
-static struct pinctrl_desc luton_desc = {
-       .name = "luton-pinctrl",
-       .pins = luton_pins,
-       .npins = ARRAY_SIZE(luton_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &ocelot_pmx_ops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data luton_desc = {
+       .desc = {
+               .name = "luton-pinctrl",
+               .pins = luton_pins,
+               .npins = ARRAY_SIZE(luton_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &ocelot_pmx_ops,
+               .owner = THIS_MODULE,
+       },
 };
 
-static struct pinctrl_desc serval_desc = {
-       .name = "serval-pinctrl",
-       .pins = serval_pins,
-       .npins = ARRAY_SIZE(serval_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &ocelot_pmx_ops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data serval_desc = {
+       .desc = {
+               .name = "serval-pinctrl",
+               .pins = serval_pins,
+               .npins = ARRAY_SIZE(serval_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &ocelot_pmx_ops,
+               .owner = THIS_MODULE,
+       },
 };
 
-static struct pinctrl_desc ocelot_desc = {
-       .name = "ocelot-pinctrl",
-       .pins = ocelot_pins,
-       .npins = ARRAY_SIZE(ocelot_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &ocelot_pmx_ops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data ocelot_desc = {
+       .desc = {
+               .name = "ocelot-pinctrl",
+               .pins = ocelot_pins,
+               .npins = ARRAY_SIZE(ocelot_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &ocelot_pmx_ops,
+               .owner = THIS_MODULE,
+       },
 };
 
-static struct pinctrl_desc jaguar2_desc = {
-       .name = "jaguar2-pinctrl",
-       .pins = jaguar2_pins,
-       .npins = ARRAY_SIZE(jaguar2_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &ocelot_pmx_ops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data jaguar2_desc = {
+       .desc = {
+               .name = "jaguar2-pinctrl",
+               .pins = jaguar2_pins,
+               .npins = ARRAY_SIZE(jaguar2_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &ocelot_pmx_ops,
+               .owner = THIS_MODULE,
+       },
 };
 
-static struct pinctrl_desc servalt_desc = {
-       .name = "servalt-pinctrl",
-       .pins = servalt_pins,
-       .npins = ARRAY_SIZE(servalt_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &ocelot_pmx_ops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data servalt_desc = {
+       .desc = {
+               .name = "servalt-pinctrl",
+               .pins = servalt_pins,
+               .npins = ARRAY_SIZE(servalt_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &ocelot_pmx_ops,
+               .owner = THIS_MODULE,
+       },
 };
 
-static struct pinctrl_desc sparx5_desc = {
-       .name = "sparx5-pinctrl",
-       .pins = sparx5_pins,
-       .npins = ARRAY_SIZE(sparx5_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &ocelot_pmx_ops,
-       .confops = &ocelot_confops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data sparx5_desc = {
+       .desc = {
+               .name = "sparx5-pinctrl",
+               .pins = sparx5_pins,
+               .npins = ARRAY_SIZE(sparx5_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &ocelot_pmx_ops,
+               .confops = &ocelot_confops,
+               .owner = THIS_MODULE,
+       },
+       .pincfg_data = {
+               .pd_bit = BIT(4),
+               .pu_bit = BIT(3),
+               .drive_bits = GENMASK(1, 0),
+               .schmitt_bit = BIT(2),
+       },
 };
 
-static struct pinctrl_desc lan966x_desc = {
-       .name = "lan966x-pinctrl",
-       .pins = lan966x_pins,
-       .npins = ARRAY_SIZE(lan966x_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &lan966x_pmx_ops,
-       .confops = &ocelot_confops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data lan966x_desc = {
+       .desc = {
+               .name = "lan966x-pinctrl",
+               .pins = lan966x_pins,
+               .npins = ARRAY_SIZE(lan966x_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &lan966x_pmx_ops,
+               .confops = &ocelot_confops,
+               .owner = THIS_MODULE,
+       },
+       .pincfg_data = {
+               .pd_bit = BIT(3),
+               .pu_bit = BIT(2),
+               .drive_bits = GENMASK(1, 0),
+       },
 };
 
 static int ocelot_create_group_func_map(struct device *dev,
@@ -1890,7 +1939,8 @@ static const struct of_device_id ocelot_pinctrl_of_match[] = {
        {},
 };
 
-static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
+static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev,
+                                                  const struct ocelot_pinctrl *info)
 {
        void __iomem *base;
 
@@ -1898,7 +1948,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
                .reg_bits = 32,
                .val_bits = 32,
                .reg_stride = 4,
-               .max_register = 32,
+               .max_register = info->desc->npins * 4,
                .name = "pincfg",
        };
 
@@ -1913,6 +1963,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
 
 static int ocelot_pinctrl_probe(struct platform_device *pdev)
 {
+       const struct ocelot_match_data *data;
        struct device *dev = &pdev->dev;
        struct ocelot_pinctrl *info;
        struct reset_control *reset;
@@ -1929,7 +1980,16 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
        if (!info)
                return -ENOMEM;
 
-       info->desc = (struct pinctrl_desc *)device_get_match_data(dev);
+       data = device_get_match_data(dev);
+       if (!data)
+               return -EINVAL;
+
+       info->desc = devm_kmemdup(dev, &data->desc, sizeof(*info->desc),
+                                 GFP_KERNEL);
+       if (!info->desc)
+               return -ENOMEM;
+
+       info->pincfg_data = &data->pincfg_data;
 
        reset = devm_reset_control_get_optional_shared(dev, "switch");
        if (IS_ERR(reset))
@@ -1956,7 +2016,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
 
        /* Pinconf registers */
        if (info->desc->confops) {
-               pincfg = ocelot_pinctrl_create_pincfg(pdev);
+               pincfg = ocelot_pinctrl_create_pincfg(pdev, info);
                if (IS_ERR(pincfg))
                        dev_dbg(dev, "Failed to create pincfg regmap\n");
                else
index 63429a2..770862f 100644
@@ -266,6 +266,8 @@ static int ralink_pinctrl_pins(struct ralink_priv *p)
                                                p->func[i]->pin_count,
                                                sizeof(int),
                                                GFP_KERNEL);
+               if (!p->func[i]->pins)
+                       return -ENOMEM;
                for (j = 0; j < p->func[i]->pin_count; j++)
                        p->func[i]->pins[j] = p->func[i]->pin_first + j;
 
index 3ba4704..2b3335a 100644
@@ -871,6 +871,9 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node
        }
 
        *map = kcalloc(*num_maps + nmG, sizeof(**map), GFP_KERNEL);
+       if (*map == NULL)
+               return -ENOMEM;
+
        for (i = 0; i < (*num_maps); i++) {
                dt_pin = be32_to_cpu(list[i]);
                pin_num = FIELD_GET(GENMASK(31, 24), dt_pin);
index 458218f..fe4971b 100644
@@ -176,6 +176,7 @@ config PTP_1588_CLOCK_OCP
        depends on !S390
        depends on COMMON_CLK
        select NET_DEVLINK
+       select CRC16
        help
          This driver adds support for an OpenCompute time card.
 
index 9e54fe7..35d4b39 100644
@@ -3565,7 +3565,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
                        if (!atomic_read(&queue->set_pci_flags_count)) {
                                /*
                                 * there's no outstanding PCI any more, so we
-                                * have to request a PCI to be sure the the PCI
+                                * have to request a PCI to be sure the PCI
                                 * will wake at some time in the future then we
                                 * can flush packed buffers that might still be
                                 * hanging around, which can happen if no
index 775c0bf..0933948 100644
@@ -1138,10 +1138,14 @@ static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 
        /* if an error occurred and we have an active dma, then terminate */
-       dmaengine_terminate_sync(ctlr->dma_tx);
-       bs->tx_dma_active = false;
-       dmaengine_terminate_sync(ctlr->dma_rx);
-       bs->rx_dma_active = false;
+       if (ctlr->dma_tx) {
+               dmaengine_terminate_sync(ctlr->dma_tx);
+               bs->tx_dma_active = false;
+       }
+       if (ctlr->dma_rx) {
+               dmaengine_terminate_sync(ctlr->dma_rx);
+               bs->rx_dma_active = false;
+       }
        bcm2835_spi_undo_prologue(bs);
 
        /* and reset */
index 31d778e..6a7f7df 100644
@@ -69,7 +69,7 @@
 #define CDNS_SPI_BAUD_DIV_SHIFT                3 /* Baud rate divisor shift in CR */
 #define CDNS_SPI_SS_SHIFT              10 /* Slave Select field shift in CR */
 #define CDNS_SPI_SS0                   0x1 /* Slave Select zero */
-#define CDNS_SPI_NOSS                  0x3C /* No Slave select */
+#define CDNS_SPI_NOSS                  0xF /* No Slave select */
 
 /*
  * SPI Interrupt Registers bit Masks
index 7a014ee..411b130 100644
@@ -613,6 +613,10 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
                                               rspi->dma_callbacked, HZ);
        if (ret > 0 && rspi->dma_callbacked) {
                ret = 0;
+               if (tx)
+                       dmaengine_synchronize(rspi->ctlr->dma_tx);
+               if (rx)
+                       dmaengine_synchronize(rspi->ctlr->dma_rx);
        } else {
                if (!ret) {
                        dev_err(&rspi->ctlr->dev, "DMA timeout\n");
index 90ce16b..f422f9c 100644
@@ -632,16 +632,19 @@ static int __init sev_guest_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct snp_guest_dev *snp_dev;
        struct miscdevice *misc;
+       void __iomem *mapping;
        int ret;
 
        if (!dev->platform_data)
                return -ENODEV;
 
        data = (struct sev_guest_platform_data *)dev->platform_data;
-       layout = (__force void *)ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
-       if (!layout)
+       mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
+       if (!mapping)
                return -ENODEV;
 
+       layout = (__force void *)mapping;
+
        ret = -ENOMEM;
        snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
        if (!snp_dev)
@@ -706,7 +709,7 @@ e_free_response:
 e_free_request:
        free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
 e_unmap:
-       iounmap(layout);
+       iounmap(mapping);
        return ret;
 }
 
index a01ea49..e8e769b 100644
@@ -1737,6 +1737,14 @@ static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
            (req->flags & REQ_F_PARTIAL_IO))
                return;
 
+       /*
+        * READV uses fields in `struct io_rw` (len/addr) to stash the selected
+        * buffer data. However if that buffer is recycled the original request
+        * data stored in addr is lost. Therefore forbid recycling for now.
+        */
+       if (req->opcode == IORING_OP_READV)
+               return;
+
        /*
         * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
         * the flag and hence ensure that bl->head doesn't get incremented.
@@ -12931,7 +12939,7 @@ static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 {
        struct io_uring_buf_ring *br;
        struct io_uring_buf_reg reg;
-       struct io_buffer_list *bl;
+       struct io_buffer_list *bl, *free_bl = NULL;
        struct page **pages;
        int nr_pages;
 
@@ -12963,7 +12971,7 @@ static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
                if (bl->buf_nr_pages || !list_empty(&bl->buf_list))
                        return -EEXIST;
        } else {
-               bl = kzalloc(sizeof(*bl), GFP_KERNEL);
+               free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
                if (!bl)
                        return -ENOMEM;
        }
@@ -12972,7 +12980,7 @@ static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
                             struct_size(br, bufs, reg.ring_entries),
                             &nr_pages);
        if (IS_ERR(pages)) {
-               kfree(bl);
+               kfree(free_bl);
                return PTR_ERR(pages);
        }
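
The free_bl bookkeeping above makes the error path free only a buffer list that this call actually allocated, never one that was looked up in the ctx. A standalone sketch of the same pattern, with an invented lookup table standing in for io_uring's buffer-list registry:

#include <stdlib.h>

struct buf_list {
        int id;
        struct buf_list *next;
};

static struct buf_list *table;

static struct buf_list *lookup(int id)
{
        for (struct buf_list *bl = table; bl; bl = bl->next)
                if (bl->id == id)
                        return bl;
        return NULL;
}

/* Register a ring for 'id': reuse an existing entry or allocate one.
 * On failure, free only what this function allocated (free_bl). */
static int register_ring(int id, int fail_setup)
{
        struct buf_list *bl, *free_bl = NULL;

        bl = lookup(id);
        if (!bl) {
                free_bl = bl = calloc(1, sizeof(*bl));
                if (!bl)
                        return -1;
                bl->id = id;
        }

        if (fail_setup) {               /* e.g. pinning the ring pages failed */
                free(free_bl);          /* never frees a looked-up entry */
                return -1;
        }

        if (free_bl) {                  /* commit the newly allocated entry */
                free_bl->next = table;
                table = free_bl;
        }
        return 0;
}

int main(void)
{
        register_ring(1, 0);    /* allocates and commits id 1 */
        register_ring(1, 1);    /* fails, but must not free the committed entry */
        return 0;
}
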
 
index 4de597a..52615e6 100644 (file)
@@ -592,8 +592,12 @@ static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
                a = (ATTR_RECORD*)((u8*)ctx->attr +
                                le32_to_cpu(ctx->attr->length));
        for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
-               if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
-                               le32_to_cpu(ctx->mrec->bytes_allocated))
+               u8 *mrec_end = (u8 *)ctx->mrec +
+                              le32_to_cpu(ctx->mrec->bytes_allocated);
+               u8 *name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
+                              a->name_length * sizeof(ntfschar);
+               if ((u8*)a < (u8*)ctx->mrec || (u8*)a > mrec_end ||
+                   name_end > mrec_end)
                        break;
                ctx->attr = a;
                if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
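
The extra check above rejects attribute records whose on-disk name_offset/name_length would point past the end of the MFT record. A minimal sketch of that style of validation for an untrusted variable-length record (struct and field names are invented for the example):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdbool.h>

/* Invented on-disk-style record: a name stored at a self-described offset. */
struct rec {
        uint32_t length;        /* total record length in bytes */
        uint16_t name_offset;   /* offset of the name inside the record */
        uint8_t  name_length;   /* name length in 2-byte characters */
};

/* Accept the record at 'rec_off' only if the record and its name both lie
 * entirely inside the buffer, as the check above now requires. */
static bool rec_is_sane(const uint8_t *buf, size_t buf_len, size_t rec_off)
{
        struct rec r;
        size_t name_end;

        if (rec_off > buf_len || buf_len - rec_off < sizeof(r))
                return false;
        memcpy(&r, buf + rec_off, sizeof(r));   /* header itself is in bounds */
        if (r.length < sizeof(r) || r.length > buf_len - rec_off)
                return false;
        name_end = (size_t)r.name_offset + (size_t)r.name_length * 2;
        return name_end <= buf_len - rec_off;
}

int main(void)
{
        uint8_t buf[64] = { 0 };
        struct rec r = { .length = sizeof(r), .name_offset = sizeof(r), .name_length = 4 };

        memcpy(buf, &r, sizeof(r));
        return rec_is_sane(buf, sizeof(buf), 0) ? 0 : 1;
}
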
index 3375275..740b642 100644 (file)
@@ -277,7 +277,6 @@ enum ocfs2_mount_options
        OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT = 1 << 15,  /* Journal Async Commit */
        OCFS2_MOUNT_ERRORS_CONT = 1 << 16, /* Return EIO to the calling process on error */
        OCFS2_MOUNT_ERRORS_ROFS = 1 << 17, /* Change filesystem to read-only on error */
-       OCFS2_MOUNT_NOCLUSTER = 1 << 18, /* No cluster aware filesystem mount */
 };
 
 #define OCFS2_OSB_SOFT_RO      0x0001
@@ -673,8 +672,7 @@ static inline int ocfs2_cluster_o2cb_global_heartbeat(struct ocfs2_super *osb)
 
 static inline int ocfs2_mount_local(struct ocfs2_super *osb)
 {
-       return ((osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT)
-               || (osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER));
+       return (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT);
 }
 
 static inline int ocfs2_uses_extended_slot_map(struct ocfs2_super *osb)
index 0b0ae3e..da7718c 100644 (file)
@@ -252,16 +252,14 @@ static int __ocfs2_find_empty_slot(struct ocfs2_slot_info *si,
        int i, ret = -ENOSPC;
 
        if ((preferred >= 0) && (preferred < si->si_num_slots)) {
-               if (!si->si_slots[preferred].sl_valid ||
-                   !si->si_slots[preferred].sl_node_num) {
+               if (!si->si_slots[preferred].sl_valid) {
                        ret = preferred;
                        goto out;
                }
        }
 
        for(i = 0; i < si->si_num_slots; i++) {
-               if (!si->si_slots[i].sl_valid ||
-                   !si->si_slots[i].sl_node_num) {
+               if (!si->si_slots[i].sl_valid) {
                        ret = i;
                        break;
                }
@@ -456,30 +454,24 @@ int ocfs2_find_slot(struct ocfs2_super *osb)
        spin_lock(&osb->osb_lock);
        ocfs2_update_slot_info(si);
 
-       if (ocfs2_mount_local(osb))
-               /* use slot 0 directly in local mode */
-               slot = 0;
-       else {
-               /* search for ourselves first and take the slot if it already
-                * exists. Perhaps we need to mark this in a variable for our
-                * own journal recovery? Possibly not, though we certainly
-                * need to warn to the user */
-               slot = __ocfs2_node_num_to_slot(si, osb->node_num);
+       /* search for ourselves first and take the slot if it already
+        * exists. Perhaps we need to mark this in a variable for our
+        * own journal recovery? Possibly not, though we certainly
+        * need to warn the user */
+       slot = __ocfs2_node_num_to_slot(si, osb->node_num);
+       if (slot < 0) {
+               /* if no slot yet, then just take 1st available
+                * one. */
+               slot = __ocfs2_find_empty_slot(si, osb->preferred_slot);
                if (slot < 0) {
-                       /* if no slot yet, then just take 1st available
-                        * one. */
-                       slot = __ocfs2_find_empty_slot(si, osb->preferred_slot);
-                       if (slot < 0) {
-                               spin_unlock(&osb->osb_lock);
-                               mlog(ML_ERROR, "no free slots available!\n");
-                               status = -EINVAL;
-                               goto bail;
-                       }
-               } else
-                       printk(KERN_INFO "ocfs2: Slot %d on device (%s) was "
-                              "already allocated to this node!\n",
-                              slot, osb->dev_str);
-       }
+                       spin_unlock(&osb->osb_lock);
+                       mlog(ML_ERROR, "no free slots available!\n");
+                       status = -EINVAL;
+                       goto bail;
+               }
+       } else
+               printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already "
+                      "allocated to this node!\n", slot, osb->dev_str);
 
        ocfs2_set_slot(si, slot, osb->node_num);
        osb->slot_num = slot;
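
With the local-mount shortcut removed, ocfs2_find_slot() always searches for the node's existing slot first and only then takes the first empty one. Reduced to its core, the selection logic looks like this standalone sketch (the slot table and names are illustrative only):

#include <stdio.h>

#define NUM_SLOTS 8

struct slot {
        int valid;
        int node_num;
};

/* Return the slot already owned by 'node', else the first free slot,
 * else -1 when the table is full. */
static int find_slot(struct slot *slots, int n, int node)
{
        int i;

        for (i = 0; i < n; i++)
                if (slots[i].valid && slots[i].node_num == node)
                        return i;       /* reuse our old slot */

        for (i = 0; i < n; i++)
                if (!slots[i].valid)
                        return i;       /* first empty slot */

        return -1;                      /* no free slots available */
}

int main(void)
{
        struct slot slots[NUM_SLOTS] = { { .valid = 1, .node_num = 3 } };

        printf("node 3 -> slot %d, node 5 -> slot %d\n",
               find_slot(slots, NUM_SLOTS, 3), find_slot(slots, NUM_SLOTS, 5));
        return 0;
}
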
index f729881..438be02 100644 (file)
@@ -172,7 +172,6 @@ enum {
        Opt_dir_resv_level,
        Opt_journal_async_commit,
        Opt_err_cont,
-       Opt_nocluster,
        Opt_err,
 };
 
@@ -206,7 +205,6 @@ static const match_table_t tokens = {
        {Opt_dir_resv_level, "dir_resv_level=%u"},
        {Opt_journal_async_commit, "journal_async_commit"},
        {Opt_err_cont, "errors=continue"},
-       {Opt_nocluster, "nocluster"},
        {Opt_err, NULL}
 };
 
@@ -618,13 +616,6 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
                goto out;
        }
 
-       tmp = OCFS2_MOUNT_NOCLUSTER;
-       if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) {
-               ret = -EINVAL;
-               mlog(ML_ERROR, "Cannot change nocluster option on remount\n");
-               goto out;
-       }
-
        tmp = OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL |
                OCFS2_MOUNT_HB_NONE;
        if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) {
@@ -865,7 +856,6 @@ static int ocfs2_verify_userspace_stack(struct ocfs2_super *osb,
        }
 
        if (ocfs2_userspace_stack(osb) &&
-           !(osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER) &&
            strncmp(osb->osb_cluster_stack, mopt->cluster_stack,
                    OCFS2_STACK_LABEL_LEN)) {
                mlog(ML_ERROR,
@@ -1137,11 +1127,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
               osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK ? "writeback" :
               "ordered");
 
-       if ((osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER) &&
-          !(osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT))
-               printk(KERN_NOTICE "ocfs2: The shared device (%s) is mounted "
-                      "without cluster aware mode.\n", osb->dev_str);
-
        atomic_set(&osb->vol_state, VOLUME_MOUNTED);
        wake_up(&osb->osb_mount_event);
 
@@ -1452,9 +1437,6 @@ static int ocfs2_parse_options(struct super_block *sb,
                case Opt_journal_async_commit:
                        mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT;
                        break;
-               case Opt_nocluster:
-                       mopt->mount_opt |= OCFS2_MOUNT_NOCLUSTER;
-                       break;
                default:
                        mlog(ML_ERROR,
                             "Unrecognized mount option \"%s\" "
@@ -1566,9 +1548,6 @@ static int ocfs2_show_options(struct seq_file *s, struct dentry *root)
        if (opts & OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT)
                seq_printf(s, ",journal_async_commit");
 
-       if (opts & OCFS2_MOUNT_NOCLUSTER)
-               seq_printf(s, ",nocluster");
-
        return 0;
 }
 
index e0777ee..397da02 100644 (file)
@@ -1263,6 +1263,9 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
                                          count, fl);
                file_end_write(out.file);
        } else {
+               if (out.file->f_flags & O_NONBLOCK)
+                       fl |= SPLICE_F_NONBLOCK;
+
                retval = splice_file_to_pipe(in.file, opipe, &pos, count, fl);
        }
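
The hunk above forwards O_NONBLOCK on the output pipe as SPLICE_F_NONBLOCK when do_sendfile() falls back to splicing. A userspace caller of splice(2) applies the same mapping like this (the helper name and sizes are illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/* Splice up to 'len' bytes from in_fd into a pipe write end, honouring
 * O_NONBLOCK on the pipe the way the sendfile fallback now does. */
static ssize_t splice_to_pipe_fd(int in_fd, int pipe_wr, size_t len)
{
        unsigned int flags = 0;
        int fl = fcntl(pipe_wr, F_GETFL);

        if (fl >= 0 && (fl & O_NONBLOCK))
                flags |= SPLICE_F_NONBLOCK;     /* don't block on a full pipe */

        return splice(in_fd, NULL, pipe_wr, NULL, len, flags);
}

int main(void)
{
        int pfd[2];

        if (pipe2(pfd, O_NONBLOCK))
                return 1;
        /* splice from stdin (e.g. with a file redirected into it) into the pipe */
        return splice_to_pipe_fd(0, pfd[1], 4096) < 0 ? 1 : 0;
}
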
 
index e943370..de86f5b 100644 (file)
@@ -192,17 +192,19 @@ static inline void msg_init(struct uffd_msg *msg)
 }
 
 static inline struct uffd_msg userfault_msg(unsigned long address,
+                                           unsigned long real_address,
                                            unsigned int flags,
                                            unsigned long reason,
                                            unsigned int features)
 {
        struct uffd_msg msg;
+
        msg_init(&msg);
        msg.event = UFFD_EVENT_PAGEFAULT;
 
-       if (!(features & UFFD_FEATURE_EXACT_ADDRESS))
-               address &= PAGE_MASK;
-       msg.arg.pagefault.address = address;
+       msg.arg.pagefault.address = (features & UFFD_FEATURE_EXACT_ADDRESS) ?
+                                   real_address : address;
+
        /*
         * These flags indicate why the userfault occurred:
         * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
@@ -488,8 +490,8 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
 
        init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
        uwq.wq.private = current;
-       uwq.msg = userfault_msg(vmf->real_address, vmf->flags, reason,
-                       ctx->features);
+       uwq.msg = userfault_msg(vmf->address, vmf->real_address, vmf->flags,
+                               reason, ctx->features);
        uwq.ctx = ctx;
        uwq.waken = false;
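
userfault_msg() now receives both the page-aligned and the exact faulting address and reports one or the other depending on UFFD_FEATURE_EXACT_ADDRESS. The selection is just a conditional, sketched below with invented constants:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_SKETCH   4096UL
#define FEAT_EXACT_ADDRESS (1UL << 0)   /* stand-in for UFFD_FEATURE_EXACT_ADDRESS */

/* Report either the page-aligned fault address or the exact one,
 * depending on the negotiated feature bits. */
static uint64_t report_address(uint64_t aligned, uint64_t exact, uint64_t features)
{
        return (features & FEAT_EXACT_ADDRESS) ? exact : aligned;
}

int main(void)
{
        uint64_t exact = 0x7f00000012a4ULL;
        uint64_t aligned = exact & ~(PAGE_SIZE_SKETCH - 1);

        printf("default: %#llx, exact: %#llx\n",
               (unsigned long long)report_address(aligned, exact, 0),
               (unsigned long long)report_address(aligned, exact, FEAT_EXACT_ADDRESS));
        return 0;
}
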
 
index 7ce93aa..98954dd 100644 (file)
@@ -1125,9 +1125,7 @@ static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
 }
 #endif
 
-#ifndef CONFIG_GENERIC_DEVMEM_IS_ALLOWED
 extern int devmem_is_allowed(unsigned long pfn);
-#endif
 
 #endif /* __KERNEL__ */
 
index cb2167c..492dce4 100644 (file)
@@ -368,9 +368,6 @@ static inline void tlb_flush(struct mmu_gather *tlb)
                flush_tlb_mm(tlb->mm);
 }
 
-static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
 #else /* CONFIG_MMU_GATHER_NO_RANGE */
 
 #ifndef tlb_flush
index 0fca8f3..addb135 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/dma-fence.h>
 #include <linux/completion.h>
 #include <linux/xarray.h>
-#include <linux/irq_work.h>
+#include <linux/workqueue.h>
 
 #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
 
@@ -295,7 +295,7 @@ struct drm_sched_job {
         */
        union {
                struct dma_fence_cb             finish_cb;
-               struct irq_work                 work;
+               struct work_struct              work;
        };
 
        uint64_t                        id;
index cf3d0d6..7898e29 100644 (file)
@@ -1130,23 +1130,27 @@ static inline bool is_zone_movable_page(const struct page *page)
 #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
 
-bool __put_devmap_managed_page(struct page *page);
-static inline bool put_devmap_managed_page(struct page *page)
+bool __put_devmap_managed_page_refs(struct page *page, int refs);
+static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
 {
        if (!static_branch_unlikely(&devmap_managed_key))
                return false;
        if (!is_zone_device_page(page))
                return false;
-       return __put_devmap_managed_page(page);
+       return __put_devmap_managed_page_refs(page, refs);
 }
-
 #else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
-static inline bool put_devmap_managed_page(struct page *page)
+static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
 {
        return false;
 }
 #endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
 
+static inline bool put_devmap_managed_page(struct page *page)
+{
+       return put_devmap_managed_page_refs(page, 1);
+}
+
 /* 127: arbitrary random number, small enough to assemble well */
 #define folio_ref_zero_or_close_to_overflow(folio) \
        ((unsigned int) folio_ref_count(folio) + 127u <= 127u)
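
The mm.h change above turns the single-reference devmap helper into a thin wrapper around a refs-counted variant so GUP can drop several pins in one call. Stripped of the devmap details, the refactor follows this generic shape (the names below are illustrative; only the pattern matches the diff):

#include <stdbool.h>
#include <stdio.h>

static int refcount = 3;

/* Core helper: drop 'refs' references in one go. */
static bool put_object_refs(int refs)
{
        refcount -= refs;
        printf("dropped %d ref(s), %d left\n", refs, refcount);
        return refcount == 0;
}

/* Existing single-reference callers keep their old entry point. */
static bool put_object(void)
{
        return put_object_refs(1);
}

int main(void)
{
        put_object();           /* old-style caller */
        put_object_refs(2);     /* batched caller, as gup_put_folio() now does */
        return 0;
}
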
index f7506f0..c04f359 100644 (file)
@@ -405,6 +405,9 @@ static inline bool ip6_ignore_linkdown(const struct net_device *dev)
 {
        const struct inet6_dev *idev = __in6_dev_get(dev);
 
+       if (unlikely(!idev))
+               return true;
+
        return !!idev->cnf.ignore_routes_with_linkdown;
 }
 
index 3c4f550..2f766e3 100644 (file)
@@ -847,6 +847,7 @@ enum {
 };
 
 void l2cap_chan_hold(struct l2cap_chan *c);
+struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c);
 void l2cap_chan_put(struct l2cap_chan *c);
 
 static inline void l2cap_chan_lock(struct l2cap_chan *chan)
index 5bd3fac..119ed1f 100644 (file)
@@ -1509,6 +1509,27 @@ struct devlink_ops {
                                    struct devlink_rate *parent,
                                    void *priv_child, void *priv_parent,
                                    struct netlink_ext_ack *extack);
+       /**
+        * selftest_check() - queries if a selftest is supported
+        * @devlink: devlink instance
+        * @id: test index
+        * @extack: extack for reporting error messages
+        *
+        * Return: true if test is supported by the driver
+        */
+       bool (*selftest_check)(struct devlink *devlink, unsigned int id,
+                              struct netlink_ext_ack *extack);
+       /**
+        * selftest_run() - Runs a selftest
+        * @devlink: devlink instance
+        * @id: test index
+        * @extack: extack for reporting error messages
+        *
+        * Return: status of the test
+        */
+       enum devlink_selftest_status
+       (*selftest_run)(struct devlink *devlink, unsigned int id,
+                       struct netlink_ext_ack *extack);
 };
 
 void *devlink_priv(struct devlink *devlink);
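
The two new ops above are what the devlink core further down in this commit iterates over: selftest_check() says whether a given test ID is supported and selftest_run() executes it. A hedged sketch of a driver-side implementation, using only the types this patch adds (the foo_* names are hypothetical):

static bool foo_selftest_check(struct devlink *devlink, unsigned int id,
                               struct netlink_ext_ack *extack)
{
        /* Only the flash self-test is implemented in this sketch. */
        return id == DEVLINK_ATTR_SELFTEST_ID_FLASH;
}

static enum devlink_selftest_status
foo_selftest_run(struct devlink *devlink, unsigned int id,
                 struct netlink_ext_ack *extack)
{
        if (id != DEVLINK_ATTR_SELFTEST_ID_FLASH)
                return DEVLINK_SELFTEST_STATUS_SKIP;

        /* Exercise the flash path here; report pass or fail accordingly. */
        return DEVLINK_SELFTEST_STATUS_PASS;
}

static const struct devlink_ops foo_devlink_ops = {
        .selftest_check = foo_selftest_check,
        .selftest_run   = foo_selftest_run,
};
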
index 2442d64..8fbff8d 100644 (file)
@@ -13,8 +13,7 @@ union fwnet_hwaddr {
                __be64 uniq_id;         /* EUI-64                       */
                u8 max_rec;             /* max packet size              */
                u8 sspd;                /* max speed                    */
-               __be16 fifo_hi;         /* hi 16bits of FIFO addr       */
-               __be32 fifo_lo;         /* lo 32bits of FIFO addr       */
+               u8 fifo[6];             /* FIFO addr                    */
        } __packed uc;
 };
 
index 85cd695..ee88f0f 100644 (file)
@@ -321,7 +321,7 @@ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
 
 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
 
-#define TCP_PINGPONG_THRESH    3
+#define TCP_PINGPONG_THRESH    1
 
 static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
 {
@@ -338,14 +338,6 @@ static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
        return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
 }
 
-static inline void inet_csk_inc_pingpong_cnt(struct sock *sk)
-{
-       struct inet_connection_sock *icsk = inet_csk(sk);
-
-       if (icsk->icsk_ack.pingpong < U8_MAX)
-               icsk->icsk_ack.pingpong++;
-}
-
 static inline bool inet_csk_has_ulp(struct sock *sk)
 {
        return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops;
index 20db950..63fac94 100644 (file)
@@ -54,6 +54,7 @@ struct ip_tunnel_key {
        __be32                  label;          /* Flow Label for IPv6 */
        __be16                  tp_src;
        __be16                  tp_dst;
+       __u8                    flow_flags;
 };
 
 /* Flags for ip_tunnel_info mode. */
index f7ad1a7..a7273b2 100644 (file)
@@ -2823,18 +2823,18 @@ static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
 {
        /* Does this proto have per netns sysctl_wmem ? */
        if (proto->sysctl_wmem_offset)
-               return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);
+               return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset));
 
-       return *proto->sysctl_wmem;
+       return READ_ONCE(*proto->sysctl_wmem);
 }
 
 static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
 {
        /* Does this proto have per netns sysctl_rmem ? */
        if (proto->sysctl_rmem_offset)
-               return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);
+               return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset));
 
-       return *proto->sysctl_rmem;
+       return READ_ONCE(*proto->sysctl_rmem);
 }
 
 /* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
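
The sk_get_wmem0()/sk_get_rmem0() hunk wraps lockless reads of the sysctl values in READ_ONCE() so each read is a single, untorn load even while a writer updates the value concurrently. A rough userspace analogue of the same idea with C11 relaxed atomics (illustrative only, not the kernel macro):

#include <stdatomic.h>
#include <stdio.h>

/* A "sysctl" updated by one thread and read locklessly by others. */
static _Atomic int sysctl_wmem_default = 16 * 1024;

/* Reader side: one relaxed load, never re-read or torn. */
static int get_wmem0(void)
{
        return atomic_load_explicit(&sysctl_wmem_default, memory_order_relaxed);
}

/* Writer side: a single relaxed store, as a sysctl handler would do. */
static void set_wmem0(int val)
{
        atomic_store_explicit(&sysctl_wmem_default, val, memory_order_relaxed);
}

int main(void)
{
        set_wmem0(32 * 1024);
        printf("wmem0 = %d\n", get_wmem0());
        return 0;
}
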
index b8620be..d10962b 100644 (file)
@@ -1425,7 +1425,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space,
 
 static inline int tcp_win_from_space(const struct sock *sk, int space)
 {
-       int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;
+       int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);
 
        return tcp_adv_win_scale <= 0 ?
                (space>>(-tcp_adv_win_scale)) :
index abb050b..b75b572 100644 (file)
@@ -161,6 +161,8 @@ struct tls_offload_context_tx {
 
        struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
        void (*sk_destruct)(struct sock *sk);
+       struct work_struct destruct_work;
+       struct tls_context *ctx;
        u8 driver_state[] __aligned(8);
        /* The TLS layer reserves room for driver specific state
         * Currently the belief is that there is not enough
index f13d37b..1ecdb91 100644 (file)
@@ -192,6 +192,7 @@ struct f_owner_ex {
 
 #define F_LINUX_SPECIFIC_BASE  1024
 
+#ifndef HAVE_ARCH_STRUCT_FLOCK
 struct flock {
        short   l_type;
        short   l_whence;
@@ -216,5 +217,6 @@ struct flock64 {
        __ARCH_FLOCK64_PAD
 #endif
 };
+#endif /* HAVE_ARCH_STRUCT_FLOCK */
 
 #endif /* _ASM_GENERIC_FCNTL_H */
index 5413216..2f24b53 100644 (file)
@@ -136,6 +136,9 @@ enum devlink_command {
        DEVLINK_CMD_LINECARD_NEW,
        DEVLINK_CMD_LINECARD_DEL,
 
+       DEVLINK_CMD_SELFTESTS_GET,      /* can dump */
+       DEVLINK_CMD_SELFTESTS_RUN,
+
        /* add new commands above here */
        __DEVLINK_CMD_MAX,
        DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
@@ -276,6 +279,30 @@ enum {
 #define DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS \
        (_BITUL(__DEVLINK_FLASH_OVERWRITE_MAX_BIT) - 1)
 
+enum devlink_attr_selftest_id {
+       DEVLINK_ATTR_SELFTEST_ID_UNSPEC,
+       DEVLINK_ATTR_SELFTEST_ID_FLASH, /* flag */
+
+       __DEVLINK_ATTR_SELFTEST_ID_MAX,
+       DEVLINK_ATTR_SELFTEST_ID_MAX = __DEVLINK_ATTR_SELFTEST_ID_MAX - 1
+};
+
+enum devlink_selftest_status {
+       DEVLINK_SELFTEST_STATUS_SKIP,
+       DEVLINK_SELFTEST_STATUS_PASS,
+       DEVLINK_SELFTEST_STATUS_FAIL
+};
+
+enum devlink_attr_selftest_result {
+       DEVLINK_ATTR_SELFTEST_RESULT_UNSPEC,
+       DEVLINK_ATTR_SELFTEST_RESULT,           /* nested */
+       DEVLINK_ATTR_SELFTEST_RESULT_ID,        /* u32, enum devlink_attr_selftest_id */
+       DEVLINK_ATTR_SELFTEST_RESULT_STATUS,    /* u8, enum devlink_selftest_status */
+
+       __DEVLINK_ATTR_SELFTEST_RESULT_MAX,
+       DEVLINK_ATTR_SELFTEST_RESULT_MAX = __DEVLINK_ATTR_SELFTEST_RESULT_MAX - 1
+};
+
 /**
  * enum devlink_trap_action - Packet trap action.
  * @DEVLINK_TRAP_ACTION_DROP: Packet is dropped by the device and a copy is not
@@ -578,6 +605,8 @@ enum devlink_attr {
 
        DEVLINK_ATTR_NESTED_DEVLINK,            /* nested */
 
+       DEVLINK_ATTR_SELFTESTS,                 /* nested */
+
        /* add new attributes above here, update the policy in devlink.c */
 
        __DEVLINK_ATTR_MAX,
index 811897d..860f867 100644 (file)
@@ -2084,7 +2084,7 @@ struct kvm_stats_header {
 #define KVM_STATS_UNIT_SECONDS         (0x2 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_CYCLES          (0x3 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_BOOLEAN         (0x4 << KVM_STATS_UNIT_SHIFT)
-#define KVM_STATS_UNIT_MAX             KVM_STATS_UNIT_CYCLES
+#define KVM_STATS_UNIT_MAX             KVM_STATS_UNIT_BOOLEAN
 
 #define KVM_STATS_BASE_SHIFT           8
 #define KVM_STATS_BASE_MASK            (0xF << KVM_STATS_BASE_SHIFT)
index eb815e0..a9fa777 100644 (file)
@@ -35,6 +35,8 @@ enum {
        SEG6_IPTUN_MODE_INLINE,
        SEG6_IPTUN_MODE_ENCAP,
        SEG6_IPTUN_MODE_L2ENCAP,
+       SEG6_IPTUN_MODE_ENCAP_RED,
+       SEG6_IPTUN_MODE_L2ENCAP_RED,
 };
 
 #endif
index 7ac971e..7e64447 100644 (file)
@@ -6643,7 +6643,7 @@ static void btf_snprintf_show(struct btf_show *show, const char *fmt,
        if (len < 0) {
                ssnprintf->len_left = 0;
                ssnprintf->len = len;
-       } else if (len > ssnprintf->len_left) {
+       } else if (len >= ssnprintf->len_left) {
                /* no space, drive on to get length we would have written */
                ssnprintf->len_left = 0;
                ssnprintf->len += len;
index 1400561..a0e02b0 100644 (file)
@@ -477,7 +477,7 @@ static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
        if (!dev->netdev_ops->ndo_xdp_xmit)
                return -EOPNOTSUPP;
 
-       err = xdp_ok_fwd_dev(dev, xdpf->len);
+       err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
        if (unlikely(err))
                return err;
 
@@ -536,7 +536,7 @@ static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
            !obj->dev->netdev_ops->ndo_xdp_xmit)
                return false;
 
-       if (xdp_ok_fwd_dev(obj->dev, xdpf->len))
+       if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
                return false;
 
        return true;
index 42e387a..0f532e6 100644 (file)
@@ -98,7 +98,7 @@ static int bpf_tramp_ftrace_ops_func(struct ftrace_ops *ops, enum ftrace_ops_cmd
        default:
                ret = -EINVAL;
                break;
-       };
+       }
 
        mutex_unlock(&tr->mutex);
        return ret;
@@ -248,14 +248,17 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
        int ret;
 
        faddr = ftrace_location((unsigned long)ip);
-       if (faddr)
+       if (faddr) {
+               if (!tr->fops)
+                       return -ENOTSUPP;
                tr->func.ftrace_managed = true;
+       }
 
        if (bpf_trampoline_module_get(tr))
                return -ENOENT;
 
        if (tr->func.ftrace_managed) {
-               ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 0);
+               ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1);
                ret = register_ftrace_direct_multi(tr->fops, (long)new_addr);
        } else {
                ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
index 50ba70f..1c304fe 100644 (file)
@@ -511,10 +511,52 @@ static bool srcu_readers_active(struct srcu_struct *ssp)
        return sum;
 }
 
-#define SRCU_INTERVAL          1       // Base delay if no expedited GPs pending.
-#define SRCU_MAX_INTERVAL      10      // Maximum incremental delay from slow readers.
-#define SRCU_MAX_NODELAY_PHASE 1       // Maximum per-GP-phase consecutive no-delay instances.
-#define SRCU_MAX_NODELAY       100     // Maximum consecutive no-delay instances.
+/*
+ * We use an adaptive strategy for synchronize_srcu() and especially for
+ * synchronize_srcu_expedited().  We spin for a fixed time period
+ * (defined below, boot time configurable) to allow SRCU readers to exit
+ * their read-side critical sections.  If there are still some readers
+ * after one jiffy, we repeatedly block for one-jiffy time periods.
+ * The blocking time is increased as the grace-period age increases,
+ * with max blocking time capped at 10 jiffies.
+ */
+#define SRCU_DEFAULT_RETRY_CHECK_DELAY         5
+
+static ulong srcu_retry_check_delay = SRCU_DEFAULT_RETRY_CHECK_DELAY;
+module_param(srcu_retry_check_delay, ulong, 0444);
+
+#define SRCU_INTERVAL          1               // Base delay if no expedited GPs pending.
+#define SRCU_MAX_INTERVAL      10              // Maximum incremental delay from slow readers.
+
+#define SRCU_DEFAULT_MAX_NODELAY_PHASE_LO      3UL     // Lowmark on default per-GP-phase
+                                                       // no-delay instances.
+#define SRCU_DEFAULT_MAX_NODELAY_PHASE_HI      1000UL  // Highmark on default per-GP-phase
+                                                       // no-delay instances.
+
+#define SRCU_UL_CLAMP_LO(val, low)     ((val) > (low) ? (val) : (low))
+#define SRCU_UL_CLAMP_HI(val, high)    ((val) < (high) ? (val) : (high))
+#define SRCU_UL_CLAMP(val, low, high)  SRCU_UL_CLAMP_HI(SRCU_UL_CLAMP_LO((val), (low)), (high))
+// Per-GP-phase no-delay instances adjusted to allow a non-sleeping poll of up
+// to one jiffy. The multiplication by 2 factors in the srcu_get_delay()
+// called from process_srcu().
+#define SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED        \
+       (2UL * USEC_PER_SEC / HZ / SRCU_DEFAULT_RETRY_CHECK_DELAY)
+
+// Maximum per-GP-phase consecutive no-delay instances.
+#define SRCU_DEFAULT_MAX_NODELAY_PHASE \
+       SRCU_UL_CLAMP(SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED,  \
+                     SRCU_DEFAULT_MAX_NODELAY_PHASE_LO,        \
+                     SRCU_DEFAULT_MAX_NODELAY_PHASE_HI)
+
+static ulong srcu_max_nodelay_phase = SRCU_DEFAULT_MAX_NODELAY_PHASE;
+module_param(srcu_max_nodelay_phase, ulong, 0444);
+
+// Maximum consecutive no-delay instances.
+#define SRCU_DEFAULT_MAX_NODELAY       (SRCU_DEFAULT_MAX_NODELAY_PHASE > 100 ? \
+                                        SRCU_DEFAULT_MAX_NODELAY_PHASE : 100)
+
+static ulong srcu_max_nodelay = SRCU_DEFAULT_MAX_NODELAY;
+module_param(srcu_max_nodelay, ulong, 0444);
 
 /*
  * Return grace-period delay, zero if there are expedited grace
@@ -522,16 +564,22 @@ static bool srcu_readers_active(struct srcu_struct *ssp)
  */
 static unsigned long srcu_get_delay(struct srcu_struct *ssp)
 {
+       unsigned long gpstart;
+       unsigned long j;
        unsigned long jbase = SRCU_INTERVAL;
 
        if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
                jbase = 0;
-       if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)))
-               jbase += jiffies - READ_ONCE(ssp->srcu_gp_start);
-       if (!jbase) {
-               WRITE_ONCE(ssp->srcu_n_exp_nodelay, READ_ONCE(ssp->srcu_n_exp_nodelay) + 1);
-               if (READ_ONCE(ssp->srcu_n_exp_nodelay) > SRCU_MAX_NODELAY_PHASE)
-                       jbase = 1;
+       if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq))) {
+               j = jiffies - 1;
+               gpstart = READ_ONCE(ssp->srcu_gp_start);
+               if (time_after(j, gpstart))
+                       jbase += j - gpstart;
+               if (!jbase) {
+                       WRITE_ONCE(ssp->srcu_n_exp_nodelay, READ_ONCE(ssp->srcu_n_exp_nodelay) + 1);
+                       if (READ_ONCE(ssp->srcu_n_exp_nodelay) > srcu_max_nodelay_phase)
+                               jbase = 1;
+               }
        }
        return jbase > SRCU_MAX_INTERVAL ? SRCU_MAX_INTERVAL : jbase;
 }
@@ -606,15 +654,6 @@ void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
-/*
- * We use an adaptive strategy for synchronize_srcu() and especially for
- * synchronize_srcu_expedited().  We spin for a fixed time period
- * (defined below) to allow SRCU readers to exit their read-side critical
- * sections.  If there are still some readers after a few microseconds,
- * we repeatedly block for 1-millisecond time periods.
- */
-#define SRCU_RETRY_CHECK_DELAY         5
-
 /*
  * Start an SRCU grace period.
  */
@@ -700,7 +739,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp
  */
 static void srcu_gp_end(struct srcu_struct *ssp)
 {
-       unsigned long cbdelay;
+       unsigned long cbdelay = 1;
        bool cbs;
        bool last_lvl;
        int cpu;
@@ -720,7 +759,9 @@ static void srcu_gp_end(struct srcu_struct *ssp)
        spin_lock_irq_rcu_node(ssp);
        idx = rcu_seq_state(ssp->srcu_gp_seq);
        WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
-       cbdelay = !!srcu_get_delay(ssp);
+       if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
+               cbdelay = 0;
+
        WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
        rcu_seq_end(&ssp->srcu_gp_seq);
        gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
@@ -921,12 +962,16 @@ static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
  */
 static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
 {
+       unsigned long curdelay;
+
+       curdelay = !srcu_get_delay(ssp);
+
        for (;;) {
                if (srcu_readers_active_idx_check(ssp, idx))
                        return true;
-               if (--trycount + !srcu_get_delay(ssp) <= 0)
+               if ((--trycount + curdelay) <= 0)
                        return false;
-               udelay(SRCU_RETRY_CHECK_DELAY);
+               udelay(srcu_retry_check_delay);
        }
 }
 
@@ -1582,7 +1627,7 @@ static void process_srcu(struct work_struct *work)
                j = jiffies;
                if (READ_ONCE(ssp->reschedule_jiffies) == j) {
                        WRITE_ONCE(ssp->reschedule_count, READ_ONCE(ssp->reschedule_count) + 1);
-                       if (READ_ONCE(ssp->reschedule_count) > SRCU_MAX_NODELAY)
+                       if (READ_ONCE(ssp->reschedule_count) > srcu_max_nodelay)
                                curdelay = 1;
                } else {
                        WRITE_ONCE(ssp->reschedule_count, 1);
@@ -1674,6 +1719,11 @@ static int __init srcu_bootup_announce(void)
        pr_info("Hierarchical SRCU implementation.\n");
        if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
                pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
+       if (srcu_retry_check_delay != SRCU_DEFAULT_RETRY_CHECK_DELAY)
+               pr_info("\tNon-default retry check delay of %lu us.\n", srcu_retry_check_delay);
+       if (srcu_max_nodelay != SRCU_DEFAULT_MAX_NODELAY)
+               pr_info("\tNon-default max no-delay of %lu.\n", srcu_max_nodelay);
+       pr_info("\tMax phase no-delay instances is %lu.\n", srcu_max_nodelay_phase);
        return 0;
 }
 early_initcall(srcu_bootup_announce);
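
The new SRCU per-phase default above is derived from HZ and the retry delay and then clamped between the 3 and 1000 marks. Worked through for one common configuration, the arithmetic is easy to check with a small standalone program (the HZ value is chosen only for the example):

#include <stdio.h>

#define USEC_PER_SEC 1000000UL

/* Mirror of SRCU_UL_CLAMP and the adjusted per-phase default, for one HZ. */
static unsigned long clamp_ul(unsigned long val, unsigned long lo, unsigned long hi)
{
        if (val < lo)
                return lo;
        if (val > hi)
                return hi;
        return val;
}

int main(void)
{
        unsigned long hz = 250;                 /* example HZ */
        unsigned long retry_delay_us = 5;       /* SRCU_DEFAULT_RETRY_CHECK_DELAY */
        unsigned long adjusted = 2UL * USEC_PER_SEC / hz / retry_delay_us;

        /* HZ=250: 2 * 1000000 / 250 / 5 = 1600, clamped to the 1000 highmark */
        printf("max nodelay phase = %lu\n", clamp_ul(adjusted, 3UL, 1000UL));
        return 0;
}
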
index b515296..7bf5612 100644 (file)
@@ -1701,7 +1701,10 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
                 * the throttle.
                 */
                p->dl.dl_throttled = 0;
-               BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH);
+               if (!(flags & ENQUEUE_REPLENISH))
+                       printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
+                                            task_pid_nr(p));
+
                return;
        }
 
index bb9962b..59ddb00 100644 (file)
@@ -454,6 +454,33 @@ void init_watch(struct watch *watch, struct watch_queue *wqueue)
        rcu_assign_pointer(watch->queue, wqueue);
 }
 
+static int add_one_watch(struct watch *watch, struct watch_list *wlist, struct watch_queue *wqueue)
+{
+       const struct cred *cred;
+       struct watch *w;
+
+       hlist_for_each_entry(w, &wlist->watchers, list_node) {
+               struct watch_queue *wq = rcu_access_pointer(w->queue);
+               if (wqueue == wq && watch->id == w->id)
+                       return -EBUSY;
+       }
+
+       cred = current_cred();
+       if (atomic_inc_return(&cred->user->nr_watches) > task_rlimit(current, RLIMIT_NOFILE)) {
+               atomic_dec(&cred->user->nr_watches);
+               return -EAGAIN;
+       }
+
+       watch->cred = get_cred(cred);
+       rcu_assign_pointer(watch->watch_list, wlist);
+
+       kref_get(&wqueue->usage);
+       kref_get(&watch->usage);
+       hlist_add_head(&watch->queue_node, &wqueue->watches);
+       hlist_add_head_rcu(&watch->list_node, &wlist->watchers);
+       return 0;
+}
+
 /**
  * add_watch_to_object - Add a watch on an object to a watch list
  * @watch: The watch to add
@@ -468,34 +495,21 @@ void init_watch(struct watch *watch, struct watch_queue *wqueue)
  */
 int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
 {
-       struct watch_queue *wqueue = rcu_access_pointer(watch->queue);
-       struct watch *w;
-
-       hlist_for_each_entry(w, &wlist->watchers, list_node) {
-               struct watch_queue *wq = rcu_access_pointer(w->queue);
-               if (wqueue == wq && watch->id == w->id)
-                       return -EBUSY;
-       }
-
-       watch->cred = get_current_cred();
-       rcu_assign_pointer(watch->watch_list, wlist);
+       struct watch_queue *wqueue;
+       int ret = -ENOENT;
 
-       if (atomic_inc_return(&watch->cred->user->nr_watches) >
-           task_rlimit(current, RLIMIT_NOFILE)) {
-               atomic_dec(&watch->cred->user->nr_watches);
-               put_cred(watch->cred);
-               return -EAGAIN;
-       }
+       rcu_read_lock();
 
+       wqueue = rcu_access_pointer(watch->queue);
        if (lock_wqueue(wqueue)) {
-               kref_get(&wqueue->usage);
-               kref_get(&watch->usage);
-               hlist_add_head(&watch->queue_node, &wqueue->watches);
+               spin_lock(&wlist->lock);
+               ret = add_one_watch(watch, wlist, wqueue);
+               spin_unlock(&wlist->lock);
                unlock_wqueue(wqueue);
        }
 
-       hlist_add_head(&watch->list_node, &wlist->watchers);
-       return 0;
+       rcu_read_unlock();
+       return ret;
 }
 EXPORT_SYMBOL(add_watch_to_object);
 
index 5512644..e2a39e3 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -87,7 +87,8 @@ retry:
         * belongs to this folio.
         */
        if (unlikely(page_folio(page) != folio)) {
-               folio_put_refs(folio, refs);
+               if (!put_devmap_managed_page_refs(&folio->page, refs))
+                       folio_put_refs(folio, refs);
                goto retry;
        }
 
@@ -176,7 +177,8 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
                        refs *= GUP_PIN_COUNTING_BIAS;
        }
 
-       folio_put_refs(folio, refs);
+       if (!put_devmap_managed_page_refs(&folio->page, refs))
+               folio_put_refs(folio, refs);
 }
 
 /**
index a57e1be..a18c071 100644 (file)
@@ -4788,8 +4788,13 @@ again:
                         * sharing with another vma.
                         */
                        ;
-               } else if (unlikely(is_hugetlb_entry_migration(entry) ||
-                                   is_hugetlb_entry_hwpoisoned(entry))) {
+               } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
+                       bool uffd_wp = huge_pte_uffd_wp(entry);
+
+                       if (!userfaultfd_wp(dst_vma) && uffd_wp)
+                               entry = huge_pte_clear_uffd_wp(entry);
+                       set_huge_pte_at(dst, addr, dst_pte, entry);
+               } else if (unlikely(is_hugetlb_entry_migration(entry))) {
                        swp_entry_t swp_entry = pte_to_swp_entry(entry);
                        bool uffd_wp = huge_pte_uffd_wp(entry);
 
@@ -5947,6 +5952,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 
                page = alloc_huge_page(dst_vma, dst_addr, 0);
                if (IS_ERR(page)) {
+                       put_page(*pagep);
                        ret = -ENOMEM;
                        *pagep = NULL;
                        goto out;
index 4b5e5a3..6aff49f 100644 (file)
@@ -603,14 +603,6 @@ static unsigned long kfence_init_pool(void)
                addr += 2 * PAGE_SIZE;
        }
 
-       /*
-        * The pool is live and will never be deallocated from this point on.
-        * Remove the pool object from the kmemleak object tree, as it would
-        * otherwise overlap with allocations returned by kfence_alloc(), which
-        * are registered with kmemleak through the slab post-alloc hook.
-        */
-       kmemleak_free(__kfence_pool);
-
        return 0;
 }
 
@@ -623,8 +615,16 @@ static bool __init kfence_init_pool_early(void)
 
        addr = kfence_init_pool();
 
-       if (!addr)
+       if (!addr) {
+               /*
+                * The pool is live and will never be deallocated from this point on.
+                * Ignore the pool object from the kmemleak phys object tree, as it would
+                * otherwise overlap with allocations returned by kfence_alloc(), which
+                * are registered with kmemleak through the slab post-alloc hook.
+                */
+               kmemleak_ignore_phys(__pa(__kfence_pool));
                return true;
+       }
 
        /*
         * Only release unprotected pages, and do not try to go back and change
index 4cf7d4b..1c6027a 100644 (file)
@@ -3043,7 +3043,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
        pte_t entry;
 
        VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
-       VM_BUG_ON(PageAnon(page) && !PageAnonExclusive(page));
+       VM_BUG_ON(page && PageAnon(page) && !PageAnonExclusive(page));
 
        /*
         * Clear the pages cpupid information as the existing
@@ -4369,9 +4369,12 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
                        return VM_FAULT_OOM;
        }
 
-       /* See comment in handle_pte_fault() */
+       /*
+        * See comment in handle_pte_fault() for how this scenario happens, we
+        * need to return NOPAGE so that we drop this page.
+        */
        if (pmd_devmap_trans_unstable(vmf->pmd))
-               return 0;
+               return VM_FAULT_NOPAGE;
 
        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
                                      vmf->address, &vmf->ptl);
index b870a65..745eea0 100644 (file)
@@ -499,7 +499,7 @@ void free_zone_device_page(struct page *page)
 }
 
 #ifdef CONFIG_FS_DAX
-bool __put_devmap_managed_page(struct page *page)
+bool __put_devmap_managed_page_refs(struct page *page, int refs)
 {
        if (page->pgmap->type != MEMORY_DEVICE_FS_DAX)
                return false;
@@ -509,9 +509,9 @@ bool __put_devmap_managed_page(struct page *page)
         * refcount is 1, then the page is free and the refcount is
         * stable because nobody holds a reference on the page.
         */
-       if (page_ref_dec_return(page) == 1)
+       if (page_ref_sub_return(page, refs) == 1)
                wake_up_var(&page->_refcount);
        return true;
 }
-EXPORT_SYMBOL(__put_devmap_managed_page);
+EXPORT_SYMBOL(__put_devmap_managed_page_refs);
 #endif /* CONFIG_FS_DAX */
index 206ed6b..f06279d 100644 (file)
@@ -55,22 +55,28 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
        gfp_t gfp = vmf->gfp_mask;
        unsigned long addr;
        struct page *page;
+       vm_fault_t ret;
        int err;
 
        if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
                return vmf_error(-EINVAL);
 
+       filemap_invalidate_lock_shared(mapping);
+
 retry:
        page = find_lock_page(mapping, offset);
        if (!page) {
                page = alloc_page(gfp | __GFP_ZERO);
-               if (!page)
-                       return VM_FAULT_OOM;
+               if (!page) {
+                       ret = VM_FAULT_OOM;
+                       goto out;
+               }
 
                err = set_direct_map_invalid_noflush(page);
                if (err) {
                        put_page(page);
-                       return vmf_error(err);
+                       ret = vmf_error(err);
+                       goto out;
                }
 
                __SetPageUptodate(page);
@@ -86,7 +92,8 @@ retry:
                        if (err == -EEXIST)
                                goto retry;
 
-                       return vmf_error(err);
+                       ret = vmf_error(err);
+                       goto out;
                }
 
                addr = (unsigned long)page_address(page);
@@ -94,7 +101,11 @@ retry:
        }
 
        vmf->page = page;
-       return VM_FAULT_LOCKED;
+       ret = VM_FAULT_LOCKED;
+
+out:
+       filemap_invalidate_unlock_shared(mapping);
+       return ret;
 }
 
 static const struct vm_operations_struct secretmem_vm_ops = {
@@ -162,12 +173,20 @@ static int secretmem_setattr(struct user_namespace *mnt_userns,
                             struct dentry *dentry, struct iattr *iattr)
 {
        struct inode *inode = d_inode(dentry);
+       struct address_space *mapping = inode->i_mapping;
        unsigned int ia_valid = iattr->ia_valid;
+       int ret;
+
+       filemap_invalidate_lock(mapping);
 
        if ((ia_valid & ATTR_SIZE) && inode->i_size)
-               return -EINVAL;
+               ret = -EINVAL;
+       else
+               ret = simple_setattr(mnt_userns, dentry, iattr);
 
-       return simple_setattr(mnt_userns, dentry, iattr);
+       filemap_invalidate_unlock(mapping);
+
+       return ret;
 }
 
 static const struct inode_operations secretmem_iops = {
index a6f5653..b7f2d4a 100644 (file)
@@ -3392,7 +3392,7 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
                break;
        case Opt_nr_blocks:
                ctx->blocks = memparse(param->string, &rest);
-               if (*rest)
+               if (*rest || ctx->blocks > S64_MAX)
                        goto bad_value;
                ctx->seen |= SHMEM_SEEN_BLOCKS;
                break;
@@ -3514,10 +3514,7 @@ static int shmem_reconfigure(struct fs_context *fc)
 
        raw_spin_lock(&sbinfo->stat_lock);
        inodes = sbinfo->max_inodes - sbinfo->free_inodes;
-       if (ctx->blocks > S64_MAX) {
-               err = "Number of blocks too large";
-               goto out;
-       }
+
        if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
                if (!sbinfo->max_blocks) {
                        err = "Cannot retroactively limit size";
index 148ce62..e6d804b 100644 (file)
@@ -5297,6 +5297,9 @@ int hci_suspend_sync(struct hci_dev *hdev)
                return err;
        }
 
+       /* Update event mask so only the allowed event can wakeup the host */
+       hci_set_event_mask_sync(hdev);
+
        /* Only configure accept list if disconnect succeeded and wake
         * isn't being prevented.
         */
@@ -5308,9 +5311,6 @@ int hci_suspend_sync(struct hci_dev *hdev)
        /* Unpause to take care of updating scanning params */
        hdev->scanning_paused = false;
 
-       /* Update event mask so only the allowed event can wakeup the host */
-       hci_set_event_mask_sync(hdev);
-
        /* Enable event filter for paired devices */
        hci_update_event_filter_sync(hdev);
 
index 09ecaf5..77c0aac 100644 (file)
@@ -111,7 +111,8 @@ static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
 }
 
 /* Find channel with given SCID.
- * Returns locked channel. */
+ * Returns a referenced and locked channel.
+ */
 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
                                                 u16 cid)
 {
@@ -119,15 +120,19 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
 
        mutex_lock(&conn->chan_lock);
        c = __l2cap_get_chan_by_scid(conn, cid);
-       if (c)
-               l2cap_chan_lock(c);
+       if (c) {
+               /* Only lock if chan reference is not 0 */
+               c = l2cap_chan_hold_unless_zero(c);
+               if (c)
+                       l2cap_chan_lock(c);
+       }
        mutex_unlock(&conn->chan_lock);
 
        return c;
 }
 
 /* Find channel with given DCID.
- * Returns locked channel.
+ * Returns a referenced and locked channel.
  */
 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
                                                 u16 cid)
@@ -136,8 +141,12 @@ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
 
        mutex_lock(&conn->chan_lock);
        c = __l2cap_get_chan_by_dcid(conn, cid);
-       if (c)
-               l2cap_chan_lock(c);
+       if (c) {
+               /* Only lock if chan reference is not 0 */
+               c = l2cap_chan_hold_unless_zero(c);
+               if (c)
+                       l2cap_chan_lock(c);
+       }
        mutex_unlock(&conn->chan_lock);
 
        return c;
@@ -162,8 +171,12 @@ static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
 
        mutex_lock(&conn->chan_lock);
        c = __l2cap_get_chan_by_ident(conn, ident);
-       if (c)
-               l2cap_chan_lock(c);
+       if (c) {
+               /* Only lock if chan reference is not 0 */
+               c = l2cap_chan_hold_unless_zero(c);
+               if (c)
+                       l2cap_chan_lock(c);
+       }
        mutex_unlock(&conn->chan_lock);
 
        return c;
@@ -497,6 +510,16 @@ void l2cap_chan_hold(struct l2cap_chan *c)
        kref_get(&c->kref);
 }
 
+struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
+{
+       BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
+
+       if (!kref_get_unless_zero(&c->kref))
+               return NULL;
+
+       return c;
+}
+
 void l2cap_chan_put(struct l2cap_chan *c)
 {
        BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
@@ -1969,7 +1992,10 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
                        src_match = !bacmp(&c->src, src);
                        dst_match = !bacmp(&c->dst, dst);
                        if (src_match && dst_match) {
-                               l2cap_chan_hold(c);
+                               c = l2cap_chan_hold_unless_zero(c);
+                               if (!c)
+                                       continue;
+
                                read_unlock(&chan_list_lock);
                                return c;
                        }
@@ -1984,7 +2010,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
        }
 
        if (c1)
-               l2cap_chan_hold(c1);
+               c1 = l2cap_chan_hold_unless_zero(c1);
 
        read_unlock(&chan_list_lock);
 
@@ -4464,6 +4490,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
 
 unlock:
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
        return err;
 }
 
@@ -4578,6 +4605,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
 
 done:
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
        return err;
 }
 
@@ -5305,6 +5333,7 @@ send_move_response:
        l2cap_send_move_chan_rsp(chan, result);
 
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 
        return 0;
 }
@@ -5397,6 +5426,7 @@ static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
        }
 
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 }
 
 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
@@ -5426,6 +5456,7 @@ static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
        l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
 
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 }
 
 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
@@ -5489,6 +5520,7 @@ static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
        l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
 
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 
        return 0;
 }
@@ -5524,6 +5556,7 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
        }
 
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 
        return 0;
 }
@@ -5896,12 +5929,11 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
        if (credits > max_credits) {
                BT_ERR("LE credits overflow");
                l2cap_send_disconn_req(chan, ECONNRESET);
-               l2cap_chan_unlock(chan);
 
                /* Return 0 so that we don't trigger an unnecessary
                 * command reject packet.
                 */
-               return 0;
+               goto unlock;
        }
 
        chan->tx_credits += credits;
@@ -5912,7 +5944,9 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
        if (chan->tx_credits)
                chan->ops->resume(chan);
 
+unlock:
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 
        return 0;
 }
@@ -7598,6 +7632,7 @@ drop:
 
 done:
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 }
 
 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
@@ -8086,7 +8121,7 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
                if (src_type != c->src_type)
                        continue;
 
-               l2cap_chan_hold(c);
+               c = l2cap_chan_hold_unless_zero(c);
                read_unlock(&chan_list_lock);
                return c;
        }
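
l2cap_chan_hold_unless_zero() is the usual "take a reference unless the refcount already hit zero" lookup guard built on kref_get_unless_zero(). A standalone analogue of that pattern with a C11 atomic refcount (names invented for the sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct chan {
        _Atomic int refs;
};

/* Take a reference only if the object is still live (refs > 0); a lookup
 * racing with the final put must not resurrect the object. */
static bool chan_hold_unless_zero(struct chan *c)
{
        int old = atomic_load_explicit(&c->refs, memory_order_relaxed);

        while (old != 0) {
                if (atomic_compare_exchange_weak_explicit(&c->refs, &old, old + 1,
                                                          memory_order_acquire,
                                                          memory_order_relaxed))
                        return true;    /* reference taken */
        }
        return false;                   /* object already on its way out */
}

static void chan_put(struct chan *c)
{
        if (atomic_fetch_sub_explicit(&c->refs, 1, memory_order_release) == 1)
                printf("last reference dropped, would free the channel\n");
}

int main(void)
{
        struct chan c = { .refs = 1 };

        if (chan_hold_unless_zero(&c))  /* lookup succeeds while refs == 1 */
                chan_put(&c);
        chan_put(&c);                   /* drop the original reference */
        if (!chan_hold_unless_zero(&c)) /* refs == 0: lookup must fail */
                printf("stale lookup correctly rejected\n");
        return 0;
}
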
index 8cfafd7..646d104 100644 (file)
@@ -4844,7 +4844,6 @@ static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
                else
                        status = MGMT_STATUS_FAILED;
 
-               mgmt_pending_remove(cmd);
                goto unlock;
        }
 
index 1ef14a0..5aeb364 100644 (file)
@@ -589,9 +589,13 @@ static int br_fill_ifinfo(struct sk_buff *skb,
        }
 
 done:
+       if (af) {
+               if (nlmsg_get_pos(skb) - (void *)af > nla_attr_size(0))
+                       nla_nest_end(skb, af);
+               else
+                       nla_nest_cancel(skb, af);
+       }
 
-       if (af)
-               nla_nest_end(skb, af);
        nlmsg_end(skb, nlh);
        return 0;
 
index 251e666..748be72 100644 (file)
@@ -47,7 +47,7 @@ enum caif_states {
 struct caifsock {
        struct sock sk; /* must be first member */
        struct cflayer layer;
-       u32 flow_state;
+       unsigned long flow_state;
        struct caif_connect_request conn_req;
        struct mutex readlock;
        struct dentry *debugfs_socket_dir;
@@ -56,38 +56,32 @@ struct caifsock {
 
 static int rx_flow_is_on(struct caifsock *cf_sk)
 {
-       return test_bit(RX_FLOW_ON_BIT,
-                       (void *) &cf_sk->flow_state);
+       return test_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
 }
 
 static int tx_flow_is_on(struct caifsock *cf_sk)
 {
-       return test_bit(TX_FLOW_ON_BIT,
-                       (void *) &cf_sk->flow_state);
+       return test_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state);
 }
 
 static void set_rx_flow_off(struct caifsock *cf_sk)
 {
-        clear_bit(RX_FLOW_ON_BIT,
-                (void *) &cf_sk->flow_state);
+       clear_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
 }
 
 static void set_rx_flow_on(struct caifsock *cf_sk)
 {
-        set_bit(RX_FLOW_ON_BIT,
-                       (void *) &cf_sk->flow_state);
+       set_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
 }
 
 static void set_tx_flow_off(struct caifsock *cf_sk)
 {
-        clear_bit(TX_FLOW_ON_BIT,
-               (void *) &cf_sk->flow_state);
+       clear_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state);
 }
 
 static void set_tx_flow_on(struct caifsock *cf_sk)
 {
-        set_bit(TX_FLOW_ON_BIT,
-               (void *) &cf_sk->flow_state);
+       set_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state);
 }
 
 static void caif_read_lock(struct sock *sk)
index ca4c993..c43c965 100644 (file)
@@ -201,6 +201,10 @@ static const struct nla_policy devlink_function_nl_policy[DEVLINK_PORT_FUNCTION_
                                 DEVLINK_PORT_FN_STATE_ACTIVE),
 };
 
+static const struct nla_policy devlink_selftest_nl_policy[DEVLINK_ATTR_SELFTEST_ID_MAX + 1] = {
+       [DEVLINK_ATTR_SELFTEST_ID_FLASH] = { .type = NLA_FLAG },
+};
+
 static DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);
 #define DEVLINK_REGISTERED XA_MARK_1
 
@@ -695,6 +699,10 @@ struct devlink_region {
                const struct devlink_region_ops *ops;
                const struct devlink_port_region_ops *port_ops;
        };
+       struct mutex snapshot_lock; /* protects snapshot_list,
+                                    * max_snapshots and cur_snapshots
+                                    * consistency.
+                                    */
        struct list_head snapshot_list;
        u32 max_snapshots;
        u32 cur_snapshots;
@@ -4826,6 +4834,206 @@ static int devlink_nl_cmd_flash_update(struct sk_buff *skb,
        return ret;
 }
 
+static int
+devlink_nl_selftests_fill(struct sk_buff *msg, struct devlink *devlink,
+                         u32 portid, u32 seq, int flags,
+                         struct netlink_ext_ack *extack)
+{
+       struct nlattr *selftests;
+       void *hdr;
+       int err;
+       int i;
+
+       hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags,
+                         DEVLINK_CMD_SELFTESTS_GET);
+       if (!hdr)
+               return -EMSGSIZE;
+
+       err = -EMSGSIZE;
+       if (devlink_nl_put_handle(msg, devlink))
+               goto err_cancel_msg;
+
+       selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS);
+       if (!selftests)
+               goto err_cancel_msg;
+
+       for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1;
+            i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) {
+               if (devlink->ops->selftest_check(devlink, i, extack)) {
+                       err = nla_put_flag(msg, i);
+                       if (err)
+                               goto err_cancel_msg;
+               }
+       }
+
+       nla_nest_end(msg, selftests);
+       genlmsg_end(msg, hdr);
+       return 0;
+
+err_cancel_msg:
+       genlmsg_cancel(msg, hdr);
+       return err;
+}
+
+static int devlink_nl_cmd_selftests_get_doit(struct sk_buff *skb,
+                                            struct genl_info *info)
+{
+       struct devlink *devlink = info->user_ptr[0];
+       struct sk_buff *msg;
+       int err;
+
+       if (!devlink->ops->selftest_check)
+               return -EOPNOTSUPP;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       err = devlink_nl_selftests_fill(msg, devlink, info->snd_portid,
+                                       info->snd_seq, 0, info->extack);
+       if (err) {
+               nlmsg_free(msg);
+               return err;
+       }
+
+       return genlmsg_reply(msg, info);
+}
+
+static int devlink_nl_cmd_selftests_get_dumpit(struct sk_buff *msg,
+                                              struct netlink_callback *cb)
+{
+       struct devlink *devlink;
+       int start = cb->args[0];
+       unsigned long index;
+       int idx = 0;
+       int err = 0;
+
+       mutex_lock(&devlink_mutex);
+       devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
+               if (idx < start || !devlink->ops->selftest_check)
+                       goto inc;
+
+               devl_lock(devlink);
+               err = devlink_nl_selftests_fill(msg, devlink,
+                                               NETLINK_CB(cb->skb).portid,
+                                               cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                                               cb->extack);
+               devl_unlock(devlink);
+               if (err) {
+                       devlink_put(devlink);
+                       break;
+               }
+inc:
+               idx++;
+               devlink_put(devlink);
+       }
+       mutex_unlock(&devlink_mutex);
+
+       if (err != -EMSGSIZE)
+               return err;
+
+       cb->args[0] = idx;
+       return msg->len;
+}
+
+static int devlink_selftest_result_put(struct sk_buff *skb, unsigned int id,
+                                      enum devlink_selftest_status test_status)
+{
+       struct nlattr *result_attr;
+
+       result_attr = nla_nest_start(skb, DEVLINK_ATTR_SELFTEST_RESULT);
+       if (!result_attr)
+               return -EMSGSIZE;
+
+       if (nla_put_u32(skb, DEVLINK_ATTR_SELFTEST_RESULT_ID, id) ||
+           nla_put_u8(skb, DEVLINK_ATTR_SELFTEST_RESULT_STATUS,
+                      test_status))
+               goto nla_put_failure;
+
+       nla_nest_end(skb, result_attr);
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, result_attr);
+       return -EMSGSIZE;
+}
+
+static int devlink_nl_cmd_selftests_run(struct sk_buff *skb,
+                                       struct genl_info *info)
+{
+       struct nlattr *tb[DEVLINK_ATTR_SELFTEST_ID_MAX + 1];
+       struct devlink *devlink = info->user_ptr[0];
+       struct nlattr *attrs, *selftests;
+       struct sk_buff *msg;
+       void *hdr;
+       int err;
+       int i;
+
+       if (!devlink->ops->selftest_run || !devlink->ops->selftest_check)
+               return -EOPNOTSUPP;
+
+       if (!info->attrs[DEVLINK_ATTR_SELFTESTS]) {
+               NL_SET_ERR_MSG_MOD(info->extack, "selftest required");
+               return -EINVAL;
+       }
+
+       attrs = info->attrs[DEVLINK_ATTR_SELFTESTS];
+
+       err = nla_parse_nested(tb, DEVLINK_ATTR_SELFTEST_ID_MAX, attrs,
+                              devlink_selftest_nl_policy, info->extack);
+       if (err < 0)
+               return err;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       err = -EMSGSIZE;
+       hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
+                         &devlink_nl_family, 0, DEVLINK_CMD_SELFTESTS_RUN);
+       if (!hdr)
+               goto free_msg;
+
+       if (devlink_nl_put_handle(msg, devlink))
+               goto genlmsg_cancel;
+
+       selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS);
+       if (!selftests)
+               goto genlmsg_cancel;
+
+       for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1;
+            i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) {
+               enum devlink_selftest_status test_status;
+
+               if (nla_get_flag(tb[i])) {
+                       if (!devlink->ops->selftest_check(devlink, i,
+                                                         info->extack)) {
+                               if (devlink_selftest_result_put(msg, i,
+                                                               DEVLINK_SELFTEST_STATUS_SKIP))
+                                       goto selftests_nest_cancel;
+                               continue;
+                       }
+
+                       test_status = devlink->ops->selftest_run(devlink, i,
+                                                                info->extack);
+                       if (devlink_selftest_result_put(msg, i, test_status))
+                               goto selftests_nest_cancel;
+               }
+       }
+
+       nla_nest_end(msg, selftests);
+       genlmsg_end(msg, hdr);
+       return genlmsg_reply(msg, info);
+
+selftests_nest_cancel:
+       nla_nest_cancel(msg, selftests);
+genlmsg_cancel:
+       genlmsg_cancel(msg, hdr);
+free_msg:
+       nlmsg_free(msg);
+       return err;
+}
+
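For a driver opting into the new DEVLINK_CMD_SELFTESTS_GET/RUN commands, the handlers above only dereference two callbacks. A minimal sketch of such a pair follows; the foo_* names and the specific test id and PASS/FAIL status values are illustrative assumptions, and the exact struct devlink_ops prototypes should be taken from the tree rather than from this sketch.

/* Hedged sketch of the driver-side hooks the new handlers call into.
 * The foo_* helpers and the FLASH/PASS/FAIL identifiers are assumptions.
 */
static bool foo_selftest_check(struct devlink *devlink, unsigned int id,
                               struct netlink_ext_ack *extack)
{
        /* advertise only the tests this device can actually run */
        return id == DEVLINK_ATTR_SELFTEST_ID_FLASH;
}

static enum devlink_selftest_status
foo_selftest_run(struct devlink *devlink, unsigned int id,
                 struct netlink_ext_ack *extack)
{
        if (id != DEVLINK_ATTR_SELFTEST_ID_FLASH)
                return DEVLINK_SELFTEST_STATUS_SKIP;

        return foo_flash_selftest(devlink) ? DEVLINK_SELFTEST_STATUS_FAIL
                                           : DEVLINK_SELFTEST_STATUS_PASS;
}

static const struct devlink_ops foo_devlink_ops = {
        .selftest_check = foo_selftest_check,
        .selftest_run   = foo_selftest_run,
};

As devlink_nl_cmd_selftests_run() above shows, tests whose selftest_check() returns false are reported back as DEVLINK_SELFTEST_STATUS_SKIP rather than being run.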
 static const struct devlink_param devlink_param_generic[] = {
        {
                .id = DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
@@ -5690,21 +5898,28 @@ static int __devlink_snapshot_id_increment(struct devlink *devlink, u32 id)
 {
        unsigned long count;
        void *p;
+       int err;
 
-       devl_assert_locked(devlink);
-
+       xa_lock(&devlink->snapshot_ids);
        p = xa_load(&devlink->snapshot_ids, id);
-       if (WARN_ON(!p))
-               return -EINVAL;
+       if (WARN_ON(!p)) {
+               err = -EINVAL;
+               goto unlock;
+       }
 
-       if (WARN_ON(!xa_is_value(p)))
-               return -EINVAL;
+       if (WARN_ON(!xa_is_value(p))) {
+               err = -EINVAL;
+               goto unlock;
+       }
 
        count = xa_to_value(p);
        count++;
 
-       return xa_err(xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
-                              GFP_KERNEL));
+       err = xa_err(__xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
+                               GFP_ATOMIC));
+unlock:
+       xa_unlock(&devlink->snapshot_ids);
+       return err;
 }
 
 /**
@@ -5727,25 +5942,26 @@ static void __devlink_snapshot_id_decrement(struct devlink *devlink, u32 id)
        unsigned long count;
        void *p;
 
-       devl_assert_locked(devlink);
-
+       xa_lock(&devlink->snapshot_ids);
        p = xa_load(&devlink->snapshot_ids, id);
        if (WARN_ON(!p))
-               return;
+               goto unlock;
 
        if (WARN_ON(!xa_is_value(p)))
-               return;
+               goto unlock;
 
        count = xa_to_value(p);
 
        if (count > 1) {
                count--;
-               xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
-                        GFP_KERNEL);
+               __xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
+                          GFP_ATOMIC);
        } else {
                /* If this was the last user, we can erase this id */
-               xa_erase(&devlink->snapshot_ids, id);
+               __xa_erase(&devlink->snapshot_ids, id);
        }
+unlock:
+       xa_unlock(&devlink->snapshot_ids);
 }
 
 /**
@@ -5766,13 +5982,17 @@ static void __devlink_snapshot_id_decrement(struct devlink *devlink, u32 id)
  */
 static int __devlink_snapshot_id_insert(struct devlink *devlink, u32 id)
 {
-       devl_assert_locked(devlink);
+       int err;
 
-       if (xa_load(&devlink->snapshot_ids, id))
+       xa_lock(&devlink->snapshot_ids);
+       if (xa_load(&devlink->snapshot_ids, id)) {
+               xa_unlock(&devlink->snapshot_ids);
                return -EEXIST;
-
-       return xa_err(xa_store(&devlink->snapshot_ids, id, xa_mk_value(0),
-                              GFP_KERNEL));
+       }
+       err = xa_err(__xa_store(&devlink->snapshot_ids, id, xa_mk_value(0),
+                               GFP_ATOMIC));
+       xa_unlock(&devlink->snapshot_ids);
+       return err;
 }
 
 /**
@@ -5793,8 +6013,6 @@ static int __devlink_snapshot_id_insert(struct devlink *devlink, u32 id)
  */
 static int __devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
 {
-       devl_assert_locked(devlink);
-
        return xa_alloc(&devlink->snapshot_ids, id, xa_mk_value(1),
                        xa_limit_32b, GFP_KERNEL);
 }
@@ -5807,7 +6025,7 @@ static int __devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
  *     Multiple snapshots can be created on a region.
  *     The @snapshot_id should be obtained using the getter function.
  *
- *     Must be called only while holding the devlink instance lock.
+ *     Must be called only while holding the region snapshot lock.
  *
  *     @region: devlink region of the snapshot
  *     @data: snapshot data
@@ -5821,7 +6039,7 @@ __devlink_region_snapshot_create(struct devlink_region *region,
        struct devlink_snapshot *snapshot;
        int err;
 
-       devl_assert_locked(devlink);
+       lockdep_assert_held(&region->snapshot_lock);
 
        /* check if region can hold one more snapshot */
        if (region->cur_snapshots == region->max_snapshots)
@@ -5859,7 +6077,7 @@ static void devlink_region_snapshot_del(struct devlink_region *region,
 {
        struct devlink *devlink = region->devlink;
 
-       devl_assert_locked(devlink);
+       lockdep_assert_held(&region->snapshot_lock);
 
        devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_DEL);
        region->cur_snapshots--;
@@ -6038,11 +6256,15 @@ static int devlink_nl_cmd_region_del(struct sk_buff *skb,
        if (!region)
                return -EINVAL;
 
+       mutex_lock(&region->snapshot_lock);
        snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id);
-       if (!snapshot)
+       if (!snapshot) {
+               mutex_unlock(&region->snapshot_lock);
                return -EINVAL;
+       }
 
        devlink_region_snapshot_del(region, snapshot);
+       mutex_unlock(&region->snapshot_lock);
        return 0;
 }
 
@@ -6090,9 +6312,12 @@ devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
                return -EOPNOTSUPP;
        }
 
+       mutex_lock(&region->snapshot_lock);
+
        if (region->cur_snapshots == region->max_snapshots) {
                NL_SET_ERR_MSG_MOD(info->extack, "The region has reached the maximum number of stored snapshots");
-               return -ENOSPC;
+               err = -ENOSPC;
+               goto unlock;
        }
 
        snapshot_id_attr = info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID];
@@ -6101,17 +6326,18 @@ devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
 
                if (devlink_region_snapshot_get_by_id(region, snapshot_id)) {
                        NL_SET_ERR_MSG_MOD(info->extack, "The requested snapshot id is already in use");
-                       return -EEXIST;
+                       err = -EEXIST;
+                       goto unlock;
                }
 
                err = __devlink_snapshot_id_insert(devlink, snapshot_id);
                if (err)
-                       return err;
+                       goto unlock;
        } else {
                err = __devlink_region_snapshot_id_get(devlink, &snapshot_id);
                if (err) {
                        NL_SET_ERR_MSG_MOD(info->extack, "Failed to allocate a new snapshot id");
-                       return err;
+                       goto unlock;
                }
        }
 
@@ -6149,16 +6375,20 @@ devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
                        goto err_notify;
        }
 
+       mutex_unlock(&region->snapshot_lock);
        return 0;
 
 err_snapshot_create:
        region->ops->destructor(data);
 err_snapshot_capture:
        __devlink_snapshot_id_decrement(devlink, snapshot_id);
+       mutex_unlock(&region->snapshot_lock);
        return err;
 
 err_notify:
        devlink_region_snapshot_del(region, snapshot);
+unlock:
+       mutex_unlock(&region->snapshot_lock);
        return err;
 }
 
@@ -7527,6 +7757,7 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
        enum devlink_health_reporter_state prev_health_state;
        struct devlink *devlink = reporter->devlink;
        unsigned long recover_ts_threshold;
+       int ret;
 
        /* write a log message of the current error */
        WARN_ON(!msg);
@@ -7560,11 +7791,14 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
                mutex_unlock(&reporter->dump_lock);
        }
 
-       if (reporter->auto_recover)
-               return devlink_health_reporter_recover(reporter,
-                                                      priv_ctx, NULL);
+       if (!reporter->auto_recover)
+               return 0;
 
-       return 0;
+       devl_lock(devlink);
+       ret = devlink_health_reporter_recover(reporter, priv_ctx, NULL);
+       devl_unlock(devlink);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(devlink_health_report);
 
@@ -8969,6 +9203,7 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
        [DEVLINK_ATTR_RATE_PARENT_NODE_NAME] = { .type = NLA_NUL_STRING },
        [DEVLINK_ATTR_LINECARD_INDEX] = { .type = NLA_U32 },
        [DEVLINK_ATTR_LINECARD_TYPE] = { .type = NLA_NUL_STRING },
+       [DEVLINK_ATTR_SELFTESTS] = { .type = NLA_NESTED },
 };
 
 static const struct genl_small_ops devlink_nl_ops[] = {
@@ -9238,8 +9473,7 @@ static const struct genl_small_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_health_reporter_get_doit,
                .dumpit = devlink_nl_cmd_health_reporter_get_dumpit,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
-                                 DEVLINK_NL_FLAG_NO_LOCK,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
                /* can be retrieved by unprivileged users */
        },
        {
@@ -9247,24 +9481,21 @@ static const struct genl_small_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_health_reporter_set_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
-                                 DEVLINK_NL_FLAG_NO_LOCK,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
        },
        {
                .cmd = DEVLINK_CMD_HEALTH_REPORTER_RECOVER,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_health_reporter_recover_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
-                                 DEVLINK_NL_FLAG_NO_LOCK,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
        },
        {
                .cmd = DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_health_reporter_diagnose_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
-                                 DEVLINK_NL_FLAG_NO_LOCK,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
        },
        {
                .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET,
@@ -9278,16 +9509,14 @@ static const struct genl_small_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_health_reporter_dump_clear_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
-                                 DEVLINK_NL_FLAG_NO_LOCK,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
        },
        {
                .cmd = DEVLINK_CMD_HEALTH_REPORTER_TEST,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_health_reporter_test_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
-                                 DEVLINK_NL_FLAG_NO_LOCK,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
        },
        {
                .cmd = DEVLINK_CMD_FLASH_UPDATE,
@@ -9328,6 +9557,17 @@ static const struct genl_small_ops devlink_nl_ops[] = {
                .doit = devlink_nl_cmd_trap_policer_set_doit,
                .flags = GENL_ADMIN_PERM,
        },
+       {
+               .cmd = DEVLINK_CMD_SELFTESTS_GET,
+               .doit = devlink_nl_cmd_selftests_get_doit,
+               .dumpit = devlink_nl_cmd_selftests_get_dumpit,
+               /* can be retrieved by unprivileged users */
+       },
+       {
+               .cmd = DEVLINK_CMD_SELFTESTS_RUN,
+               .doit = devlink_nl_cmd_selftests_run,
+               .flags = GENL_ADMIN_PERM,
+       },
 };
 
 static struct genl_family devlink_nl_family __ro_after_init = {
@@ -11084,6 +11324,7 @@ struct devlink_region *devl_region_create(struct devlink *devlink,
        region->ops = ops;
        region->size = region_size;
        INIT_LIST_HEAD(&region->snapshot_list);
+       mutex_init(&region->snapshot_lock);
        list_add_tail(&region->list, &devlink->region_list);
        devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
 
@@ -11157,6 +11398,7 @@ devlink_port_region_create(struct devlink_port *port,
        region->port_ops = ops;
        region->size = region_size;
        INIT_LIST_HEAD(&region->snapshot_list);
+       mutex_init(&region->snapshot_lock);
        list_add_tail(&region->list, &port->region_list);
        devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
 
@@ -11186,6 +11428,7 @@ void devl_region_destroy(struct devlink_region *region)
                devlink_region_snapshot_del(region, snapshot);
 
        list_del(&region->list);
+       mutex_destroy(&region->snapshot_lock);
 
        devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL);
        kfree(region);
@@ -11226,13 +11469,7 @@ EXPORT_SYMBOL_GPL(devlink_region_destroy);
  */
 int devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
 {
-       int err;
-
-       devl_lock(devlink);
-       err = __devlink_region_snapshot_id_get(devlink, id);
-       devl_unlock(devlink);
-
-       return err;
+       return __devlink_region_snapshot_id_get(devlink, id);
 }
 EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_get);
 
@@ -11248,9 +11485,7 @@ EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_get);
  */
 void devlink_region_snapshot_id_put(struct devlink *devlink, u32 id)
 {
-       devl_lock(devlink);
        __devlink_snapshot_id_decrement(devlink, id);
-       devl_unlock(devlink);
 }
 EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_put);
 
@@ -11269,13 +11504,11 @@ EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_put);
 int devlink_region_snapshot_create(struct devlink_region *region,
                                   u8 *data, u32 snapshot_id)
 {
-       struct devlink *devlink = region->devlink;
        int err;
 
-       devl_lock(devlink);
+       mutex_lock(&region->snapshot_lock);
        err = __devlink_region_snapshot_create(region, data, snapshot_id);
-       devl_unlock(devlink);
-
+       mutex_unlock(&region->snapshot_lock);
        return err;
 }
 EXPORT_SYMBOL_GPL(devlink_region_snapshot_create);
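With the snapshot_ids xarray now carrying its own lock and every region getting a dedicated snapshot_lock, the three exported helpers above no longer take the devlink instance lock. A hedged usage sketch of the resulting driver-side pattern; foo_snapshot_region() and the data buffer handling are illustrative, not taken from an in-tree driver.

/* Hedged sketch: grab a snapshot id, attach the captured data to a
 * region, then drop the caller's reference on the id; the snapshot
 * keeps its own reference for as long as it exists. */
static int foo_snapshot_region(struct devlink *devlink,
                               struct devlink_region *region, u8 *data)
{
        u32 id;
        int err;

        err = devlink_region_snapshot_id_get(devlink, &id);
        if (err)
                return err;

        err = devlink_region_snapshot_create(region, data, id);

        devlink_region_snapshot_id_put(devlink, id);
        return err;
}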
index 57c5e4c..5669248 100644 (file)
@@ -3918,7 +3918,7 @@ static void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len)
                offset -= frag_size;
        }
 out:
-       return offset + len < size ? addr + offset : NULL;
+       return offset + len <= size ? addr + offset : NULL;
 }
 
 BPF_CALL_4(bpf_xdp_load_bytes, struct xdp_buff *, xdp, u32, offset,
@@ -4653,6 +4653,7 @@ BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
        } else {
                info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
                info->key.u.ipv4.src = cpu_to_be32(from->local_ipv4);
+               info->key.flow_flags = FLOWI_FLAG_ANYSRC;
        }
 
        return 0;
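The bpf_xdp_pointer() change at the top of this file's hunks is an off-by-one at the fragment boundary. A worked case with illustrative sizes: for a fragment of size 4096, a request with offset 4000 and len 96 gives offset + len == 4096. The range fits entirely inside the fragment, yet the old test offset + len < size rejected it and returned NULL; with offset + len <= size it is accepted, and only ranges that actually spill past the fragment (offset + len > size) fall back to NULL.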
index aa4f43f..6582dfd 100644 (file)
@@ -484,8 +484,8 @@ static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gf
        sk->sk_family      = PF_DECnet;
        sk->sk_protocol    = 0;
        sk->sk_allocation  = gfp;
-       sk->sk_sndbuf      = sysctl_decnet_wmem[1];
-       sk->sk_rcvbuf      = sysctl_decnet_rmem[1];
+       sk->sk_sndbuf      = READ_ONCE(sysctl_decnet_wmem[1]);
+       sk->sk_rcvbuf      = READ_ONCE(sysctl_decnet_rmem[1]);
 
        /* Initialization of DECnet Session Control Port                */
        scp = DN_SK(sk);
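The conversion above is the first instance of a pattern repeated through the DECnet, FIB, TCP, MPTCP and TIPC hunks below: sysctls that a writer may update at any time are loaded with READ_ONCE() so the access is marked (pairing, it is assumed, with WRITE_ONCE() on the writer side) and cannot be torn or silently re-read by the compiler. A minimal sketch of the pattern; example_default_sndbuf() is an illustrative helper, not kernel API.

/* Hedged sketch of the lockless sysctl read used throughout this series:
 * take one marked copy and use it consistently for the rest of the
 * function, even if the sysctl changes concurrently. */
static int example_default_sndbuf(const struct sock *sk)
{
        return READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
}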
index 552a53f..ac2ee16 100644 (file)
@@ -201,7 +201,7 @@ static void dn_dst_check_expire(struct timer_list *unused)
                }
                spin_unlock(&dn_rt_hash_table[i].lock);
 
-               if ((jiffies - now) > 0)
+               if (jiffies != now)
                        break;
        }
 
index 2b56218..4dfd68c 100644 (file)
@@ -344,6 +344,7 @@ static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
 
        ether_addr_copy(a->addr, addr);
        a->vid = vid;
+       a->db = db;
        refcount_set(&a->refcount, 1);
        list_add_tail(&a->list, &lag->fdbs);
 
index 46e8a51..452ff17 100644 (file)
@@ -1042,6 +1042,7 @@ fib_find_matching_alias(struct net *net, const struct fib_rt_info *fri)
 
 void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri)
 {
+       u8 fib_notify_on_flag_change;
        struct fib_alias *fa_match;
        struct sk_buff *skb;
        int err;
@@ -1063,14 +1064,16 @@ void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri)
        WRITE_ONCE(fa_match->offload, fri->offload);
        WRITE_ONCE(fa_match->trap, fri->trap);
 
+       fib_notify_on_flag_change = READ_ONCE(net->ipv4.sysctl_fib_notify_on_flag_change);
+
        /* 2 means send notifications only if offload_failed was changed. */
-       if (net->ipv4.sysctl_fib_notify_on_flag_change == 2 &&
+       if (fib_notify_on_flag_change == 2 &&
            READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
                goto out;
 
        WRITE_ONCE(fa_match->offload_failed, fri->offload_failed);
 
-       if (!net->ipv4.sysctl_fib_notify_on_flag_change)
+       if (!fib_notify_on_flag_change)
                goto out;
 
        skb = nlmsg_new(fib_nlmsg_size(fa_match->fa_info), GFP_ATOMIC);
index dc7cc3c..970e9a2 100644 (file)
@@ -454,8 +454,8 @@ void tcp_init_sock(struct sock *sk)
 
        icsk->icsk_sync_mss = tcp_sync_mss;
 
-       WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
-       WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
+       WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]));
+       WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]));
 
        sk_sockets_allocated_inc(sk);
 }
@@ -688,7 +688,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
                                int size_goal)
 {
        return skb->len < size_goal &&
-              sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
+              READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
               !tcp_rtx_queue_empty(sk) &&
               refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
               tcp_skb_can_collapse_to(skb);
@@ -1842,7 +1842,7 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
        if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
                cap = sk->sk_rcvbuf >> 1;
        else
-               cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1;
+               cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1;
        val = min(val, cap);
        WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
 
@@ -4573,9 +4573,18 @@ tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
                return SKB_DROP_REASON_TCP_MD5UNEXPECTED;
        }
 
-       /* check the signature */
-       genhash = tp->af_specific->calc_md5_hash(newhash, hash_expected,
-                                                NULL, skb);
+       /* Check the signature.
+        * To support dual stack listeners, we need to handle
+        * IPv4-mapped case.
+        */
+       if (family == AF_INET)
+               genhash = tcp_v4_md5_hash_skb(newhash,
+                                             hash_expected,
+                                             NULL, skb);
+       else
+               genhash = tp->af_specific->calc_md5_hash(newhash,
+                                                        hash_expected,
+                                                        NULL, skb);
 
        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
index ae73b34..ab5f0ea 100644 (file)
@@ -426,7 +426,7 @@ static void tcp_sndbuf_expand(struct sock *sk)
 
        if (sk->sk_sndbuf < sndmem)
                WRITE_ONCE(sk->sk_sndbuf,
-                          min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]));
+                          min(sndmem, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[2])));
 }
 
 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -461,7 +461,7 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb,
        struct tcp_sock *tp = tcp_sk(sk);
        /* Optimize this! */
        int truesize = tcp_win_from_space(sk, skbtruesize) >> 1;
-       int window = tcp_win_from_space(sk, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1;
+       int window = tcp_win_from_space(sk, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])) >> 1;
 
        while (tp->rcv_ssthresh <= window) {
                if (truesize <= skb->len)
@@ -534,7 +534,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
  */
 static void tcp_init_buffer_space(struct sock *sk)
 {
-       int tcp_app_win = sock_net(sk)->ipv4.sysctl_tcp_app_win;
+       int tcp_app_win = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_app_win);
        struct tcp_sock *tp = tcp_sk(sk);
        int maxwin;
 
@@ -574,16 +574,17 @@ static void tcp_clamp_window(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct net *net = sock_net(sk);
+       int rmem2;
 
        icsk->icsk_ack.quick = 0;
+       rmem2 = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]);
 
-       if (sk->sk_rcvbuf < net->ipv4.sysctl_tcp_rmem[2] &&
+       if (sk->sk_rcvbuf < rmem2 &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
            !tcp_under_memory_pressure(sk) &&
            sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
                WRITE_ONCE(sk->sk_rcvbuf,
-                          min(atomic_read(&sk->sk_rmem_alloc),
-                              net->ipv4.sysctl_tcp_rmem[2]));
+                          min(atomic_read(&sk->sk_rmem_alloc), rmem2));
        }
        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
                tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
@@ -724,7 +725,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
         * <prev RTT . ><current RTT .. ><next RTT .... >
         */
 
-       if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
                int rcvmem, rcvbuf;
                u64 rcvwin, grow;
@@ -745,7 +746,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
 
                do_div(rcvwin, tp->advmss);
                rcvbuf = min_t(u64, rcvwin * rcvmem,
-                              sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+                              READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
                if (rcvbuf > sk->sk_rcvbuf) {
                        WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
 
@@ -909,9 +910,9 @@ static void tcp_update_pacing_rate(struct sock *sk)
         *       end of slow start and should slow down.
         */
        if (tcp_snd_cwnd(tp) < tp->snd_ssthresh / 2)
-               rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio;
+               rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio);
        else
-               rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio;
+               rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio);
 
        rate *= max(tcp_snd_cwnd(tp), tp->packets_out);
 
@@ -2174,7 +2175,7 @@ void tcp_enter_loss(struct sock *sk)
         * loss recovery is underway except recurring timeout(s) on
         * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
         */
-       tp->frto = net->ipv4.sysctl_tcp_frto &&
+       tp->frto = READ_ONCE(net->ipv4.sysctl_tcp_frto) &&
                   (new_recovery || icsk->icsk_retransmits) &&
                   !inet_csk(sk)->icsk_mtup.probe_size;
 }
@@ -3057,7 +3058,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 
 static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
 {
-       u32 wlen = sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen * HZ;
+       u32 wlen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen) * HZ;
        struct tcp_sock *tp = tcp_sk(sk);
 
        if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) {
@@ -3580,7 +3581,8 @@ static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
        if (*last_oow_ack_time) {
                s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time);
 
-               if (0 <= elapsed && elapsed < net->ipv4.sysctl_tcp_invalid_ratelimit) {
+               if (0 <= elapsed &&
+                   elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) {
                        NET_INC_STATS(net, mib_idx);
                        return true;    /* rate-limited: don't send yet! */
                }
@@ -3628,7 +3630,7 @@ static void tcp_send_challenge_ack(struct sock *sk)
        /* Then check host-wide RFC 5961 rate limit. */
        now = jiffies / HZ;
        if (now != challenge_timestamp) {
-               u32 ack_limit = net->ipv4.sysctl_tcp_challenge_ack_limit;
+               u32 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
                u32 half = (ack_limit + 1) >> 1;
 
                challenge_timestamp = now;
@@ -4425,7 +4427,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
+       if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
                int mib_idx;
 
                if (before(seq, tp->rcv_nxt))
@@ -4472,7 +4474,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
                NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
                tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 
-               if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
+               if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
                        u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
                        tcp_rcv_spurious_retrans(sk, skb);
@@ -5516,7 +5518,7 @@ send_now:
        }
 
        if (!tcp_is_sack(tp) ||
-           tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)
+           tp->compressed_ack >= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr))
                goto send_now;
 
        if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
@@ -5537,11 +5539,12 @@ send_now:
        if (tp->srtt_us && tp->srtt_us < rtt)
                rtt = tp->srtt_us;
 
-       delay = min_t(unsigned long, sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns,
+       delay = min_t(unsigned long,
+                     READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns),
                      rtt * (NSEC_PER_USEC >> 3)/20);
        sock_hold(sk);
        hrtimer_start_range_ns(&tp->compressed_ack_timer, ns_to_ktime(delay),
-                              sock_net(sk)->ipv4.sysctl_tcp_comp_sack_slack_ns,
+                              READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_slack_ns),
                               HRTIMER_MODE_REL_PINNED_SOFT);
 }
 
index c7e7101..0c83780 100644 (file)
@@ -1008,7 +1008,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
-               tos = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
+               tos = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
                                (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
                                (inet_sk(sk)->tos & INET_ECN_MASK) :
                                inet_sk(sk)->tos;
@@ -1528,7 +1528,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
        /* Set ToS of the new socket based upon the value of incoming SYN.
         * ECT bits are set later in tcp_init_transfer().
         */
-       if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
                newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
 
        if (!dst) {
index a501150..d58e672 100644 (file)
@@ -329,7 +329,7 @@ void tcp_update_metrics(struct sock *sk)
        int m;
 
        sk_dst_confirm(sk);
-       if (net->ipv4.sysctl_tcp_nometrics_save || !dst)
+       if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst)
                return;
 
        rcu_read_lock();
@@ -385,7 +385,7 @@ void tcp_update_metrics(struct sock *sk)
 
        if (tcp_in_initial_slowstart(tp)) {
                /* Slow start still did not finish. */
-               if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
+               if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
                    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && (tcp_snd_cwnd(tp) >> 1) > val)
@@ -401,7 +401,7 @@ void tcp_update_metrics(struct sock *sk)
        } else if (!tcp_in_slow_start(tp) &&
                   icsk->icsk_ca_state == TCP_CA_Open) {
                /* Cong. avoidance phase, cwnd is reliable. */
-               if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
+               if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
                    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
                        tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                       max(tcp_snd_cwnd(tp) >> 1, tp->snd_ssthresh));
@@ -418,7 +418,7 @@ void tcp_update_metrics(struct sock *sk)
                        tcp_metric_set(tm, TCP_METRIC_CWND,
                                       (val + tp->snd_ssthresh) >> 1);
                }
-               if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
+               if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
                    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && tp->snd_ssthresh > val)
@@ -463,7 +463,7 @@ void tcp_init_metrics(struct sock *sk)
        if (tcp_metric_locked(tm, TCP_METRIC_CWND))
                tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);
 
-       val = net->ipv4.sysctl_tcp_no_ssthresh_metrics_save ?
+       val = READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) ?
              0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
        if (val) {
                tp->snd_ssthresh = val;
index 2b72ccd..78b654f 100644 (file)
@@ -167,16 +167,13 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
        if (tcp_packets_in_flight(tp) == 0)
                tcp_ca_event(sk, CA_EVENT_TX_START);
 
-       /* If this is the first data packet sent in response to the
-        * previous received data,
-        * and it is a reply for ato after last received packet,
-        * increase pingpong count.
-        */
-       if (before(tp->lsndtime, icsk->icsk_ack.lrcvtime) &&
-           (u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
-               inet_csk_inc_pingpong_cnt(sk);
-
        tp->lsndtime = now;
+
+       /* If it is a reply for ato after last received
+        * packet, enter pingpong mode.
+        */
+       if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
+               inet_csk_enter_pingpong_mode(sk);
 }
 
 /* Account for an ACK we sent. */
@@ -230,7 +227,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
         * which we interpret as a sign the remote TCP is not
         * misinterpreting the window field as a signed quantity.
         */
-       if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
                (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
        else
                (*rcv_wnd) = min_t(u32, space, U16_MAX);
@@ -241,7 +238,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
        *rcv_wscale = 0;
        if (wscale_ok) {
                /* Set window scaling on max possible window */
-               space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+               space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
                space = max_t(u32, space, sysctl_rmem_max);
                space = min_t(u32, space, *window_clamp);
                *rcv_wscale = clamp_t(int, ilog2(space) - 15,
@@ -285,7 +282,7 @@ static u16 tcp_select_window(struct sock *sk)
         * scaled window.
         */
        if (!tp->rx_opt.rcv_wscale &&
-           sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
+           READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
                new_win = min(new_win, MAX_TCP_WINDOW);
        else
                new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
@@ -1976,7 +1973,7 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
 
        bytes = sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift);
 
-       r = tcp_min_rtt(tcp_sk(sk)) >> sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log;
+       r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log);
        if (r < BITS_PER_TYPE(sk->sk_gso_max_size))
                bytes += sk->sk_gso_max_size >> r;
 
@@ -1995,7 +1992,7 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
 
        min_tso = ca_ops->min_tso_segs ?
                        ca_ops->min_tso_segs(sk) :
-                       sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
+                       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
 
        tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
        return min_t(u32, tso_segs, sk->sk_gso_max_segs);
@@ -2507,7 +2504,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
                      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
        if (sk->sk_pacing_status == SK_PACING_NONE)
                limit = min_t(unsigned long, limit,
-                             sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
+                             READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes));
        limit <<= factor;
 
        if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
index 7f695c3..87c699d 100644 (file)
@@ -1522,7 +1522,6 @@ static void mld_query_work(struct work_struct *work)
 
                if (++cnt >= MLD_MAX_QUEUE) {
                        rework = true;
-                       schedule_delayed_work(&idev->mc_query_work, 0);
                        break;
                }
        }
@@ -1533,8 +1532,10 @@ static void mld_query_work(struct work_struct *work)
                __mld_query_work(skb);
        mutex_unlock(&idev->mc_lock);
 
-       if (!rework)
-               in6_dev_put(idev);
+       if (rework && queue_delayed_work(mld_wq, &idev->mc_query_work, 0))
+               return;
+
+       in6_dev_put(idev);
 }
 
 /* called with rcu_read_lock() */
@@ -1624,7 +1625,6 @@ static void mld_report_work(struct work_struct *work)
 
                if (++cnt >= MLD_MAX_QUEUE) {
                        rework = true;
-                       schedule_delayed_work(&idev->mc_report_work, 0);
                        break;
                }
        }
@@ -1635,8 +1635,10 @@ static void mld_report_work(struct work_struct *work)
                __mld_report_work(skb);
        mutex_unlock(&idev->mc_lock);
 
-       if (!rework)
-               in6_dev_put(idev);
+       if (rework && queue_delayed_work(mld_wq, &idev->mc_report_work, 0))
+               return;
+
+       in6_dev_put(idev);
 }
 
 static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
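Both MLD work functions above end with the same reference rule, which is easy to misread in diff form; restated with comments, with behaviour as read from the hunks:

        /* If queued skbs remain (rework) and the work item was actually
         * re-queued on mld_wq, the in6_dev reference travels with the
         * pending work and the function returns without dropping it.
         * If nothing is left to do, or queue_delayed_work() did not queue
         * it because the work is already pending, the reference taken for
         * this run is dropped here. */
        if (rework && queue_delayed_work(mld_wq, &idev->mc_query_work, 0))
                return;

        in6_dev_put(idev);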
index b1179f6..91b8405 100644 (file)
 #include <linux/proc_fs.h>
 #include <net/ping.h>
 
+static void ping_v6_destroy(struct sock *sk)
+{
+       inet6_destroy_sock(sk);
+}
+
 /* Compatibility glue so we can support IPv6 when it's compiled as a module */
 static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
                                 int *addr_len)
@@ -185,6 +190,7 @@ struct proto pingv6_prot = {
        .owner =        THIS_MODULE,
        .init =         ping_init_sock,
        .close =        ping_close,
+       .destroy =      ping_v6_destroy,
        .connect =      ip6_datagram_connect_v6_only,
        .disconnect =   __udp_disconnect,
        .setsockopt =   ipv6_setsockopt,
index e756ba7..34db881 100644 (file)
@@ -36,9 +36,11 @@ static size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo)
        case SEG6_IPTUN_MODE_INLINE:
                break;
        case SEG6_IPTUN_MODE_ENCAP:
+       case SEG6_IPTUN_MODE_ENCAP_RED:
                head = sizeof(struct ipv6hdr);
                break;
        case SEG6_IPTUN_MODE_L2ENCAP:
+       case SEG6_IPTUN_MODE_L2ENCAP_RED:
                return 0;
        }
 
@@ -197,6 +199,124 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
 }
 EXPORT_SYMBOL_GPL(seg6_do_srh_encap);
 
+/* encapsulate an IPv6 packet within an outer IPv6 header with reduced SRH */
+static int seg6_do_srh_encap_red(struct sk_buff *skb,
+                                struct ipv6_sr_hdr *osrh, int proto)
+{
+       __u8 first_seg = osrh->first_segment;
+       struct dst_entry *dst = skb_dst(skb);
+       struct net *net = dev_net(dst->dev);
+       struct ipv6hdr *hdr, *inner_hdr;
+       int hdrlen = ipv6_optlen(osrh);
+       int red_tlv_offset, tlv_offset;
+       struct ipv6_sr_hdr *isrh;
+       bool skip_srh = false;
+       __be32 flowlabel;
+       int tot_len, err;
+       int red_hdrlen;
+       int tlvs_len;
+
+       if (first_seg > 0) {
+               red_hdrlen = hdrlen - sizeof(struct in6_addr);
+       } else {
+               /* NOTE: if tag/flags and/or other TLVs are introduced in the
+                * seg6_iptunnel infrastructure, they should be considered when
+                * deciding to skip the SRH.
+                */
+               skip_srh = !sr_has_hmac(osrh);
+
+               red_hdrlen = skip_srh ? 0 : hdrlen;
+       }
+
+       tot_len = red_hdrlen + sizeof(struct ipv6hdr);
+
+       err = skb_cow_head(skb, tot_len + skb->mac_len);
+       if (unlikely(err))
+               return err;
+
+       inner_hdr = ipv6_hdr(skb);
+       flowlabel = seg6_make_flowlabel(net, skb, inner_hdr);
+
+       skb_push(skb, tot_len);
+       skb_reset_network_header(skb);
+       skb_mac_header_rebuild(skb);
+       hdr = ipv6_hdr(skb);
+
+       /* based on seg6_do_srh_encap() */
+       if (skb->protocol == htons(ETH_P_IPV6)) {
+               ip6_flow_hdr(hdr, ip6_tclass(ip6_flowinfo(inner_hdr)),
+                            flowlabel);
+               hdr->hop_limit = inner_hdr->hop_limit;
+       } else {
+               ip6_flow_hdr(hdr, 0, flowlabel);
+               hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
+
+               memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+               IP6CB(skb)->iif = skb->skb_iif;
+       }
+
+       /* no matter if we have to skip the SRH or not, the first segment
+        * always comes in the pushed IPv6 header.
+        */
+       hdr->daddr = osrh->segments[first_seg];
+
+       if (skip_srh) {
+               hdr->nexthdr = proto;
+
+               set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
+               goto out;
+       }
+
+       /* we cannot skip the SRH, slow path */
+
+       hdr->nexthdr = NEXTHDR_ROUTING;
+       isrh = (void *)hdr + sizeof(struct ipv6hdr);
+
+       if (unlikely(!first_seg)) {
+               /* this is a very rare case; we have only one SID but
+                * we cannot skip the SRH since we are carrying some
+                * other info.
+                */
+               memcpy(isrh, osrh, hdrlen);
+               goto srcaddr;
+       }
+
+       tlv_offset = sizeof(*osrh) + (first_seg + 1) * sizeof(struct in6_addr);
+       red_tlv_offset = tlv_offset - sizeof(struct in6_addr);
+
+       memcpy(isrh, osrh, red_tlv_offset);
+
+       tlvs_len = hdrlen - tlv_offset;
+       if (unlikely(tlvs_len > 0)) {
+               const void *s = (const void *)osrh + tlv_offset;
+               void *d = (void *)isrh + red_tlv_offset;
+
+               memcpy(d, s, tlvs_len);
+       }
+
+       --isrh->first_segment;
+       isrh->hdrlen -= 2;
+
+srcaddr:
+       isrh->nexthdr = proto;
+       set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
+
+#ifdef CONFIG_IPV6_SEG6_HMAC
+       if (unlikely(!skip_srh && sr_has_hmac(isrh))) {
+               err = seg6_push_hmac(net, &hdr->saddr, isrh);
+               if (unlikely(err))
+                       return err;
+       }
+#endif
+
+out:
+       hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
+
+       skb_postpush_rcsum(skb, hdr, tot_len);
+
+       return 0;
+}
+
 /* insert an SRH within an IPv6 packet, just after the IPv6 header */
 int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
 {
@@ -269,6 +389,7 @@ static int seg6_do_srh(struct sk_buff *skb)
                        return err;
                break;
        case SEG6_IPTUN_MODE_ENCAP:
+       case SEG6_IPTUN_MODE_ENCAP_RED:
                err = iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6);
                if (err)
                        return err;
@@ -280,7 +401,11 @@ static int seg6_do_srh(struct sk_buff *skb)
                else
                        return -EINVAL;
 
-               err = seg6_do_srh_encap(skb, tinfo->srh, proto);
+               if (tinfo->mode == SEG6_IPTUN_MODE_ENCAP)
+                       err = seg6_do_srh_encap(skb, tinfo->srh, proto);
+               else
+                       err = seg6_do_srh_encap_red(skb, tinfo->srh, proto);
+
                if (err)
                        return err;
 
@@ -289,6 +414,7 @@ static int seg6_do_srh(struct sk_buff *skb)
                skb->protocol = htons(ETH_P_IPV6);
                break;
        case SEG6_IPTUN_MODE_L2ENCAP:
+       case SEG6_IPTUN_MODE_L2ENCAP_RED:
                if (!skb_mac_header_was_set(skb))
                        return -EINVAL;
 
@@ -298,7 +424,13 @@ static int seg6_do_srh(struct sk_buff *skb)
                skb_mac_header_rebuild(skb);
                skb_push(skb, skb->mac_len);
 
-               err = seg6_do_srh_encap(skb, tinfo->srh, IPPROTO_ETHERNET);
+               if (tinfo->mode == SEG6_IPTUN_MODE_L2ENCAP)
+                       err = seg6_do_srh_encap(skb, tinfo->srh,
+                                               IPPROTO_ETHERNET);
+               else
+                       err = seg6_do_srh_encap_red(skb, tinfo->srh,
+                                                   IPPROTO_ETHERNET);
+
                if (err)
                        return err;
 
@@ -517,6 +649,10 @@ static int seg6_build_state(struct net *net, struct nlattr *nla,
                break;
        case SEG6_IPTUN_MODE_L2ENCAP:
                break;
+       case SEG6_IPTUN_MODE_ENCAP_RED:
+               break;
+       case SEG6_IPTUN_MODE_L2ENCAP_RED:
+               break;
        default:
                return -EINVAL;
        }
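The saving from the new reduced modes can be read off seg6_do_srh_encap_red() above: the active segment is carried only in the outer IPv6 destination address, so the pushed SRH shrinks by one in6_addr (red_hdrlen = hdrlen - sizeof(struct in6_addr)), and with a single SID and no HMAC the SRH is omitted entirely (skip_srh). A worked size comparison, segment counts illustrative:

        2 SIDs: H.Encaps      outer IPv6 40 + SRH (8 + 2*16) = 80 bytes
                H.Encaps.Red  outer IPv6 40 + SRH (8 + 1*16) = 64 bytes
        1 SID:  H.Encaps      outer IPv6 40 + SRH (8 + 1*16) = 64 bytes
                H.Encaps.Red  outer IPv6 40 only              = 40 bytes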
index 85b8b76..e54eee8 100644 (file)
@@ -546,7 +546,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                if (np->repflow && ireq->pktopts)
                        fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 
-               tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
+               tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
                                (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
                                (np->tclass & INET_ECN_MASK) :
                                np->tclass;
@@ -1317,7 +1317,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
        /* Set ToS of the new socket based upon the value of incoming SYN.
         * ECT bits are set later in tcp_init_transfer().
         */
-       if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
                newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
 
        /* Clone native IPv6 options from listening socket (if any)
index bd8f0f4..30d2890 100644 (file)
@@ -1271,7 +1271,7 @@ raise_win:
                if (unlikely(th->syn))
                        new_win = min(new_win, 65535U) << tp->rx_opt.rcv_wscale;
                if (!tp->rx_opt.rcv_wscale &&
-                   sock_net(ssk)->ipv4.sysctl_tcp_workaround_signed_windows)
+                   READ_ONCE(sock_net(ssk)->ipv4.sysctl_tcp_workaround_signed_windows))
                        new_win = min(new_win, MAX_TCP_WINDOW);
                else
                        new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
index 57f23f4..a3f1c14 100644 (file)
@@ -1873,7 +1873,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
        if (msk->rcvq_space.copied <= msk->rcvq_space.space)
                goto new_measure;
 
-       if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
                int rcvmem, rcvbuf;
                u64 rcvwin, grow;
@@ -1891,7 +1891,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
 
                do_div(rcvwin, advmss);
                rcvbuf = min_t(u64, rcvwin * rcvmem,
-                              sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+                              READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
 
                if (rcvbuf > sk->sk_rcvbuf) {
                        u32 window_clamp;
@@ -2634,8 +2634,8 @@ static int mptcp_init_sock(struct sock *sk)
        mptcp_ca_reset(sk);
 
        sk_sockets_allocated_inc(sk);
-       sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
-       sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
+       sk->sk_rcvbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
+       sk->sk_sndbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
 
        return 0;
 }
index d4b16d0..901c763 100644 (file)
@@ -1533,7 +1533,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
        mptcp_sock_graft(ssk, sk->sk_socket);
        iput(SOCK_INODE(sf));
        WRITE_ONCE(msk->allow_infinite_fallback, false);
-       return err;
+       return 0;
 
 failed_unlink:
        list_del(&subflow->node);
index 646d5fd..9f976b1 100644 (file)
@@ -3340,6 +3340,8 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
                        if (err < 0)
                                return err;
                }
+
+               cond_resched();
        }
 
        return 0;
@@ -9367,9 +9369,13 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
                                break;
                        }
                }
+
+               cond_resched();
        }
 
        list_for_each_entry(set, &ctx->table->sets, list) {
+               cond_resched();
+
                if (!nft_is_active_next(ctx->net, set))
                        continue;
                if (!(set->flags & NFT_SET_MAP) ||
index a364f8e..87a9009 100644 (file)
@@ -843,11 +843,16 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 }
 
 static int
-nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
+nfqnl_mangle(void *data, unsigned int data_len, struct nf_queue_entry *e, int diff)
 {
        struct sk_buff *nskb;
 
        if (diff < 0) {
+               unsigned int min_len = skb_transport_offset(e->skb);
+
+               if (data_len < min_len)
+                       return -EINVAL;
+
                if (pskb_trim(e->skb, data_len))
                        return -ENOMEM;
        } else if (diff > 0) {
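The new lower bound in nfqnl_mangle() above keeps a userspace verdict from trimming a packet into its own headers. A worked case with illustrative offsets: if skb_transport_offset(e->skb) is 40 (an IPv6 header) and the verdict carries a 20-byte payload (diff < 0, data_len == 20), the old code would pskb_trim() the skb down to 20 bytes, cutting into the network header; the added check now returns -EINVAL, so a shrinking mangle can never truncate below the transport header.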
index 15e4b76..da29e92 100644 (file)
@@ -68,6 +68,31 @@ static void nft_queue_sreg_eval(const struct nft_expr *expr,
        regs->verdict.code = ret;
 }
 
+static int nft_queue_validate(const struct nft_ctx *ctx,
+                             const struct nft_expr *expr,
+                             const struct nft_data **data)
+{
+       static const unsigned int supported_hooks = ((1 << NF_INET_PRE_ROUTING) |
+                                                    (1 << NF_INET_LOCAL_IN) |
+                                                    (1 << NF_INET_FORWARD) |
+                                                    (1 << NF_INET_LOCAL_OUT) |
+                                                    (1 << NF_INET_POST_ROUTING));
+
+       switch (ctx->family) {
+       case NFPROTO_IPV4:
+       case NFPROTO_IPV6:
+       case NFPROTO_INET:
+       case NFPROTO_BRIDGE:
+               break;
+       case NFPROTO_NETDEV: /* lacks okfn */
+               fallthrough;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return nft_chain_validate_hooks(ctx->chain, supported_hooks);
+}
+
 static const struct nla_policy nft_queue_policy[NFTA_QUEUE_MAX + 1] = {
        [NFTA_QUEUE_NUM]        = { .type = NLA_U16 },
        [NFTA_QUEUE_TOTAL]      = { .type = NLA_U16 },
@@ -164,6 +189,7 @@ static const struct nft_expr_ops nft_queue_ops = {
        .eval           = nft_queue_eval,
        .init           = nft_queue_init,
        .dump           = nft_queue_dump,
+       .validate       = nft_queue_validate,
        .reduce         = NFT_REDUCE_READONLY,
 };
 
@@ -173,6 +199,7 @@ static const struct nft_expr_ops nft_queue_sreg_ops = {
        .eval           = nft_queue_sreg_eval,
        .init           = nft_queue_sreg_init,
        .dump           = nft_queue_sreg_dump,
+       .validate       = nft_queue_validate,
        .reduce         = NFT_REDUCE_READONLY,
 };
 
index d08c472..5cbe071 100644 (file)
@@ -3037,8 +3037,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        if (err)
                goto out_free;
 
-       if (sock->type == SOCK_RAW &&
-           !dev_validate_header(dev, skb->data, len)) {
+       if ((sock->type == SOCK_RAW &&
+            !dev_validate_header(dev, skb->data, len)) || !skb->len) {
                err = -EINVAL;
                goto out_free;
        }
index be29da0..3460abc 100644 (file)
@@ -229,9 +229,8 @@ static struct sctp_association *sctp_association_init(
        if (!sctp_ulpq_init(&asoc->ulpq, asoc))
                goto fail_init;
 
-       if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
-                            0, gfp))
-               goto fail_init;
+       if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, 0, gfp))
+               goto stream_free;
 
        /* Initialize default path MTU. */
        asoc->pathmtu = sp->pathmtu;
index 6dc95dc..ef9fcea 100644 (file)
@@ -137,7 +137,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
 
        ret = sctp_stream_alloc_out(stream, outcnt, gfp);
        if (ret)
-               goto out_err;
+               return ret;
 
        for (i = 0; i < stream->outcnt; i++)
                SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
@@ -145,22 +145,9 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
 handle_in:
        sctp_stream_interleave_init(stream);
        if (!incnt)
-               goto out;
-
-       ret = sctp_stream_alloc_in(stream, incnt, gfp);
-       if (ret)
-               goto in_err;
-
-       goto out;
+               return 0;
 
-in_err:
-       sched->free(stream);
-       genradix_free(&stream->in);
-out_err:
-       genradix_free(&stream->out);
-       stream->outcnt = 0;
-out:
-       return ret;
+       return sctp_stream_alloc_in(stream, incnt, gfp);
 }
 
 int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid)
index 518b1b9..1ad565e 100644 (file)
@@ -160,7 +160,7 @@ int sctp_sched_set_sched(struct sctp_association *asoc,
                if (!SCTP_SO(&asoc->stream, i)->ext)
                        continue;
 
-               ret = n->init_sid(&asoc->stream, i, GFP_KERNEL);
+               ret = n->init_sid(&asoc->stream, i, GFP_ATOMIC);
                if (ret)
                        goto err;
        }
index 43509c7..f1c3b8e 100644 (file)
@@ -517,7 +517,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
        timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
        sk->sk_shutdown = 0;
        sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
-       sk->sk_rcvbuf = sysctl_tipc_rmem[1];
+       sk->sk_rcvbuf = READ_ONCE(sysctl_tipc_rmem[1]);
        sk->sk_data_ready = tipc_data_ready;
        sk->sk_write_space = tipc_write_space;
        sk->sk_destruct = tipc_sock_destruct;
index fc513c1..18c7e5c 100644 (file)
  */
 static DECLARE_RWSEM(device_offload_lock);
 
-static void tls_device_gc_task(struct work_struct *work);
+static struct workqueue_struct *destruct_wq __read_mostly;
 
-static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
-static LIST_HEAD(tls_device_gc_list);
 static LIST_HEAD(tls_device_list);
 static LIST_HEAD(tls_device_down_list);
 static DEFINE_SPINLOCK(tls_device_lock);
@@ -68,47 +66,44 @@ static void tls_device_free_ctx(struct tls_context *ctx)
        tls_ctx_free(NULL, ctx);
 }
 
-static void tls_device_gc_task(struct work_struct *work)
+static void tls_device_tx_del_task(struct work_struct *work)
 {
-       struct tls_context *ctx, *tmp;
-       unsigned long flags;
-       LIST_HEAD(gc_list);
-
-       spin_lock_irqsave(&tls_device_lock, flags);
-       list_splice_init(&tls_device_gc_list, &gc_list);
-       spin_unlock_irqrestore(&tls_device_lock, flags);
-
-       list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
-               struct net_device *netdev = ctx->netdev;
+       struct tls_offload_context_tx *offload_ctx =
+               container_of(work, struct tls_offload_context_tx, destruct_work);
+       struct tls_context *ctx = offload_ctx->ctx;
+       struct net_device *netdev = ctx->netdev;
 
-               if (netdev && ctx->tx_conf == TLS_HW) {
-                       netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
-                                                       TLS_OFFLOAD_CTX_DIR_TX);
-                       dev_put(netdev);
-                       ctx->netdev = NULL;
-               }
-
-               list_del(&ctx->list);
-               tls_device_free_ctx(ctx);
-       }
+       netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX);
+       dev_put(netdev);
+       ctx->netdev = NULL;
+       tls_device_free_ctx(ctx);
 }
 
 static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
 {
        unsigned long flags;
+       bool async_cleanup;
 
        spin_lock_irqsave(&tls_device_lock, flags);
-       if (unlikely(!refcount_dec_and_test(&ctx->refcount)))
-               goto unlock;
+       if (unlikely(!refcount_dec_and_test(&ctx->refcount))) {
+               spin_unlock_irqrestore(&tls_device_lock, flags);
+               return;
+       }
 
-       list_move_tail(&ctx->list, &tls_device_gc_list);
+       list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */
+       async_cleanup = ctx->netdev && ctx->tx_conf == TLS_HW;
+       if (async_cleanup) {
+               struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx);
 
-       /* schedule_work inside the spinlock
-        * to make sure tls_device_down waits for that work.
-        */
-       schedule_work(&tls_device_gc_work);
-unlock:
+               /* queue_work inside the spinlock
+                * to make sure tls_device_down waits for that work.
+                */
+               queue_work(destruct_wq, &offload_ctx->destruct_work);
+       }
        spin_unlock_irqrestore(&tls_device_lock, flags);
+
+       if (!async_cleanup)
+               tls_device_free_ctx(ctx);
 }
 
 /* We assume that the socket is already connected */
@@ -1150,6 +1145,9 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
        start_marker_record->len = 0;
        start_marker_record->num_frags = 0;
 
+       INIT_WORK(&offload_ctx->destruct_work, tls_device_tx_del_task);
+       offload_ctx->ctx = ctx;
+
        INIT_LIST_HEAD(&offload_ctx->records_list);
        list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
        spin_lock_init(&offload_ctx->lock);
@@ -1383,13 +1381,18 @@ static int tls_device_down(struct net_device *netdev)
                 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
                 * Now release the ref taken above.
                 */
-               if (refcount_dec_and_test(&ctx->refcount))
+               if (refcount_dec_and_test(&ctx->refcount)) {
+                       /* sk_destruct ran after tls_device_down took a ref, and
+                        * it returned early. Complete the destruction here.
+                        */
+                       list_del(&ctx->list);
                        tls_device_free_ctx(ctx);
+               }
        }
 
        up_write(&device_offload_lock);
 
-       flush_work(&tls_device_gc_work);
+       flush_workqueue(destruct_wq);
 
        return NOTIFY_DONE;
 }
@@ -1430,12 +1433,23 @@ static struct notifier_block tls_dev_notifier = {
 
 int __init tls_device_init(void)
 {
-       return register_netdevice_notifier(&tls_dev_notifier);
+       int err;
+
+       destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
+       if (!destruct_wq)
+               return -ENOMEM;
+
+       err = register_netdevice_notifier(&tls_dev_notifier);
+       if (err)
+               destroy_workqueue(destruct_wq);
+
+       return err;
 }
 
 void __exit tls_device_cleanup(void)
 {
        unregister_netdevice_notifier(&tls_dev_notifier);
-       flush_work(&tls_device_gc_work);
+       flush_workqueue(destruct_wq);
+       destroy_workqueue(destruct_wq);
        clean_acked_data_flush();
 }
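
The tls_device.c hunks above replace the single global gc list and work item with a per-context work item queued on a dedicated workqueue, so the flush_workqueue() calls in tls_device_down() and tls_device_cleanup() keep the barrier that flush_work() on the shared item used to provide. A condensed sketch of that pattern follows; the foo_* names and the module boilerplate are invented for illustration and are not part of the patch.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Illustrative sketch: each object embeds its own destruct work item,
 * all queued on one private workqueue.
 */
struct foo_ctx {
	struct work_struct destruct_work;
	/* ... per-connection offload state ... */
};

static struct workqueue_struct *foo_destruct_wq;
static struct foo_ctx *demo_ctx;

static void foo_destruct_task(struct work_struct *work)
{
	struct foo_ctx *ctx = container_of(work, struct foo_ctx, destruct_work);

	/* tear down hardware state for this context, then free it */
	kfree(ctx);
}

static struct foo_ctx *foo_ctx_alloc(void)
{
	struct foo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (ctx)
		INIT_WORK(&ctx->destruct_work, foo_destruct_task);
	return ctx;
}

static void foo_ctx_release(struct foo_ctx *ctx)
{
	/* no shared gc list: the context carries its own work item */
	queue_work(foo_destruct_wq, &ctx->destruct_work);
}

static int __init foo_init(void)
{
	foo_destruct_wq = alloc_workqueue("foo_destruct", 0, 0);
	if (!foo_destruct_wq)
		return -ENOMEM;

	demo_ctx = foo_ctx_alloc();
	if (!demo_ctx) {
		destroy_workqueue(foo_destruct_wq);
		return -ENOMEM;
	}
	return 0;
}

static void __exit foo_exit(void)
{
	foo_ctx_release(demo_ctx);
	/* waits for every queued destruct item, like tls_device_cleanup() */
	flush_workqueue(foo_destruct_wq);
	destroy_workqueue(foo_destruct_wq);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
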
index b945288..f0b7c91 100644 (file)
@@ -187,9 +187,10 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
                           unsigned int offset, size_t in_len)
 {
        struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
-       size_t sz, len, chunk;
        struct sk_buff *skb;
        skb_frag_t *frag;
+       size_t len, chunk;
+       int sz;
 
        if (strp->msg_ready)
                return 0;
@@ -480,7 +481,7 @@ void tls_strp_done(struct tls_strparser *strp)
 
 int __init tls_strp_dev_init(void)
 {
-       tls_strp_wq = create_singlethread_workqueue("kstrp");
+       tls_strp_wq = create_workqueue("tls-strp");
        if (unlikely(!tls_strp_wq))
                return -ENOMEM;
 
index 0fc24a5..17db8c8 100644 (file)
@@ -1283,11 +1283,14 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
 
 static int
 tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
-               bool released, long timeo)
+               bool released)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
+       long timeo;
+
+       timeo = sock_rcvtimeo(sk, nonblock);
 
        while (!tls_strp_msg_ready(ctx)) {
                if (!sk_psock_queue_empty(psock))
@@ -1308,7 +1311,7 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
                if (sock_flag(sk, SOCK_DONE))
                        return 0;
 
-               if (nonblock || !timeo)
+               if (!timeo)
                        return -EAGAIN;
 
                released = true;
@@ -1842,8 +1845,8 @@ tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
        return sk_flush_backlog(sk);
 }
 
-static long tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
-                              bool nonblock)
+static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
+                             bool nonblock)
 {
        long timeo;
        int err;
@@ -1874,7 +1877,7 @@ static long tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
 
        WRITE_ONCE(ctx->reader_present, 1);
 
-       return timeo;
+       return 0;
 
 err_unlock:
        release_sock(sk);
@@ -1913,8 +1916,7 @@ int tls_sw_recvmsg(struct sock *sk,
        struct tls_msg *tlm;
        ssize_t copied = 0;
        bool async = false;
-       int target, err = 0;
-       long timeo;
+       int target, err;
        bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
        bool is_peek = flags & MSG_PEEK;
        bool released = true;
@@ -1925,9 +1927,9 @@ int tls_sw_recvmsg(struct sock *sk,
                return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
 
        psock = sk_psock_get(sk);
-       timeo = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
-       if (timeo < 0)
-               return timeo;
+       err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
+       if (err < 0)
+               return err;
        bpf_strp_enabled = sk_psock_strp_enabled(psock);
 
        /* If crypto failed the connection is broken */
@@ -1954,8 +1956,8 @@ int tls_sw_recvmsg(struct sock *sk,
                struct tls_decrypt_arg darg;
                int to_decrypt, chunk;
 
-               err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT, released,
-                                     timeo);
+               err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
+                                     released);
                if (err <= 0) {
                        if (psock) {
                                chunk = sk_msg_recvmsg(sk, psock, msg, len,
@@ -2024,7 +2026,7 @@ put_on_rx_list_err:
                        bool partially_consumed = chunk > len;
                        struct sk_buff *skb = darg.skb;
 
-                       DEBUG_NET_WARN_ON_ONCE(darg.skb == tls_strp_msg(ctx));
+                       DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);
 
                        if (async) {
                                /* TLS 1.2-only, to_decrypt must be text len */
@@ -2131,13 +2133,12 @@ ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
        struct tls_msg *tlm;
        struct sk_buff *skb;
        ssize_t copied = 0;
-       int err = 0;
-       long timeo;
        int chunk;
+       int err;
 
-       timeo = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
-       if (timeo < 0)
-               return timeo;
+       err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
+       if (err < 0)
+               return err;
 
        if (!skb_queue_empty(&ctx->rx_list)) {
                skb = __skb_dequeue(&ctx->rx_list);
@@ -2145,7 +2146,7 @@ ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
                struct tls_decrypt_arg darg;
 
                err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
-                                     true, timeo);
+                                     true);
                if (err <= 0)
                        goto splice_read_end;
 
index 99a128a..4ce5d25 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/of_gpio.h>
 #include <linux/of_device.h>
 #include <linux/clk.h>
-#include <linux/pinctrl/consumer.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 #include <linux/spinlock.h>
@@ -55,40 +54,8 @@ struct rk_i2s_dev {
        const struct rk_i2s_pins *pins;
        unsigned int bclk_ratio;
        spinlock_t lock; /* tx/rx lock */
-       struct pinctrl *pinctrl;
-       struct pinctrl_state *bclk_on;
-       struct pinctrl_state *bclk_off;
 };
 
-static int i2s_pinctrl_select_bclk_on(struct rk_i2s_dev *i2s)
-{
-       int ret = 0;
-
-       if (!IS_ERR(i2s->pinctrl) && !IS_ERR_OR_NULL(i2s->bclk_on))
-               ret = pinctrl_select_state(i2s->pinctrl,
-                                    i2s->bclk_on);
-
-       if (ret)
-               dev_err(i2s->dev, "bclk enable failed %d\n", ret);
-
-       return ret;
-}
-
-static int i2s_pinctrl_select_bclk_off(struct rk_i2s_dev *i2s)
-{
-
-       int ret = 0;
-
-       if (!IS_ERR(i2s->pinctrl) && !IS_ERR_OR_NULL(i2s->bclk_off))
-               ret = pinctrl_select_state(i2s->pinctrl,
-                                    i2s->bclk_off);
-
-       if (ret)
-               dev_err(i2s->dev, "bclk disable failed %d\n", ret);
-
-       return ret;
-}
-
 static int i2s_runtime_suspend(struct device *dev)
 {
        struct rk_i2s_dev *i2s = dev_get_drvdata(dev);
@@ -125,49 +92,38 @@ static inline struct rk_i2s_dev *to_info(struct snd_soc_dai *dai)
        return snd_soc_dai_get_drvdata(dai);
 }
 
-static int rockchip_snd_txctrl(struct rk_i2s_dev *i2s, int on)
+static void rockchip_snd_txctrl(struct rk_i2s_dev *i2s, int on)
 {
        unsigned int val = 0;
        int retry = 10;
-       int ret = 0;
 
        spin_lock(&i2s->lock);
        if (on) {
-               ret = regmap_update_bits(i2s->regmap, I2S_DMACR,
-                               I2S_DMACR_TDE_ENABLE, I2S_DMACR_TDE_ENABLE);
-               if (ret < 0)
-                       goto end;
+               regmap_update_bits(i2s->regmap, I2S_DMACR,
+                                  I2S_DMACR_TDE_ENABLE, I2S_DMACR_TDE_ENABLE);
 
-               ret = regmap_update_bits(i2s->regmap, I2S_XFER,
-                               I2S_XFER_TXS_START | I2S_XFER_RXS_START,
-                               I2S_XFER_TXS_START | I2S_XFER_RXS_START);
-               if (ret < 0)
-                       goto end;
+               regmap_update_bits(i2s->regmap, I2S_XFER,
+                                  I2S_XFER_TXS_START | I2S_XFER_RXS_START,
+                                  I2S_XFER_TXS_START | I2S_XFER_RXS_START);
 
                i2s->tx_start = true;
        } else {
                i2s->tx_start = false;
 
-               ret = regmap_update_bits(i2s->regmap, I2S_DMACR,
-                               I2S_DMACR_TDE_ENABLE, I2S_DMACR_TDE_DISABLE);
-               if (ret < 0)
-                       goto end;
+               regmap_update_bits(i2s->regmap, I2S_DMACR,
+                                  I2S_DMACR_TDE_ENABLE, I2S_DMACR_TDE_DISABLE);
 
                if (!i2s->rx_start) {
-                       ret = regmap_update_bits(i2s->regmap, I2S_XFER,
-                                       I2S_XFER_TXS_START |
-                                       I2S_XFER_RXS_START,
-                                       I2S_XFER_TXS_STOP |
-                                       I2S_XFER_RXS_STOP);
-                       if (ret < 0)
-                               goto end;
+                       regmap_update_bits(i2s->regmap, I2S_XFER,
+                                          I2S_XFER_TXS_START |
+                                          I2S_XFER_RXS_START,
+                                          I2S_XFER_TXS_STOP |
+                                          I2S_XFER_RXS_STOP);
 
                        udelay(150);
-                       ret = regmap_update_bits(i2s->regmap, I2S_CLR,
-                                       I2S_CLR_TXC | I2S_CLR_RXC,
-                                       I2S_CLR_TXC | I2S_CLR_RXC);
-                       if (ret < 0)
-                               goto end;
+                       regmap_update_bits(i2s->regmap, I2S_CLR,
+                                          I2S_CLR_TXC | I2S_CLR_RXC,
+                                          I2S_CLR_TXC | I2S_CLR_RXC);
 
                        regmap_read(i2s->regmap, I2S_CLR, &val);
 
@@ -182,57 +138,44 @@ static int rockchip_snd_txctrl(struct rk_i2s_dev *i2s, int on)
                        }
                }
        }
-end:
        spin_unlock(&i2s->lock);
-       if (ret < 0)
-               dev_err(i2s->dev, "lrclk update failed\n");
-
-       return ret;
 }
 
-static int rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on)
+static void rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on)
 {
        unsigned int val = 0;
        int retry = 10;
-       int ret = 0;
 
        spin_lock(&i2s->lock);
        if (on) {
-               ret = regmap_update_bits(i2s->regmap, I2S_DMACR,
+               regmap_update_bits(i2s->regmap, I2S_DMACR,
                                   I2S_DMACR_RDE_ENABLE, I2S_DMACR_RDE_ENABLE);
-               if (ret < 0)
-                       goto end;
 
-               ret = regmap_update_bits(i2s->regmap, I2S_XFER,
+               regmap_update_bits(i2s->regmap, I2S_XFER,
                                   I2S_XFER_TXS_START | I2S_XFER_RXS_START,
                                   I2S_XFER_TXS_START | I2S_XFER_RXS_START);
-               if (ret < 0)
-                       goto end;
 
                i2s->rx_start = true;
        } else {
                i2s->rx_start = false;
 
-               ret = regmap_update_bits(i2s->regmap, I2S_DMACR,
+               regmap_update_bits(i2s->regmap, I2S_DMACR,
                                   I2S_DMACR_RDE_ENABLE, I2S_DMACR_RDE_DISABLE);
-               if (ret < 0)
-                       goto end;
 
                if (!i2s->tx_start) {
-                       ret = regmap_update_bits(i2s->regmap, I2S_XFER,
+                       regmap_update_bits(i2s->regmap, I2S_XFER,
                                           I2S_XFER_TXS_START |
                                           I2S_XFER_RXS_START,
                                           I2S_XFER_TXS_STOP |
                                           I2S_XFER_RXS_STOP);
-                       if (ret < 0)
-                               goto end;
+
                        udelay(150);
-                       ret = regmap_update_bits(i2s->regmap, I2S_CLR,
+                       regmap_update_bits(i2s->regmap, I2S_CLR,
                                           I2S_CLR_TXC | I2S_CLR_RXC,
                                           I2S_CLR_TXC | I2S_CLR_RXC);
-                       if (ret < 0)
-                               goto end;
+
                        regmap_read(i2s->regmap, I2S_CLR, &val);
+
                        /* Should wait for clear operation to finish */
                        while (val) {
                                regmap_read(i2s->regmap, I2S_CLR, &val);
@@ -244,12 +187,7 @@ static int rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on)
                        }
                }
        }
-end:
        spin_unlock(&i2s->lock);
-       if (ret < 0)
-               dev_err(i2s->dev, "lrclk update failed\n");
-
-       return ret;
 }
 
 static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
@@ -487,26 +425,17 @@ static int rockchip_i2s_trigger(struct snd_pcm_substream *substream,
        case SNDRV_PCM_TRIGGER_RESUME:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
                if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-                       ret = rockchip_snd_rxctrl(i2s, 1);
+                       rockchip_snd_rxctrl(i2s, 1);
                else
-                       ret = rockchip_snd_txctrl(i2s, 1);
-               /* Do not turn on bclk if lrclk open fails. */
-               if (ret < 0)
-                       return ret;
-               i2s_pinctrl_select_bclk_on(i2s);
+                       rockchip_snd_txctrl(i2s, 1);
                break;
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-               if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
-                       if (!i2s->tx_start)
-                               i2s_pinctrl_select_bclk_off(i2s);
-                       ret = rockchip_snd_rxctrl(i2s, 0);
-               } else {
-                       if (!i2s->rx_start)
-                               i2s_pinctrl_select_bclk_off(i2s);
-                       ret = rockchip_snd_txctrl(i2s, 0);
-               }
+               if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+                       rockchip_snd_rxctrl(i2s, 0);
+               else
+                       rockchip_snd_txctrl(i2s, 0);
                break;
        default:
                ret = -EINVAL;
@@ -807,33 +736,6 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
        }
 
        i2s->bclk_ratio = 64;
-       i2s->pinctrl = devm_pinctrl_get(&pdev->dev);
-       if (IS_ERR(i2s->pinctrl))
-               dev_err(&pdev->dev, "failed to find i2s pinctrl\n");
-
-       i2s->bclk_on = pinctrl_lookup_state(i2s->pinctrl,
-                                  "bclk_on");
-       if (IS_ERR_OR_NULL(i2s->bclk_on))
-               dev_err(&pdev->dev, "failed to find i2s default state\n");
-       else
-               dev_dbg(&pdev->dev, "find i2s bclk state\n");
-
-       i2s->bclk_off = pinctrl_lookup_state(i2s->pinctrl,
-                                 "bclk_off");
-       if (IS_ERR_OR_NULL(i2s->bclk_off))
-               dev_err(&pdev->dev, "failed to find i2s gpio state\n");
-       else
-               dev_dbg(&pdev->dev, "find i2s bclk_off state\n");
-
-       i2s_pinctrl_select_bclk_off(i2s);
-
-       i2s->playback_dma_data.addr = res->start + I2S_TXDR;
-       i2s->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       i2s->playback_dma_data.maxburst = 4;
-
-       i2s->capture_dma_data.addr = res->start + I2S_RXDR;
-       i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       i2s->capture_dma_data.maxburst = 4;
 
        dev_set_drvdata(&pdev->dev, i2s);
 
index 1cf53bb..7070dcf 100644 (file)
@@ -1175,7 +1175,7 @@ static int do_skeleton(int argc, char **argv)
                static inline void                                          \n\
                %1$s__detach(struct %1$s *obj)                              \n\
                {                                                           \n\
-                       return bpf_object__detach_skeleton(obj->skeleton);  \n\
+                       bpf_object__detach_skeleton(obj->skeleton);         \n\
                }                                                           \n\
                ",
                obj_name
index f081de3..c81362a 100644 (file)
@@ -1962,7 +1962,7 @@ static int profile_parse_metrics(int argc, char **argv)
        int selected_cnt = 0;
        unsigned int i;
 
-       metric_cnt = sizeof(metrics) / sizeof(struct profile_metric);
+       metric_cnt = ARRAY_SIZE(metrics);
 
        while (argc > 0) {
                for (i = 0; i < metric_cnt; i++) {
index 0197042..1ecdb91 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 #ifndef _ASM_GENERIC_FCNTL_H
 #define _ASM_GENERIC_FCNTL_H
 
@@ -90,7 +91,7 @@
 
 /* a horrid kludge trying to make sure that this will fail on old kernels */
 #define O_TMPFILE (__O_TMPFILE | O_DIRECTORY)
-#define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT)
+#define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT)      
 
 #ifndef O_NDELAY
 #define O_NDELAY       O_NONBLOCK
 #define F_GETSIG       11      /* for sockets. */
 #endif
 
+#if __BITS_PER_LONG == 32 || defined(__KERNEL__)
 #ifndef F_GETLK64
 #define F_GETLK64      12      /*  using 'struct flock64' */
 #define F_SETLK64      13
 #define F_SETLKW64     14
 #endif
+#endif /* __BITS_PER_LONG == 32 || defined(__KERNEL__) */
 
 #ifndef F_SETOWN_EX
 #define F_SETOWN_EX    15
@@ -178,6 +181,10 @@ struct f_owner_ex {
                                   blocking */
 #define LOCK_UN                8       /* remove lock */
 
+/*
+ * LOCK_MAND support has been removed from the kernel. We leave the symbols
+ * here to not break legacy builds, but these should not be used in new code.
+ */
 #define LOCK_MAND      32      /* This is a mandatory flock ... */
 #define LOCK_READ      64      /* which allows concurrent read operations */
 #define LOCK_WRITE     128     /* which allows concurrent write operations */
@@ -185,6 +192,7 @@ struct f_owner_ex {
 
 #define F_LINUX_SPECIFIC_BASE  1024
 
+#ifndef HAVE_ARCH_STRUCT_FLOCK
 struct flock {
        short   l_type;
        short   l_whence;
@@ -209,5 +217,6 @@ struct flock64 {
        __ARCH_FLOCK64_PAD
 #endif
 };
+#endif /* HAVE_ARCH_STRUCT_FLOCK */
 
 #endif /* _ASM_GENERIC_FCNTL_H */
index 811897d..860f867 100644 (file)
@@ -2084,7 +2084,7 @@ struct kvm_stats_header {
 #define KVM_STATS_UNIT_SECONDS         (0x2 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_CYCLES          (0x3 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_BOOLEAN         (0x4 << KVM_STATS_UNIT_SHIFT)
-#define KVM_STATS_UNIT_MAX             KVM_STATS_UNIT_CYCLES
+#define KVM_STATS_UNIT_MAX             KVM_STATS_UNIT_BOOLEAN
 
 #define KVM_STATS_BASE_SHIFT           8
 #define KVM_STATS_BASE_MASK            (0xF << KVM_STATS_BASE_SHIFT)
index 5eb0df9..efcc06d 100644 (file)
@@ -578,12 +578,21 @@ int bpf_obj_pin(int fd, const char *pathname)
 }
 
 int bpf_obj_get(const char *pathname)
+{
+       return bpf_obj_get_opts(pathname, NULL);
+}
+
+int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts)
 {
        union bpf_attr attr;
        int fd;
 
+       if (!OPTS_VALID(opts, bpf_obj_get_opts))
+               return libbpf_err(-EINVAL);
+
        memset(&attr, 0, sizeof(attr));
        attr.pathname = ptr_to_u64((void *)pathname);
+       attr.file_flags = OPTS_GET(opts, file_flags, 0);
 
        fd = sys_bpf_fd(BPF_OBJ_GET, &attr, sizeof(attr));
        return libbpf_err_errno(fd);
index 88a7cc4..9c50bea 100644 (file)
@@ -270,8 +270,19 @@ LIBBPF_API int bpf_map_update_batch(int fd, const void *keys, const void *values
                                    __u32 *count,
                                    const struct bpf_map_batch_opts *opts);
 
+struct bpf_obj_get_opts {
+       size_t sz; /* size of this struct for forward/backward compatibility */
+
+       __u32 file_flags;
+
+       size_t :0;
+};
+#define bpf_obj_get_opts__last_field file_flags
+
 LIBBPF_API int bpf_obj_pin(int fd, const char *pathname);
 LIBBPF_API int bpf_obj_get(const char *pathname);
+LIBBPF_API int bpf_obj_get_opts(const char *pathname,
+                               const struct bpf_obj_get_opts *opts);
 
 struct bpf_prog_attach_opts {
        size_t sz; /* size of this struct for forward/backward compatibility */
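
For reference, a minimal userspace sketch of calling the new opts-based getter; the pin path and the use of BPF_F_RDONLY below are illustrative choices, not taken from this patch. Passing a NULL opts pointer behaves like the old bpf_obj_get().

#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>

int main(void)
{
	/* file_flags is currently the only option; BPF_F_RDONLY asks the
	 * kernel for a read-only fd to the pinned object.
	 */
	LIBBPF_OPTS(bpf_obj_get_opts, opts, .file_flags = BPF_F_RDONLY);
	int fd = bpf_obj_get_opts("/sys/fs/bpf/example_map", &opts);

	if (fd < 0) {
		fprintf(stderr, "bpf_obj_get_opts: %d\n", fd);
		return 1;
	}
	/* ... use the read-only map fd ... */
	close(fd);
	return 0;
}
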
index f4d3e1e..43ca3af 100644 (file)
@@ -523,10 +523,17 @@ static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
  * Original struct pt_regs * context is preserved as 'ctx' argument. This might
  * be necessary when using BPF helpers like bpf_perf_event_output().
  *
- * At the moment BPF_KSYSCALL does not handle all the calling convention
- * quirks for mmap(), clone() and compat syscalls transparrently. This may or
- * may not change in the future. User needs to take extra measures to handle
- * such quirks explicitly, if necessary.
+ * At the moment BPF_KSYSCALL does not transparently handle all the calling
+ * convention quirks for the following syscalls:
+ *
+ * - mmap(): __ARCH_WANT_SYS_OLD_MMAP.
+ * - clone(): CONFIG_CLONE_BACKWARDS, CONFIG_CLONE_BACKWARDS2 and
+ *            CONFIG_CLONE_BACKWARDS3.
+ * - socket-related syscalls: __ARCH_WANT_SYS_SOCKETCALL.
+ * - compat syscalls.
+ *
+ * This may or may not change in the future. User needs to take extra measures
+ * to handle such quirks explicitly, if necessary.
  *
  * This macro relies on BPF CO-RE support and virtual __kconfig externs.
  */
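
As an illustration of the documented behavior, a small program attaching to one of the non-quirky syscalls could look like the sketch below; unlinkat and the handler name are arbitrary choices, not part of this patch, and vmlinux.h is assumed to be generated with bpftool. Userspace can rely on skeleton auto-attach or call bpf_program__attach_ksyscall(), which resolves the __<arch>_sys_unlinkat / __se_sys_unlinkat symbol as in the libbpf change shown further down.

/* Sketch only: ksyscall programs auto-attach by section name, and the
 * macro unpacks syscall arguments whether or not the kernel uses
 * syscall wrappers.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>

SEC("ksyscall/unlinkat")
int BPF_KSYSCALL(handle_unlinkat, int dfd, const char *pathname, int flag)
{
	char name[64];

	bpf_probe_read_user_str(name, sizeof(name), pathname);
	bpf_printk("unlinkat(%d, %s, %d)", dfd, name, flag);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
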
index b01fe01..50d4181 100644 (file)
@@ -9995,6 +9995,10 @@ static const char *arch_specific_syscall_pfx(void)
        return "mips";
 #elif defined(__riscv)
        return "riscv";
+#elif defined(__powerpc64__)
+       return "powerpc64";
+#elif defined(__powerpc__)
+       return "powerpc";
 #else
        return NULL;
 #endif
@@ -10127,8 +10131,13 @@ struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog,
                return libbpf_err_ptr(-EINVAL);
 
        if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) {
+               /* arch_specific_syscall_pfx() should never return NULL here
+                * because it is guarded by kernel_supports(). However, the
+                * compiler does not know that, so keep an explicit fallback
+                * as well.
+                */
                snprintf(func_name, sizeof(func_name), "__%s_sys_%s",
-                        arch_specific_syscall_pfx(), syscall_name);
+                        arch_specific_syscall_pfx() ? : "", syscall_name);
        } else {
                snprintf(func_name, sizeof(func_name), "__se_sys_%s", syscall_name);
        }
index 0625adb..119e6e1 100644 (file)
@@ -355,6 +355,7 @@ LIBBPF_0.8.0 {
 
 LIBBPF_1.0.0 {
        global:
+               bpf_obj_get_opts;
                bpf_prog_query_opts;
                bpf_program__attach_ksyscall;
                btf__add_enum64;
diff --git a/tools/testing/selftests/bpf/DENYLIST b/tools/testing/selftests/bpf/DENYLIST
new file mode 100644 (file)
index 0000000..939de57
--- /dev/null
@@ -0,0 +1,6 @@
+# TEMPORARY
+get_stack_raw_tp    # spams with kernel warnings until next bpf -> bpf-next merge
+stacktrace_build_id_nmi
+stacktrace_build_id
+task_fd_query_rawtp
+varlen
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
new file mode 100644 (file)
index 0000000..e33cab3
--- /dev/null
@@ -0,0 +1,67 @@
+# TEMPORARY
+atomics                                  # attach(add): actual -524 <= expected 0                                      (trampoline)
+bpf_iter_setsockopt                      # JIT does not support calling kernel function                                (kfunc)
+bloom_filter_map                         # failed to find kernel BTF type ID of '__x64_sys_getpgid': -3                (?)
+bpf_tcp_ca                               # JIT does not support calling kernel function                                (kfunc)
+bpf_loop                                 # attaches to __x64_sys_nanosleep
+bpf_mod_race                             # BPF trampoline
+bpf_nf                                   # JIT does not support calling kernel function
+core_read_macros                         # unknown func bpf_probe_read#4                                               (overlapping)
+d_path                                   # failed to auto-attach program 'prog_stat': -524                             (trampoline)
+dummy_st_ops                             # test_run unexpected error: -524 (errno 524)                                 (trampoline)
+fentry_fexit                             # fentry attach failed: -524                                                  (trampoline)
+fentry_test                              # fentry_first_attach unexpected error: -524                                  (trampoline)
+fexit_bpf2bpf                            # freplace_attach_trace unexpected error: -524                                (trampoline)
+fexit_sleep                              # fexit_skel_load fexit skeleton failed                                       (trampoline)
+fexit_stress                             # fexit attach failed prog 0 failed: -524                                     (trampoline)
+fexit_test                               # fexit_first_attach unexpected error: -524                                   (trampoline)
+get_func_args_test                      # trampoline
+get_func_ip_test                         # get_func_ip_test__attach unexpected error: -524                             (trampoline)
+get_stack_raw_tp                         # user_stack corrupted user stack                                             (no backchain userspace)
+kfree_skb                                # attach fentry unexpected error: -524                                        (trampoline)
+kfunc_call                               # 'bpf_prog_active': not found in kernel BTF                                  (?)
+ksyms_module                             # test_ksyms_module__open_and_load unexpected error: -9                       (?)
+ksyms_module_libbpf                      # JIT does not support calling kernel function                                (kfunc)
+ksyms_module_lskel                       # test_ksyms_module_lskel__open_and_load unexpected error: -9                 (?)
+modify_return                            # modify_return attach failed: -524                                           (trampoline)
+module_attach                            # skel_attach skeleton attach failed: -524                                    (trampoline)
+mptcp
+kprobe_multi_test                        # relies on fentry
+netcnt                                   # failed to load BPF skeleton 'netcnt_prog': -7                               (?)
+probe_user                               # check_kprobe_res wrong kprobe res from probe read                           (?)
+recursion                                # skel_attach unexpected error: -524                                          (trampoline)
+ringbuf                                  # skel_load skeleton load failed                                              (?)
+sk_assign                                # Can't read on server: Invalid argument                                      (?)
+sk_lookup                                # endianness problem
+sk_storage_tracing                       # test_sk_storage_tracing__attach unexpected error: -524                      (trampoline)
+skc_to_unix_sock                         # could not attach BPF object unexpected error: -524                          (trampoline)
+socket_cookie                            # prog_attach unexpected error: -524                                          (trampoline)
+stacktrace_build_id                      # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2                   (?)
+tailcalls                                # tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls      (?)
+task_local_storage                       # failed to auto-attach program 'trace_exit_creds': -524                      (trampoline)
+test_bpffs                               # bpffs test  failed 255                                                      (iterator)
+test_bprm_opts                           # failed to auto-attach program 'secure_exec': -524                           (trampoline)
+test_ima                                 # failed to auto-attach program 'ima': -524                                   (trampoline)
+test_local_storage                       # failed to auto-attach program 'unlink_hook': -524                           (trampoline)
+test_lsm                                 # failed to find kernel BTF type ID of '__x64_sys_setdomainname': -3          (?)
+test_overhead                            # attach_fentry unexpected error: -524                                        (trampoline)
+test_profiler                            # unknown func bpf_probe_read_str#45                                          (overlapping)
+timer                                    # failed to auto-attach program 'test1': -524                                 (trampoline)
+timer_crash                              # trampoline
+timer_mim                                # failed to auto-attach program 'test1': -524                                 (trampoline)
+trace_ext                                # failed to auto-attach program 'test_pkt_md_access_new': -524                (trampoline)
+trace_printk                             # trace_printk__load unexpected error: -2 (errno 2)                           (?)
+trace_vprintk                            # trace_vprintk__open_and_load unexpected error: -9                           (?)
+trampoline_count                         # prog 'prog1': failed to attach: ERROR: strerror_r(-524)=22                  (trampoline)
+verif_stats                              # trace_vprintk__open_and_load unexpected error: -9                           (?)
+vmlinux                                  # failed to auto-attach program 'handle__fentry': -524                        (trampoline)
+xdp_adjust_tail                          # case-128 err 0 errno 28 retval 1 size 128 expect-size 3520                  (?)
+xdp_bonding                              # failed to auto-attach program 'trace_on_entry': -524                        (trampoline)
+xdp_bpf2bpf                              # failed to auto-attach program 'trace_on_entry': -524                        (trampoline)
+map_kptr                                 # failed to open_and_load program: -524 (trampoline)
+bpf_cookie                               # failed to open_and_load program: -524 (trampoline)
+xdp_do_redirect                          # prog_run_max_size unexpected error: -22 (errno 22)
+send_signal                              # intermittently fails to receive signal
+select_reuseport                         # intermittently fails on new s390x setup
+xdp_synproxy                             # JIT does not support calling kernel function                                (kfunc)
+unpriv_bpf_disabled                      # fentry
index c05904d..fabf0c0 100644 (file)
@@ -1,65 +1,64 @@
+CONFIG_BLK_DEV_LOOP=y
 CONFIG_BPF=y
-CONFIG_BPF_SYSCALL=y
-CONFIG_NET_CLS_BPF=m
 CONFIG_BPF_EVENTS=y
-CONFIG_TEST_BPF=m
+CONFIG_BPF_JIT=y
+CONFIG_BPF_LIRC_MODE2=y
+CONFIG_BPF_LSM=y
+CONFIG_BPF_STREAM_PARSER=y
+CONFIG_BPF_SYSCALL=y
 CONFIG_CGROUP_BPF=y
-CONFIG_NETDEVSIM=m
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_INGRESS=y
-CONFIG_NET_IPIP=y
-CONFIG_IPV6=y
-CONFIG_NET_IPGRE_DEMUX=y
-CONFIG_NET_IPGRE=y
-CONFIG_IPV6_GRE=y
-CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_HMAC=m
 CONFIG_CRYPTO_SHA256=m
-CONFIG_VXLAN=y
-CONFIG_GENEVE=y
-CONFIG_NET_CLS_FLOWER=m
-CONFIG_LWTUNNEL=y
-CONFIG_BPF_STREAM_PARSER=y
-CONFIG_XDP_SOCKETS=y
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_FPROBE=y
 CONFIG_FTRACE_SYSCALLS=y
-CONFIG_IPV6_TUNNEL=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_GENEVE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IMA=y
+CONFIG_IMA_READ_POLICY=y
+CONFIG_IMA_WRITE_POLICY=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_TARGET_SYNPROXY=y
+CONFIG_IPV6=y
+CONFIG_IPV6_FOU=m
+CONFIG_IPV6_FOU_TUNNEL=m
 CONFIG_IPV6_GRE=y
 CONFIG_IPV6_SEG6_BPF=y
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_TUNNEL=y
+CONFIG_LIRC=y
+CONFIG_LWTUNNEL=y
+CONFIG_MPLS=y
+CONFIG_MPLS_IPTUNNEL=m
+CONFIG_MPLS_ROUTING=m
+CONFIG_MPTCP=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_CLS_FLOWER=m
 CONFIG_NET_FOU=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_IPV6_FOU=m
-CONFIG_IPV6_FOU_TUNNEL=m
-CONFIG_MPLS=y
+CONFIG_NET_IPGRE=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPIP=y
 CONFIG_NET_MPLS_GSO=m
-CONFIG_MPLS_ROUTING=m
-CONFIG_MPLS_IPTUNNEL=m
-CONFIG_IPV6_SIT=m
-CONFIG_BPF_JIT=y
-CONFIG_BPF_LSM=y
-CONFIG_SECURITY=y
-CONFIG_RC_CORE=y
-CONFIG_LIRC=y
-CONFIG_BPF_LIRC_MODE2=y
-CONFIG_IMA=y
-CONFIG_SECURITYFS=y
-CONFIG_IMA_WRITE_POLICY=y
-CONFIG_IMA_READ_POLICY=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_FUNCTION_TRACER=y
-CONFIG_DYNAMIC_FTRACE=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_SCHED=y
+CONFIG_NETDEVSIM=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_SYNPROXY=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
+CONFIG_NF_CONNTRACK=y
 CONFIG_NF_DEFRAG_IPV4=y
 CONFIG_NF_DEFRAG_IPV6=y
-CONFIG_NF_CONNTRACK=y
+CONFIG_RC_CORE=y
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_TEST_BPF=m
 CONFIG_USERFAULTFD=y
-CONFIG_FPROBE=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_MPTCP=y
-CONFIG_NETFILTER_SYNPROXY=y
-CONFIG_NETFILTER_XT_TARGET_CT=y
-CONFIG_NETFILTER_XT_MATCH_STATE=y
-CONFIG_IP_NF_FILTER=y
-CONFIG_IP_NF_TARGET_SYNPROXY=y
-CONFIG_IP_NF_RAW=y
+CONFIG_VXLAN=y
+CONFIG_XDP_SOCKETS=y
diff --git a/tools/testing/selftests/bpf/config.s390x b/tools/testing/selftests/bpf/config.s390x
new file mode 100644 (file)
index 0000000..f8a7a25
--- /dev/null
@@ -0,0 +1,147 @@
+CONFIG_9P_FS=y
+CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
+CONFIG_AUDIT=y
+CONFIG_BLK_CGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BONDING=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_JIT_DEFAULT_ON=y
+CONFIG_BPF_PRELOAD=y
+CONFIG_BPF_PRELOAD_UMD=y
+CONFIG_BPFILTER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUPS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_CPUSETS=y
+CONFIG_CRASH_DUMP=y
+CONFIG_CRYPTO_USER_API_RNG=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_INFO_BTF=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_SG=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEVTMPFS=y
+CONFIG_EXPERT=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FANOTIFY=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_GDB_SCRIPTS=y
+CONFIG_HAVE_EBPF_JIT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_MARCH_Z10_FEATURES=y
+CONFIG_HAVE_MARCH_Z196_FEATURES=y
+CONFIG_HEADERS_INSTALL=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HUGETLBFS=y
+CONFIG_HW_RANDOM=y
+CONFIG_HZ_100=y
+CONFIG_IDLE_PAGE_TRACKING=y
+CONFIG_IKHEADERS=y
+CONFIG_INET6_ESP=y
+CONFIG_INET=y
+CONFIG_INET_ESP=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_IPVLAN=y
+CONFIG_JUMP_LABEL=y
+CONFIG_KERNEL_UNCOMPRESSED=y
+CONFIG_KPROBES=y
+CONFIG_KPROBES_ON_FTRACE=y
+CONFIG_KRETPROBES=y
+CONFIG_KSM=y
+CONFIG_LATENCYTOP=y
+CONFIG_LIVEPATCH=y
+CONFIG_LOCK_STAT=y
+CONFIG_MACVLAN=y
+CONFIG_MACVTAP=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MARCH_Z196=y
+CONFIG_MARCH_Z196_TUNE=y
+CONFIG_MEMCG=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULES=y
+CONFIG_NAMESPACES=y
+CONFIG_NET=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_KEY=y
+CONFIG_NET_SCH_FQ=y
+CONFIG_NET_VRF=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NF_TABLES=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NR_CPUS=256
+CONFIG_NUMA=y
+CONFIG_PACKET=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PCI=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROFILING=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PTDUMP_DEBUGFS=y
+CONFIG_RC_DEVICES=y
+CONFIG_RC_LOOPBACK=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SAMPLE_SECCOMP=y
+CONFIG_SAMPLES=y
+CONFIG_SCHED_TRACER=y
+CONFIG_SCSI=y
+CONFIG_SCSI_VIRTIO=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_STACK_TRACER=y
+CONFIG_STATIC_KEYS_SELFTEST=y
+CONFIG_SYSVIPC=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_DCTCP=y
+CONFIG_TLS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TUN=y
+CONFIG_UNIX=y
+CONFIG_UPROBES=y
+CONFIG_USELIB=y
+CONFIG_USER_NS=y
+CONFIG_VETH=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VLAN_8021Q=y
+CONFIG_XFRM_USER=y
diff --git a/tools/testing/selftests/bpf/config.x86_64 b/tools/testing/selftests/bpf/config.x86_64
new file mode 100644 (file)
index 0000000..f0859a1
--- /dev/null
@@ -0,0 +1,251 @@
+CONFIG_9P_FS=y
+CONFIG_9P_FS_POSIX_ACL=y
+CONFIG_9P_FS_SECURITY=y
+CONFIG_AGP=y
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+CONFIG_AGP_SIS=y
+CONFIG_AGP_VIA=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_AUDIT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BINFMT_MISC=y
+CONFIG_BLK_CGROUP=y
+CONFIG_BLK_CGROUP_IOLATENCY=y
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BONDING=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTTIME_TRACING=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_KPROBE_OVERRIDE=y
+CONFIG_BPF_PRELOAD=y
+CONFIG_BPF_PRELOAD_UMD=y
+CONFIG_BPFILTER=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUPS=y
+CONFIG_CMA=y
+CONFIG_CMA_AREAS=7
+CONFIG_COMPAT_32BIT_TIME=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPUSETS=y
+CONFIG_CRC_T10DIF=y
+CONFIG_CRYPTO_BLAKE2B=y
+CONFIG_CRYPTO_DEV_VIRTIO=m
+CONFIG_CRYPTO_SEQIV=y
+CONFIG_CRYPTO_XXHASH=y
+CONFIG_DCB=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_DEBUG_INFO_BTF=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEFAULT_FQ_CODEL=y
+CONFIG_DEFAULT_RENO=y
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_EFI=y
+CONFIG_EFI_STUB=y
+CONFIG_EXPERT=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FAIL_FUNCTION=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_FB_VESA=y
+CONFIG_FONT_8x16=y
+CONFIG_FONT_MINI_4x6=y
+CONFIG_FONTS=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_GART_IOMMU=y
+CONFIG_GENERIC_PHY=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GREENASIA=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_KYE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HPET=y
+CONFIG_HUGETLBFS=y
+CONFIG_HWPOISON_INJECT=y
+CONFIG_HZ_1000=y
+CONFIG_INET=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INTEL_POWERCLAMP=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IRQ_POLL=y
+CONFIG_JUMP_LABEL=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_KEXEC=y
+CONFIG_KPROBES=y
+CONFIG_KSM=y
+CONFIG_LEGACY_VSYSCALL_NONE=y
+CONFIG_LOG_BUF_SHIFT=21
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=0
+CONFIG_LOGO=y
+CONFIG_LSM="selinux,bpf,integrity"
+CONFIG_MAC_PARTITION=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MCORE2=y
+CONFIG_MEMCG=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULES=y
+CONFIG_MODVERSIONS=y
+CONFIG_NAMESPACES=y
+CONFIG_NET=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_NET_SCH_DEFAULT=y
+CONFIG_NET_SCH_FQ_CODEL=y
+CONFIG_NET_TC_SKB_EXT=y
+CONFIG_NET_VRF=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NETFILTER_NETLINK_QUEUE=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETLABEL=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NO_HZ=y
+CONFIG_NR_CPUS=128
+CONFIG_NUMA=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_NVMEM=y
+CONFIG_OSF_PARTITION=y
+CONFIG_PACKET=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PCI=y
+CONFIG_PCI_IOV=y
+CONFIG_PCI_MSI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PHYSICAL_ALIGN=0x1000000
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_PREEMPT=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROFILING=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_RC_DEVICES=y
+CONFIG_RC_LOOPBACK=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_SCHED_STACK_END_CHECK=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SGI_PARTITION=y
+CONFIG_SMP=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_SYNC_FILE=y
+CONFIG_SYSVIPC=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_MD5SIG=y
+CONFIG_TLS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
+CONFIG_TUN=y
+CONFIG_UNIX=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_USER_NS=y
+CONFIG_VALIDATE_FS_PARSER=y
+CONFIG_VETH=y
+CONFIG_VIRT_DRIVERS=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VLAN_8021Q=y
+CONFIG_X86_ACPI_CPUFREQ=y
+CONFIG_X86_CPUID=y
+CONFIG_X86_MSR=y
+CONFIG_X86_POWERNOW_K8=y
+CONFIG_XDP_SOCKETS_DIAG=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_USER=y
+CONFIG_ZEROPLUS_FF=y
index abf890d..34dbd2a 100644 (file)
@@ -4,25 +4,35 @@
 /* TODO: corrupts other tests uses connect() */
 void serial_test_probe_user(void)
 {
-       const char *prog_name = "handle_sys_connect";
+       static const char *const prog_names[] = {
+               "handle_sys_connect",
+#if defined(__s390x__)
+               "handle_sys_socketcall",
+#endif
+       };
+       enum { prog_count = ARRAY_SIZE(prog_names) };
        const char *obj_file = "./test_probe_user.o";
        DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, );
        int err, results_map_fd, sock_fd, duration = 0;
        struct sockaddr curr, orig, tmp;
        struct sockaddr_in *in = (struct sockaddr_in *)&curr;
-       struct bpf_link *kprobe_link = NULL;
-       struct bpf_program *kprobe_prog;
+       struct bpf_link *kprobe_links[prog_count] = {};
+       struct bpf_program *kprobe_progs[prog_count];
        struct bpf_object *obj;
        static const int zero = 0;
+       size_t i;
 
        obj = bpf_object__open_file(obj_file, &opts);
        if (!ASSERT_OK_PTR(obj, "obj_open_file"))
                return;
 
-       kprobe_prog = bpf_object__find_program_by_name(obj, prog_name);
-       if (CHECK(!kprobe_prog, "find_probe",
-                 "prog '%s' not found\n", prog_name))
-               goto cleanup;
+       for (i = 0; i < prog_count; i++) {
+               kprobe_progs[i] =
+                       bpf_object__find_program_by_name(obj, prog_names[i]);
+               if (CHECK(!kprobe_progs[i], "find_probe",
+                         "prog '%s' not found\n", prog_names[i]))
+                       goto cleanup;
+       }
 
        err = bpf_object__load(obj);
        if (CHECK(err, "obj_load", "err %d\n", err))
@@ -33,9 +43,11 @@ void serial_test_probe_user(void)
                  "err %d\n", results_map_fd))
                goto cleanup;
 
-       kprobe_link = bpf_program__attach(kprobe_prog);
-       if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe"))
-               goto cleanup;
+       for (i = 0; i < prog_count; i++) {
+               kprobe_links[i] = bpf_program__attach(kprobe_progs[i]);
+               if (!ASSERT_OK_PTR(kprobe_links[i], "attach_kprobe"))
+                       goto cleanup;
+       }
 
        memset(&curr, 0, sizeof(curr));
        in->sin_family = AF_INET;
@@ -69,6 +81,7 @@ void serial_test_probe_user(void)
                  inet_ntoa(in->sin_addr), ntohs(in->sin_port)))
                goto cleanup;
 cleanup:
-       bpf_link__destroy(kprobe_link);
+       for (i = 0; i < prog_count; i++)
+               bpf_link__destroy(kprobe_links[i]);
        bpf_object__close(obj);
 }
index d71226e..d63a20f 100644 (file)
@@ -64,7 +64,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,
                ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read");
 
                /* wait a little for signal handler */
-               for (int i = 0; i < 100000000 && !sigusr1_received; i++)
+               for (int i = 0; i < 1000000000 && !sigusr1_received; i++)
                        j /= i + j + 1;
 
                buf[0] = sigusr1_received ? '2' : '0';
index 3bba4a2..eea2741 100644 (file)
@@ -82,6 +82,7 @@
 
 #define MAC_TUNL_DEV0 "52:54:00:d9:01:00"
 #define MAC_TUNL_DEV1 "52:54:00:d9:02:00"
+#define MAC_VETH1 "52:54:00:d9:03:00"
 
 #define VXLAN_TUNL_DEV0 "vxlan00"
 #define VXLAN_TUNL_DEV1 "vxlan11"
 static int config_device(void)
 {
        SYS("ip netns add at_ns0");
-       SYS("ip link add veth0 type veth peer name veth1");
+       SYS("ip link add veth0 address " MAC_VETH1 " type veth peer name veth1");
        SYS("ip link set veth0 netns at_ns0");
        SYS("ip addr add " IP4_ADDR1_VETH1 "/24 dev veth1");
-       SYS("ip addr add " IP4_ADDR2_VETH1 "/24 dev veth1");
        SYS("ip link set dev veth1 up mtu 1500");
        SYS("ip netns exec at_ns0 ip addr add " IP4_ADDR_VETH0 "/24 dev veth0");
        SYS("ip netns exec at_ns0 ip link set dev veth0 up mtu 1500");
@@ -140,6 +140,8 @@ static int add_vxlan_tunnel(void)
            VXLAN_TUNL_DEV0, IP4_ADDR_TUNL_DEV0);
        SYS("ip netns exec at_ns0 ip neigh add %s lladdr %s dev %s",
            IP4_ADDR_TUNL_DEV1, MAC_TUNL_DEV1, VXLAN_TUNL_DEV0);
+       SYS("ip netns exec at_ns0 ip neigh add %s lladdr %s dev veth0",
+           IP4_ADDR2_VETH1, MAC_VETH1);
 
        /* root namespace */
        SYS("ip link add dev %s type vxlan external gbp dstport 4789",
@@ -277,6 +279,17 @@ static void test_vxlan_tunnel(void)
        if (attach_tc_prog(&tc_hook, get_src_prog_fd, set_src_prog_fd))
                goto done;
 
+       /* load and attach bpf prog to veth dev tc hook point */
+       ifindex = if_nametoindex("veth1");
+       if (!ASSERT_NEQ(ifindex, 0, "veth1 ifindex"))
+               goto done;
+       tc_hook.ifindex = ifindex;
+       set_dst_prog_fd = bpf_program__fd(skel->progs.veth_set_outer_dst);
+       if (!ASSERT_GE(set_dst_prog_fd, 0, "bpf_program__fd"))
+               goto done;
+       if (attach_tc_prog(&tc_hook, set_dst_prog_fd, -1))
+               goto done;
+
        /* load and attach prog set_md to tunnel dev tc hook point at_ns0 */
        nstoken = open_netns("at_ns0");
        if (!ASSERT_OK_PTR(nstoken, "setns src"))
index 8e14950..a8e501a 100644 (file)
@@ -7,8 +7,7 @@
 
 static struct sockaddr_in old;
 
-SEC("ksyscall/connect")
-int BPF_KSYSCALL(handle_sys_connect, int fd, struct sockaddr_in *uservaddr, int addrlen)
+static int handle_sys_connect_common(struct sockaddr_in *uservaddr)
 {
        struct sockaddr_in new;
 
@@ -19,4 +18,30 @@ int BPF_KSYSCALL(handle_sys_connect, int fd, struct sockaddr_in *uservaddr, int
        return 0;
 }
 
+SEC("ksyscall/connect")
+int BPF_KSYSCALL(handle_sys_connect, int fd, struct sockaddr_in *uservaddr,
+                int addrlen)
+{
+       return handle_sys_connect_common(uservaddr);
+}
+
+#if defined(bpf_target_s390)
+#ifndef SYS_CONNECT
+#define SYS_CONNECT 3
+#endif
+
+SEC("ksyscall/socketcall")
+int BPF_KSYSCALL(handle_sys_socketcall, int call, unsigned long *args)
+{
+       if (call == SYS_CONNECT) {
+               struct sockaddr_in *uservaddr;
+
+               bpf_probe_read_user(&uservaddr, sizeof(uservaddr), &args[1]);
+               return handle_sys_connect_common(uservaddr);
+       }
+
+       return 0;
+}
+#endif
+
 char _license[] SEC("license") = "GPL";
index 17f2f32..df0673c 100644 (file)
 #include <linux/if_packet.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
+#include <linux/icmp.h>
 #include <linux/types.h>
 #include <linux/socket.h>
 #include <linux/pkt_cls.h>
 #include <linux/erspan.h>
+#include <linux/udp.h>
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_endian.h>
 
 #define log_err(__ret) bpf_printk("ERROR line:%d ret:%d\n", __LINE__, __ret)
 
+#define VXLAN_UDP_PORT 4789
+
+/* Only IPv4 address assigned to veth1.
+ * 172.16.1.200
+ */
+#define ASSIGNED_ADDR_VETH1 0xac1001c8
+
 struct geneve_opt {
        __be16  opt_class;
        __u8    type;
@@ -33,6 +42,11 @@ struct geneve_opt {
        __u8    opt_data[8]; /* hard-coded to 8 byte */
 };
 
+struct vxlanhdr {
+       __be32 vx_flags;
+       __be32 vx_vni;
+} __attribute__((packed));
+
 struct vxlan_metadata {
        __u32     gbp;
 };
@@ -369,14 +383,8 @@ int vxlan_get_tunnel_src(struct __sk_buff *skb)
        int ret;
        struct bpf_tunnel_key key;
        struct vxlan_metadata md;
+       __u32 orig_daddr;
        __u32 index = 0;
-       __u32 *local_ip = NULL;
-
-       local_ip = bpf_map_lookup_elem(&local_ip_map, &index);
-       if (!local_ip) {
-               log_err(ret);
-               return TC_ACT_SHOT;
-       }
 
        ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
        if (ret < 0) {
@@ -390,11 +398,10 @@ int vxlan_get_tunnel_src(struct __sk_buff *skb)
                return TC_ACT_SHOT;
        }
 
-       if (key.local_ipv4 != *local_ip || md.gbp != 0x800FF) {
+       if (key.local_ipv4 != ASSIGNED_ADDR_VETH1 || md.gbp != 0x800FF) {
                bpf_printk("vxlan key %d local ip 0x%x remote ip 0x%x gbp 0x%x\n",
                           key.tunnel_id, key.local_ipv4,
                           key.remote_ipv4, md.gbp);
-               bpf_printk("local_ip 0x%x\n", *local_ip);
                log_err(ret);
                return TC_ACT_SHOT;
        }
@@ -402,6 +409,61 @@ int vxlan_get_tunnel_src(struct __sk_buff *skb)
        return TC_ACT_OK;
 }
 
+SEC("tc")
+int veth_set_outer_dst(struct __sk_buff *skb)
+{
+       struct ethhdr *eth = (struct ethhdr *)(long)skb->data;
+       __u32 assigned_ip = bpf_htonl(ASSIGNED_ADDR_VETH1);
+       void *data_end = (void *)(long)skb->data_end;
+       struct udphdr *udph;
+       struct iphdr *iph;
+       __u32 index = 0;
+       int ret = 0;
+       int shrink;
+       __s64 csum;
+
+       if ((void *)eth + sizeof(*eth) > data_end) {
+               log_err(ret);
+               return TC_ACT_SHOT;
+       }
+
+       if (eth->h_proto != bpf_htons(ETH_P_IP))
+               return TC_ACT_OK;
+
+       iph = (struct iphdr *)(eth + 1);
+       if ((void *)iph + sizeof(*iph) > data_end) {
+               log_err(ret);
+               return TC_ACT_SHOT;
+       }
+       if (iph->protocol != IPPROTO_UDP)
+               return TC_ACT_OK;
+
+       udph = (struct udphdr *)(iph + 1);
+       if ((void *)udph + sizeof(*udph) > data_end) {
+               log_err(ret);
+               return TC_ACT_SHOT;
+       }
+       if (udph->dest != bpf_htons(VXLAN_UDP_PORT))
+               return TC_ACT_OK;
+
+       if (iph->daddr != assigned_ip) {
+               csum = bpf_csum_diff(&iph->daddr, sizeof(__u32), &assigned_ip,
+                                    sizeof(__u32), 0);
+               if (bpf_skb_store_bytes(skb, ETH_HLEN + offsetof(struct iphdr, daddr),
+                                       &assigned_ip, sizeof(__u32), 0) < 0) {
+                       log_err(ret);
+                       return TC_ACT_SHOT;
+               }
+               if (bpf_l3_csum_replace(skb, ETH_HLEN + offsetof(struct iphdr, check),
+                                       0, csum, 0) < 0) {
+                       log_err(ret);
+                       return TC_ACT_SHOT;
+               }
+               bpf_skb_change_type(skb, PACKET_HOST);
+       }
+       return TC_ACT_OK;
+}
+
 SEC("tc")
 int ip6vxlan_set_tunnel_dst(struct __sk_buff *skb)
 {
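The new veth_set_outer_dst program above rewrites the outer IPv4 destination of incoming VXLAN packets (UDP port 4789) to the address hard-coded for veth1 and patches the IP checksum accordingly. As a hedged sketch of how a "tc" section program like this is typically attached with iproute2 (the object file name is a placeholder; the selftest harness itself loads and attaches its programs programmatically):

    tc qdisc add dev veth1 clsact
    tc filter add dev veth1 ingress bpf direct-action \
            obj <compiled_tunnel_progs>.o sec tc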
index c639f2e..3561c97 100644 (file)
@@ -1604,11 +1604,8 @@ int main(int argc, char **argv)
                struct prog_test_def *test = &prog_test_defs[i];
 
                test->test_num = i + 1;
-               if (should_run(&env.test_selector,
-                               test->test_num, test->test_name))
-                       test->should_run = true;
-               else
-                       test->should_run = false;
+               test->should_run = should_run(&env.test_selector,
+                                             test->test_num, test->test_name);
 
                if ((test->run_test == NULL && test->run_serial_test == NULL) ||
                    (test->run_test != NULL && test->run_serial_test != NULL)) {
index e0bb04a..b86ae4a 100755 (executable)
@@ -30,8 +30,7 @@ DEFAULT_COMMAND="./test_progs"
 MOUNT_DIR="mnt"
 ROOTFS_IMAGE="root.img"
 OUTPUT_DIR="$HOME/.bpf_selftests"
-KCONFIG_URL="https://raw.githubusercontent.com/libbpf/libbpf/master/travis-ci/vmtest/configs/config-latest.${ARCH}"
-KCONFIG_API_URL="https://api.github.com/repos/libbpf/libbpf/contents/travis-ci/vmtest/configs/config-latest.${ARCH}"
+KCONFIG_REL_PATHS=("tools/testing/selftests/bpf/config" "tools/testing/selftests/bpf/config.${ARCH}")
 INDEX_URL="https://raw.githubusercontent.com/libbpf/ci/master/INDEX"
 NUM_COMPILE_JOBS="$(nproc)"
 LOG_FILE_BASE="$(date +"bpf_selftests.%Y-%m-%d_%H-%M-%S")"
@@ -269,26 +268,42 @@ is_rel_path()
        [[ ${path:0:1} != "/" ]]
 }
 
+do_update_kconfig()
+{
+       local kernel_checkout="$1"
+       local kconfig_file="$2"
+
+       rm -f "$kconfig_file" 2> /dev/null
+
+       for config in "${KCONFIG_REL_PATHS[@]}"; do
+               local kconfig_src="${kernel_checkout}/${config}"
+               cat "$kconfig_src" >> "$kconfig_file"
+       done
+}
+
 update_kconfig()
 {
-       local kconfig_file="$1"
-       local update_command="curl -sLf ${KCONFIG_URL} -o ${kconfig_file}"
-       # Github does not return the "last-modified" header when retrieving the
-       # raw contents of the file. Use the API call to get the last-modified
-       # time of the kernel config and only update the config if it has been
-       # updated after the previously cached config was created. This avoids
-       # unnecessarily compiling the kernel and selftests.
-       if [[ -f "${kconfig_file}" ]]; then
-               local last_modified_date="$(curl -sL -D - "${KCONFIG_API_URL}" -o /dev/null | \
-                       grep "last-modified" | awk -F ': ' '{print $2}')"
-               local remote_modified_timestamp="$(date -d "${last_modified_date}" +"%s")"
-               local local_creation_timestamp="$(stat -c %Y "${kconfig_file}")"
+       local kernel_checkout="$1"
+       local kconfig_file="$2"
 
-               if [[ "${remote_modified_timestamp}" -gt "${local_creation_timestamp}" ]]; then
-                       ${update_command}
-               fi
+       if [[ -f "${kconfig_file}" ]]; then
+               local local_modified="$(stat -c %Y "${kconfig_file}")"
+
+               for config in "${KCONFIG_REL_PATHS[@]}"; do
+                       local kconfig_src="${kernel_checkout}/${config}"
+                       local src_modified="$(stat -c %Y "${kconfig_src}")"
+                       # Only update the config if it has been updated after the
+                       # previously cached config was created. This avoids
+                       # unnecessarily compiling the kernel and selftests.
+                       if [[ "${src_modified}" -gt "${local_modified}" ]]; then
+                               do_update_kconfig "$kernel_checkout" "$kconfig_file"
+                               # Once we have found one outdated configuration,
+                               # there is no need to check the other ones.
+                               break
+                       fi
+               done
        else
-               ${update_command}
+               do_update_kconfig "$kernel_checkout" "$kconfig_file"
        fi
 }
 
@@ -372,7 +387,7 @@ main()
 
        mkdir -p "${OUTPUT_DIR}"
        mkdir -p "${mount_dir}"
-       update_kconfig "${kconfig_file}"
+       update_kconfig "${kernel_checkout}" "${kconfig_file}"
 
        recompile_kernel "${kernel_checkout}" "${make_command}"
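With the URL-based config gone, the cached kconfig is now assembled purely from the in-tree fragments listed in KCONFIG_REL_PATHS. A minimal sketch of what do_update_kconfig() amounts to (assuming an x86_64 checkout; kconfig_file is the script's cached config path under OUTPUT_DIR):

    cat "${kernel_checkout}/tools/testing/selftests/bpf/config" \
        "${kernel_checkout}/tools/testing/selftests/bpf/config.x86_64" \
        > "${kconfig_file}"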
 
diff --git a/tools/testing/selftests/drivers/net/dsa/Makefile b/tools/testing/selftests/drivers/net/dsa/Makefile
new file mode 100644 (file)
index 0000000..2a731d5
--- /dev/null
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0+ OR MIT
+
+TEST_PROGS = bridge_locked_port.sh \
+       bridge_mdb.sh \
+       bridge_mld.sh \
+       bridge_vlan_aware.sh \
+       bridge_vlan_mcast.sh \
+       bridge_vlan_unaware.sh \
+       local_termination.sh \
+       no_forwarding.sh \
+       test_bridge_fdb_stress.sh
+
+TEST_PROGS_EXTENDED := lib.sh
+
+TEST_FILES := forwarding.config
+
+include ../../../lib.mk
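A hedged usage sketch for the new DSA selftest directory (assuming it is also registered in the top-level selftests TARGETS list, which is not part of this hunk):

    make -C tools/testing/selftests TARGETS=drivers/net/dsa run_tests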
index 71b3066..616ed40 100644 (file)
@@ -3,6 +3,6 @@
 TEST_PROGS := gpio-mockup.sh gpio-sim.sh
 TEST_FILES := gpio-mockup-sysfs.sh
 TEST_GEN_PROGS_EXTENDED := gpio-mockup-cdev gpio-chip-info gpio-line-name
-CFLAGS += -O2 -g -Wall -I../../../../usr/include/
+CFLAGS += -O2 -g -Wall -I../../../../usr/include/ $(KHDR_INCLUDES)
 
 include ../lib.mk
index 4158da0..2237d1a 100644 (file)
@@ -82,8 +82,9 @@ static int next_cpu(int cpu)
        return cpu;
 }
 
-static void *migration_worker(void *ign)
+static void *migration_worker(void *__rseq_tid)
 {
+       pid_t rseq_tid = (pid_t)(unsigned long)__rseq_tid;
        cpu_set_t allowed_mask;
        int r, i, cpu;
 
@@ -106,7 +107,7 @@ static void *migration_worker(void *ign)
                 * stable, i.e. while changing affinity is in-progress.
                 */
                smp_wmb();
-               r = sched_setaffinity(0, sizeof(allowed_mask), &allowed_mask);
+               r = sched_setaffinity(rseq_tid, sizeof(allowed_mask), &allowed_mask);
                TEST_ASSERT(!r, "sched_setaffinity failed, errno = %d (%s)",
                            errno, strerror(errno));
                smp_wmb();
@@ -231,7 +232,8 @@ int main(int argc, char *argv[])
        vm = vm_create_default(VCPU_ID, 0, guest_code);
        ucall_init(vm, NULL);
 
-       pthread_create(&migration_thread, NULL, migration_worker, 0);
+       pthread_create(&migration_thread, NULL, migration_worker,
+                      (void *)(unsigned long)gettid());
 
        for (i = 0; !done; i++) {
                vcpu_run(vm, VCPU_ID);
index 80628bf..cd86d37 100644 (file)
@@ -35,6 +35,8 @@ TEST_PROGS += cmsg_time.sh cmsg_ipv6.sh
 TEST_PROGS += srv6_end_dt46_l3vpn_test.sh
 TEST_PROGS += srv6_end_dt4_l3vpn_test.sh
 TEST_PROGS += srv6_end_dt6_l3vpn_test.sh
+TEST_PROGS += srv6_hencap_red_l3vpn_test.sh
+TEST_PROGS += srv6_hl2encap_red_l2vpn_test.sh
 TEST_PROGS += vrf_strict_mode_test.sh
 TEST_PROGS += arp_ndisc_evict_nocarrier.sh
 TEST_PROGS += ndisc_unsolicited_na_test.sh
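The two SRv6 scripts added to TEST_PROGS above can also be run standalone; as their own checks show further below, they require root privileges, an iproute2 with encap.red support, and the vrf and dummy modules respectively. A usage sketch:

    cd tools/testing/selftests/net
    sudo ./srv6_hencap_red_l3vpn_test.sh
    sudo ./srv6_hl2encap_red_l2vpn_test.sh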
diff --git a/tools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh b/tools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh
new file mode 100755 (executable)
index 0000000..28a7756
--- /dev/null
@@ -0,0 +1,879 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# author: Andrea Mayer <andrea.mayer@uniroma2.it>
+#
+# This script is designed for testing the SRv6 H.Encaps.Red behavior.
+#
+# Below is depicted the IPv6 network of an operator which offers advanced
+# IPv4/IPv6 VPN services to hosts, enabling them to communicate with each
+# other.
+# In this example, hosts hs-1 and hs-2 are connected through an IPv4/IPv6 VPN
+# service, while hs-3 and hs-4 are connected using an IPv6 only VPN.
+#
+# Routers rt-1,rt-2,rt-3 and rt-4 implement IPv4/IPv6 L3 VPN services
+# leveraging the SRv6 architecture. The key components for such VPNs are:
+#
+#   i) The SRv6 H.Encaps.Red behavior applies SRv6 Policies on traffic received
+#      by connected hosts, initiating the VPN tunnel. Such a behavior is an
+#      optimization of the SRv6 H.Encap aiming to reduce the length of the SID
+#      List carried in the pushed SRH. Specifically, the H.Encaps.Red removes
+#      the first SID contained in the SID List (i.e. SRv6 Policy) by storing it
+#      into the IPv6 Destination Address. When an SRv6 Policy is made of only
+#      one SID, the SRv6 H.Encaps.Red behavior omits the SRH altogether and
+#      pushes that SID directly into the IPv6 DA;
+#
+#  ii) The SRv6 End behavior advances the active SID in the SID List carried by
+#      the SRH;
+#
+# iii) The SRv6 End.DT46 behavior is used for removing the SRv6 Policy and,
+#      thus, it terminates the VPN tunnel. Such a behavior is capable of
+#      handling, at the same time, both tunneled IPv4 and IPv6 traffic.
+#
+#
+#               cafe::1                      cafe::2
+#              10.0.0.1                     10.0.0.2
+#             +--------+                   +--------+
+#             |        |                   |        |
+#             |  hs-1  |                   |  hs-2  |
+#             |        |                   |        |
+#             +---+----+                   +--- +---+
+#    cafe::/64    |                             |      cafe::/64
+#  10.0.0.0/24    |                             |    10.0.0.0/24
+#             +---+----+                   +----+---+
+#             |        |  fcf0:0:1:2::/64  |        |
+#             |  rt-1  +-------------------+  rt-2  |
+#             |        |                   |        |
+#             +---+----+                   +----+---+
+#                 |      .               .      |
+#                 |  fcf0:0:1:3::/64   .        |
+#                 |          .       .          |
+#                 |            .   .            |
+# fcf0:0:1:4::/64 |              .              | fcf0:0:2:3::/64
+#                 |            .   .            |
+#                 |          .       .          |
+#                 |  fcf0:0:2:4::/64   .        |
+#                 |      .               .      |
+#             +---+----+                   +----+---+
+#             |        |                   |        |
+#             |  rt-4  +-------------------+  rt-3  |
+#             |        |  fcf0:0:3:4::/64  |        |
+#             +---+----+                   +----+---+
+#    cafe::/64    |                             |      cafe::/64
+#  10.0.0.0/24    |                             |    10.0.0.0/24
+#             +---+----+                   +--- +---+
+#             |        |                   |        |
+#             |  hs-4  |                   |  hs-3  |
+#             |        |                   |        |
+#             +--------+                   +--------+
+#               cafe::4                      cafe::3
+#              10.0.0.4                     10.0.0.3
+#
+#
+# Every fcf0:0:x:y::/64 network interconnects the SRv6 routers rt-x with rt-y
+# in the IPv6 operator network.
+#
+# Local SID table
+# ===============
+#
+# Each SRv6 router is configured with a Local SID table in which SIDs are
+# stored. Considering the given SRv6 router rt-x, at least two SIDs are
+# configured in the Local SID table:
+#
+#   Local SID table for SRv6 router rt-x
+#   +----------------------------------------------------------+
+#   |fcff:x::e is associated with the SRv6 End behavior        |
+#   |fcff:x::d46 is associated with the SRv6 End.DT46 behavior |
+#   +----------------------------------------------------------+
+#
+# The fcff::/16 prefix is reserved by the operator for implementing SRv6 VPN
+# services. Reachability of SIDs is ensured by proper configuration of the IPv6
+# operator's network and SRv6 routers.
+#
+# SRv6 Policies
+# =============
+#
+# An SRv6 ingress router applies SRv6 policies to the traffic received from a
+# connected host. SRv6 policy enforcement consists of encapsulating the
+# received traffic into a new IPv6 packet with a given SID List contained in
+# the SRH.
+#
+# IPv4/IPv6 VPN between hs-1 and hs-2
+# -----------------------------------
+#
+# Hosts hs-1 and hs-2 are connected using dedicated IPv4/IPv6 VPNs.
+# Specifically, packets generated from hs-1 and directed towards hs-2 are
+# handled by rt-1 which applies the following SRv6 Policies:
+#
+#   i.a) IPv6 traffic, SID List=fcff:3::e,fcff:4::e,fcff:2::d46
+#  ii.a) IPv4 traffic, SID List=fcff:2::d46
+#
+# Policy (i.a) steers tunneled IPv6 traffic through SRv6 routers
+# rt-3,rt-4,rt-2. Instead, Policy (ii.a) steers tunneled IPv4 traffic through
+# rt-2.
+# The H.Encaps.Red reduces the SID List (i.a) carried in SRH by removing the
+# first SID (fcff:3::e) and pushing it into the IPv6 DA. In case of IPv4
+# traffic, the H.Encaps.Red omits the SRH altogether, since the SID
+# List (ii.a) consists of only one SID that can be stored directly in the IPv6
+# DA.
+#
+# On the reverse path (i.e. from hs-2 to hs-1), rt-2 applies the following
+# policies:
+#
+#   i.b) IPv6 traffic, SID List=fcff:1::d46
+#  ii.b) IPv4 traffic, SID List=fcff:4::e,fcff:3::e,fcff:1::d46
+#
+# Policy (i.b) steers tunneled IPv6 traffic through the SRv6 router rt-1.
+# Conversely, Policy (ii.b) steers tunneled IPv4 traffic through SRv6 routers
+# rt-4,rt-3,rt-1.
+# In case of (i.b), the H.Encaps.Red omits the SRH altogether by pushing the
+# single SID (fcff:1::d46) directly into the IPv6 DA.
+# The H.Encaps.Red reduces the SID List (ii.b) in the SRH by removing the first
+# SID (fcff:4::e) and pushing it into the IPv6 DA.
+#
+# In summary:
+#  hs-1->hs-2 |IPv6 DA=fcff:3::e|SRH SIDs=fcff:4::e,fcff:2::d46|IPv6|...| (i.a)
+#  hs-1->hs-2 |IPv6 DA=fcff:2::d46|IPv4|...|                              (ii.a)
+#
+#  hs-2->hs-1 |IPv6 DA=fcff:1::d46|IPv6|...|                              (i.b)
+#  hs-2->hs-1 |IPv6 DA=fcff:4::e|SRH SIDs=fcff:3::e,fcff:1::d46|IPv4|...| (ii.b)
+#
+#
+# IPv6 VPN between hs-3 and hs-4
+# ------------------------------
+#
+# Hosts hs-3 and hs-4 are connected using a dedicated IPv6 only VPN.
+# Specifically, packets generated from hs-3 and directed towards hs-4 are
+# handled by rt-3 which applies the following SRv6 Policy:
+#
+#  i.c) IPv6 traffic, SID List=fcff:2::e,fcff:4::d46
+#
+# Policy (i.c) steers tunneled IPv6 traffic through SRv6 routers rt-2,rt-4.
+# The H.Encaps.Red reduces the SID List (i.c) carried in SRH by pushing the
+# first SID (fcff:2::e) in the IPv6 DA.
+#
+# On the reverse path (i.e. from hs-4 to hs-3) the router rt-4 applies the
+# following SRv6 Policy:
+#
+#  i.d) IPv6 traffic, SID List=fcff:1::e,fcff:3::d46.
+#
+# Policy (i.d) steers tunneled IPv6 traffic through SRv6 routers rt-1,rt-3.
+# The H.Encaps.Red reduces the SID List (i.d) carried in SRH by pushing the
+# first SID (fcff:1::e) in the IPv6 DA.
+#
+# In summary:
+#  hs-3->hs-4 |IPv6 DA=fcff:2::e|SRH SIDs=fcff:4::d46|IPv6|...| (i.c)
+#  hs-4->hs-3 |IPv6 DA=fcff:1::e|SRH SIDs=fcff:3::d46|IPv6|...| (i.d)
+#
+
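# For reference, a minimal sketch of the two iproute2 commands that realize
# the hs-1 -> hs-2 IPv6 leg described above, using the names defined below
# (vrf-100 is VRF_DEVNAME, table 90 is LOCALSID_TABLE_ID); they are executed
# inside the rt-1 and rt-2 network namespaces respectively:
#
#   # rt-1: H.Encaps.Red ingress policy towards cafe::2
#   ip -6 route add cafe::2 vrf vrf-100 \
#           encap seg6 mode encap.red segs fcff:3::e,fcff:4::e,fcff:2::d46 \
#           dev vrf-100
#
#   # rt-2: End.DT46 SID terminating the VPN into VRF table 100
#   ip -6 route add fcff:2::d46 table 90 \
#           encap seg6local action End.DT46 vrftable 100 dev vrf-100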
+# Kselftest framework requirement - SKIP code is 4.
+readonly ksft_skip=4
+
+readonly RDMSUFF="$(mktemp -u XXXXXXXX)"
+readonly VRF_TID=100
+readonly VRF_DEVNAME="vrf-${VRF_TID}"
+readonly RT2HS_DEVNAME="veth-t${VRF_TID}"
+readonly LOCALSID_TABLE_ID=90
+readonly IPv6_RT_NETWORK=fcf0:0
+readonly IPv6_HS_NETWORK=cafe
+readonly IPv4_HS_NETWORK=10.0.0
+readonly VPN_LOCATOR_SERVICE=fcff
+readonly END_FUNC=000e
+readonly DT46_FUNC=0d46
+
+PING_TIMEOUT_SEC=4
+PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
+
+# IDs of routers and hosts are initialized during the setup of the testing
+# network
+ROUTERS=''
+HOSTS=''
+
+SETUP_ERR=1
+
+ret=${ksft_skip}
+nsuccess=0
+nfail=0
+
+log_test()
+{
+       local rc="$1"
+       local expected="$2"
+       local msg="$3"
+
+       if [ "${rc}" -eq "${expected}" ]; then
+               nsuccess=$((nsuccess+1))
+               printf "\n    TEST: %-60s  [ OK ]\n" "${msg}"
+       else
+               ret=1
+               nfail=$((nfail+1))
+               printf "\n    TEST: %-60s  [FAIL]\n" "${msg}"
+               if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+                       echo
+                       echo "hit enter to continue, 'q' to quit"
+                       read a
+                       [ "$a" = "q" ] && exit 1
+               fi
+       fi
+}
+
+print_log_test_results()
+{
+       printf "\nTests passed: %3d\n" "${nsuccess}"
+       printf "Tests failed: %3d\n"   "${nfail}"
+
+       # when a test fails, the value of 'ret' is set to 1 (error code).
+       # Conversely, when all tests are passed successfully, the 'ret' value
+       # is set to 0 (success code).
+       if [ "${ret}" -ne 1 ]; then
+               ret=0
+       fi
+}
+
+log_section()
+{
+       echo
+       echo "################################################################################"
+       echo "TEST SECTION: $*"
+       echo "################################################################################"
+}
+
+test_command_or_ksft_skip()
+{
+       local cmd="$1"
+
+       if [ ! -x "$(command -v "${cmd}")" ]; then
+               echo "SKIP: Could not run test without \"${cmd}\" tool";
+               exit "${ksft_skip}"
+       fi
+}
+
+get_nodename()
+{
+       local name="$1"
+
+       echo "${name}-${RDMSUFF}"
+}
+
+get_rtname()
+{
+       local rtid="$1"
+
+       get_nodename "rt-${rtid}"
+}
+
+get_hsname()
+{
+       local hsid="$1"
+
+       get_nodename "hs-${hsid}"
+}
+
+__create_namespace()
+{
+       local name="$1"
+
+       ip netns add "${name}"
+}
+
+create_router()
+{
+       local rtid="$1"
+       local nsname
+
+       nsname="$(get_rtname "${rtid}")"
+
+       __create_namespace "${nsname}"
+}
+
+create_host()
+{
+       local hsid="$1"
+       local nsname
+
+       nsname="$(get_hsname "${hsid}")"
+
+       __create_namespace "${nsname}"
+}
+
+cleanup()
+{
+       local nsname
+       local i
+
+       # destroy routers
+       for i in ${ROUTERS}; do
+               nsname="$(get_rtname "${i}")"
+
+               ip netns del "${nsname}" &>/dev/null || true
+       done
+
+       # destroy hosts
+       for i in ${HOSTS}; do
+               nsname="$(get_hsname "${i}")"
+
+               ip netns del "${nsname}" &>/dev/null || true
+       done
+
+       # check whether the setup phase was completed successfully or not. In
+       # case of an error during the setup phase of the testing environment,
+       # the selftest is considered as "skipped".
+       if [ "${SETUP_ERR}" -ne 0 ]; then
+               echo "SKIP: Setting up the testing environment failed"
+               exit "${ksft_skip}"
+       fi
+
+       exit "${ret}"
+}
+
+add_link_rt_pairs()
+{
+       local rt="$1"
+       local rt_neighs="$2"
+       local neigh
+       local nsname
+       local neigh_nsname
+
+       nsname="$(get_rtname "${rt}")"
+
+       for neigh in ${rt_neighs}; do
+               neigh_nsname="$(get_rtname "${neigh}")"
+
+               ip link add "veth-rt-${rt}-${neigh}" netns "${nsname}" \
+                       type veth peer name "veth-rt-${neigh}-${rt}" \
+                       netns "${neigh_nsname}"
+       done
+}
+
+get_network_prefix()
+{
+       local rt="$1"
+       local neigh="$2"
+       local p="${rt}"
+       local q="${neigh}"
+
+       if [ "${p}" -gt "${q}" ]; then
+               p="${q}"; q="${rt}"
+       fi
+
+       echo "${IPv6_RT_NETWORK}:${p}:${q}"
+}
+
+# Setup the basic networking for the routers
+setup_rt_networking()
+{
+       local rt="$1"
+       local rt_neighs="$2"
+       local nsname
+       local net_prefix
+       local devname
+       local neigh
+
+       nsname="$(get_rtname "${rt}")"
+
+       for neigh in ${rt_neighs}; do
+               devname="veth-rt-${rt}-${neigh}"
+
+               net_prefix="$(get_network_prefix "${rt}" "${neigh}")"
+
+               ip -netns "${nsname}" addr \
+                       add "${net_prefix}::${rt}/64" dev "${devname}" nodad
+
+               ip -netns "${nsname}" link set "${devname}" up
+       done
+
+       ip -netns "${nsname}" link set lo up
+
+       ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.all.accept_dad=0
+       ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.default.accept_dad=0
+       ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.all.forwarding=1
+
+       ip netns exec "${nsname}" sysctl -wq net.ipv4.conf.all.rp_filter=0
+       ip netns exec "${nsname}" sysctl -wq net.ipv4.conf.default.rp_filter=0
+       ip netns exec "${nsname}" sysctl -wq net.ipv4.ip_forward=1
+}
+
+# Setup local SIDs for an SRv6 router
+setup_rt_local_sids()
+{
+       local rt="$1"
+       local rt_neighs="$2"
+       local net_prefix
+       local devname
+       local nsname
+       local neigh
+
+       nsname="$(get_rtname "${rt}")"
+
+       for neigh in ${rt_neighs}; do
+               devname="veth-rt-${rt}-${neigh}"
+
+               net_prefix="$(get_network_prefix "${rt}" "${neigh}")"
+
+               # set underlay network routes for SIDs reachability
+               ip -netns "${nsname}" -6 route \
+                       add "${VPN_LOCATOR_SERVICE}:${neigh}::/32" \
+                       table "${LOCALSID_TABLE_ID}" \
+                       via "${net_prefix}::${neigh}" dev "${devname}"
+       done
+
+       # Local End behavior (note that the "dev" argument is a placeholder;
+       # the VRF device is chosen for the sake of simplicity).
+       ip -netns "${nsname}" -6 route \
+               add "${VPN_LOCATOR_SERVICE}:${rt}::${END_FUNC}" \
+               table "${LOCALSID_TABLE_ID}" \
+               encap seg6local action End dev "${VRF_DEVNAME}"
+
+       # Local End.DT46 behavior
+       ip -netns "${nsname}" -6 route \
+               add "${VPN_LOCATOR_SERVICE}:${rt}::${DT46_FUNC}" \
+               table "${LOCALSID_TABLE_ID}" \
+               encap seg6local action End.DT46 vrftable "${VRF_TID}" \
+               dev "${VRF_DEVNAME}"
+
+       # All SIDs for VPNs start with a common locator. Routes and SRv6
+       # Endpoint behavior instances are grouped together in the 'localsid'
+       # table.
+       ip -netns "${nsname}" -6 rule \
+               add to "${VPN_LOCATOR_SERVICE}::/16" \
+               lookup "${LOCALSID_TABLE_ID}" prio 999
+
+       # set default routes to unreachable for both ipv4 and ipv6
+       ip -netns "${nsname}" -6 route \
+               add unreachable default metric 4278198272 \
+               vrf "${VRF_DEVNAME}"
+
+       ip -netns "${nsname}" -4 route \
+               add unreachable default metric 4278198272 \
+               vrf "${VRF_DEVNAME}"
+}
+
+# build and install the SRv6 policy into the ingress SRv6 router.
+# args:
+#  $1 - destination host (i.e. cafe::x host)
+#  $2 - SRv6 router configured for enforcing the SRv6 Policy
+#  $3 - SRv6 routers configured for steering traffic (End behaviors)
+#  $4 - SRv6 router configured for removing the SRv6 Policy (router connected
+#       to the destination host)
+#  $5 - encap mode (full or red)
+#  $6 - traffic type (IPv6 or IPv4)
+__setup_rt_policy()
+{
+       local dst="$1"
+       local encap_rt="$2"
+       local end_rts="$3"
+       local dec_rt="$4"
+       local mode="$5"
+       local traffic="$6"
+       local nsname
+       local policy=''
+       local n
+
+       nsname="$(get_rtname "${encap_rt}")"
+
+       for n in ${end_rts}; do
+               policy="${policy}${VPN_LOCATOR_SERVICE}:${n}::${END_FUNC},"
+       done
+
+       policy="${policy}${VPN_LOCATOR_SERVICE}:${dec_rt}::${DT46_FUNC}"
+
+       # add SRv6 policy to incoming traffic sent by connected hosts
+       if [ "${traffic}" -eq 6 ]; then
+               ip -netns "${nsname}" -6 route \
+                       add "${IPv6_HS_NETWORK}::${dst}" vrf "${VRF_DEVNAME}" \
+                       encap seg6 mode "${mode}" segs "${policy}" \
+                       dev "${VRF_DEVNAME}"
+
+               ip -netns "${nsname}" -6 neigh \
+                       add proxy "${IPv6_HS_NETWORK}::${dst}" \
+                       dev "${RT2HS_DEVNAME}"
+       else
+               # "dev" must be different from the one where the packet is
+               # received, otherwise the proxy arp does not work.
+               ip -netns "${nsname}" -4 route \
+                       add "${IPv4_HS_NETWORK}.${dst}" vrf "${VRF_DEVNAME}" \
+                       encap seg6 mode "${mode}" segs "${policy}" \
+                       dev "${VRF_DEVNAME}"
+       fi
+}
+
+# see __setup_rt_policy
+setup_rt_policy_ipv6()
+{
+       __setup_rt_policy "$1" "$2" "$3" "$4" "$5" 6
+}
+
+# see __setup_rt_policy
+setup_rt_policy_ipv4()
+{
+       __setup_rt_policy "$1" "$2" "$3" "$4" "$5" 4
+}
+
+setup_hs()
+{
+       local hs="$1"
+       local rt="$2"
+       local hsname
+       local rtname
+
+       hsname="$(get_hsname "${hs}")"
+       rtname="$(get_rtname "${rt}")"
+
+       ip netns exec "${hsname}" sysctl -wq net.ipv6.conf.all.accept_dad=0
+       ip netns exec "${hsname}" sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+       ip -netns "${hsname}" link add veth0 type veth \
+               peer name "${RT2HS_DEVNAME}" netns "${rtname}"
+
+       ip -netns "${hsname}" addr \
+               add "${IPv6_HS_NETWORK}::${hs}/64" dev veth0 nodad
+       ip -netns "${hsname}" addr add "${IPv4_HS_NETWORK}.${hs}/24" dev veth0
+
+       ip -netns "${hsname}" link set veth0 up
+       ip -netns "${hsname}" link set lo up
+
+       # configure the VRF on the router which is directly connected to the
+       # source host.
+       ip -netns "${rtname}" link \
+               add "${VRF_DEVNAME}" type vrf table "${VRF_TID}"
+       ip -netns "${rtname}" link set "${VRF_DEVNAME}" up
+
+       # enslave the veth interface connecting the router with the host to the
+       # VRF in the access router
+       ip -netns "${rtname}" link \
+               set "${RT2HS_DEVNAME}" master "${VRF_DEVNAME}"
+
+       ip -netns "${rtname}" addr \
+               add "${IPv6_HS_NETWORK}::254/64" dev "${RT2HS_DEVNAME}" nodad
+       ip -netns "${rtname}" addr \
+               add "${IPv4_HS_NETWORK}.254/24" dev "${RT2HS_DEVNAME}"
+
+       ip -netns "${rtname}" link set "${RT2HS_DEVNAME}" up
+
+       ip netns exec "${rtname}" \
+               sysctl -wq net.ipv6.conf."${RT2HS_DEVNAME}".proxy_ndp=1
+       ip netns exec "${rtname}" \
+               sysctl -wq net.ipv4.conf."${RT2HS_DEVNAME}".proxy_arp=1
+
+       # disable the rp_filter otherwise the kernel gets confused about how
+       # to route decap ipv4 packets.
+       ip netns exec "${rtname}" \
+               sysctl -wq net.ipv4.conf."${RT2HS_DEVNAME}".rp_filter=0
+
+       ip netns exec "${rtname}" sh -c "echo 1 > /proc/sys/net/vrf/strict_mode"
+}
+
+setup()
+{
+       local i
+
+       # create routers
+       ROUTERS="1 2 3 4"; readonly ROUTERS
+       for i in ${ROUTERS}; do
+               create_router "${i}"
+       done
+
+       # create hosts
+       HOSTS="1 2 3 4"; readonly HOSTS
+       for i in ${HOSTS}; do
+               create_host "${i}"
+       done
+
+       # set up the links for connecting routers
+       add_link_rt_pairs 1 "2 3 4"
+       add_link_rt_pairs 2 "3 4"
+       add_link_rt_pairs 3 "4"
+
+       # set up the basic connectivity of routers and routes required for
+       # reachability of SIDs.
+       setup_rt_networking 1 "2 3 4"
+       setup_rt_networking 2 "1 3 4"
+       setup_rt_networking 3 "1 2 4"
+       setup_rt_networking 4 "1 2 3"
+
+       # set up the hosts connected to routers
+       setup_hs 1 1
+       setup_hs 2 2
+       setup_hs 3 3
+       setup_hs 4 4
+
+       # set up default SRv6 Endpoints (i.e. SRv6 End and SRv6 End.DT46)
+       setup_rt_local_sids 1 "2 3 4"
+       setup_rt_local_sids 2 "1 3 4"
+       setup_rt_local_sids 3 "1 2 4"
+       setup_rt_local_sids 4 "1 2 3"
+
+       # set up SRv6 policies
+
+       # create an IPv6 VPN between hosts hs-1 and hs-2.
+       # the network path between hs-1 and hs-2 traverses several routers
+       # depending on the direction of traffic.
+       #
+       # Direction hs-1 -> hs-2 (H.Encaps.Red)
+       #  - rt-3,rt-4 (SRv6 End behaviors)
+       #  - rt-2 (SRv6 End.DT46 behavior)
+       #
+       # Direction hs-2 -> hs-1 (H.Encaps.Red)
+       #  - rt-1 (SRv6 End.DT46 behavior)
+       setup_rt_policy_ipv6 2 1 "3 4" 2 encap.red
+       setup_rt_policy_ipv6 1 2 "" 1 encap.red
+
+       # create an IPv4 VPN between hosts hs-1 and hs-2
+       # the network path between hs-1 and hs-2 traverses several routers
+       # depending on the direction of traffic.
+       #
+       # Direction hs-1 -> hs-2 (H.Encaps.Red)
+       # - rt-2 (SRv6 End.DT46 behavior)
+       #
+       # Direction hs-2 -> hs-1 (H.Encaps.Red)
+       #  - rt-4,rt-3 (SRv6 End behaviors)
+       #  - rt-1 (SRv6 End.DT46 behavior)
+       setup_rt_policy_ipv4 2 1 "" 2 encap.red
+       setup_rt_policy_ipv4 1 2 "4 3" 1 encap.red
+
+       # create an IPv6 VPN between hosts hs-3 and hs-4
+       # the network path between hs-3 and hs-4 traverses several routers
+       # depending on the direction of traffic.
+       #
+       # Direction hs-3 -> hs-4 (H.Encaps.Red)
+       # - rt-2 (SRv6 End Behavior)
+       # - rt-4 (SRv6 End.DT46 behavior)
+       #
+       # Direction hs-4 -> hs-3 (H.Encaps.Red)
+       #  - rt-1 (SRv6 End behavior)
+       #  - rt-3 (SRv6 End.DT46 behavior)
+       setup_rt_policy_ipv6 4 3 "2" 4 encap.red
+       setup_rt_policy_ipv6 3 4 "1" 3 encap.red
+
+       # testing environment was set up successfully
+       SETUP_ERR=0
+}
+
+check_rt_connectivity()
+{
+       local rtsrc="$1"
+       local rtdst="$2"
+       local prefix
+       local rtsrc_nsname
+
+       rtsrc_nsname="$(get_rtname "${rtsrc}")"
+
+       prefix="$(get_network_prefix "${rtsrc}" "${rtdst}")"
+
+       ip netns exec "${rtsrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+               "${prefix}::${rtdst}" >/dev/null 2>&1
+}
+
+check_and_log_rt_connectivity()
+{
+       local rtsrc="$1"
+       local rtdst="$2"
+
+       check_rt_connectivity "${rtsrc}" "${rtdst}"
+       log_test $? 0 "Routers connectivity: rt-${rtsrc} -> rt-${rtdst}"
+}
+
+check_hs_ipv6_connectivity()
+{
+       local hssrc="$1"
+       local hsdst="$2"
+       local hssrc_nsname
+
+       hssrc_nsname="$(get_hsname "${hssrc}")"
+
+       ip netns exec "${hssrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+               "${IPv6_HS_NETWORK}::${hsdst}" >/dev/null 2>&1
+}
+
+check_hs_ipv4_connectivity()
+{
+       local hssrc="$1"
+       local hsdst="$2"
+       local hssrc_nsname
+
+       hssrc_nsname="$(get_hsname "${hssrc}")"
+
+       ip netns exec "${hssrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+               "${IPv4_HS_NETWORK}.${hsdst}" >/dev/null 2>&1
+}
+
+check_and_log_hs2gw_connectivity()
+{
+       local hssrc="$1"
+
+       check_hs_ipv6_connectivity "${hssrc}" 254
+       log_test $? 0 "IPv6 Hosts connectivity: hs-${hssrc} -> gw"
+
+       check_hs_ipv4_connectivity "${hssrc}" 254
+       log_test $? 0 "IPv4 Hosts connectivity: hs-${hssrc} -> gw"
+}
+
+check_and_log_hs_ipv6_connectivity()
+{
+       local hssrc="$1"
+       local hsdst="$2"
+
+       check_hs_ipv6_connectivity "${hssrc}" "${hsdst}"
+       log_test $? 0 "IPv6 Hosts connectivity: hs-${hssrc} -> hs-${hsdst}"
+}
+
+check_and_log_hs_ipv4_connectivity()
+{
+       local hssrc="$1"
+       local hsdst="$2"
+
+       check_hs_ipv4_connectivity "${hssrc}" "${hsdst}"
+       log_test $? 0 "IPv4 Hosts connectivity: hs-${hssrc} -> hs-${hsdst}"
+}
+
+check_and_log_hs_connectivity()
+{
+       local hssrc="$1"
+       local hsdst="$2"
+
+       check_and_log_hs_ipv4_connectivity "${hssrc}" "${hsdst}"
+       check_and_log_hs_ipv6_connectivity "${hssrc}" "${hsdst}"
+}
+
+check_and_log_hs_ipv6_isolation()
+{
+       local hssrc="$1"
+       local hsdst="$2"
+
+       # in this case, the connectivity test must fail
+       check_hs_ipv6_connectivity "${hssrc}" "${hsdst}"
+       log_test $? 1 "IPv6 Hosts isolation: hs-${hssrc} -X-> hs-${hsdst}"
+}
+
+check_and_log_hs_ipv4_isolation()
+{
+       local hssrc="$1"
+       local hsdst="$2"
+
+       # in this case, the connectivity test must fail
+       check_hs_ipv4_connectivity "${hssrc}" "${hsdst}"
+       log_test $? 1 "IPv4 Hosts isolation: hs-${hssrc} -X-> hs-${hsdst}"
+}
+
+check_and_log_hs_isolation()
+{
+       local hssrc="$1"
+       local hsdst="$2"
+
+       check_and_log_hs_ipv6_isolation "${hssrc}" "${hsdst}"
+       check_and_log_hs_ipv4_isolation "${hssrc}" "${hsdst}"
+}
+
+router_tests()
+{
+       local i
+       local j
+
+       log_section "IPv6 routers connectivity test"
+
+       for i in ${ROUTERS}; do
+               for j in ${ROUTERS}; do
+                       if [ "${i}" -eq "${j}" ]; then
+                               continue
+                       fi
+
+                       check_and_log_rt_connectivity "${i}" "${j}"
+               done
+       done
+}
+
+host2gateway_tests()
+{
+       local hs
+
+       log_section "IPv4/IPv6 connectivity test among hosts and gateways"
+
+       for hs in ${HOSTS}; do
+               check_and_log_hs2gw_connectivity "${hs}"
+       done
+}
+
+host_vpn_tests()
+{
+       log_section "SRv6 VPN connectivity test hosts (h1 <-> h2, IPv4/IPv6)"
+
+       check_and_log_hs_connectivity 1 2
+       check_and_log_hs_connectivity 2 1
+
+       log_section "SRv6 VPN connectivity test hosts (h3 <-> h4, IPv6 only)"
+
+       check_and_log_hs_ipv6_connectivity 3 4
+       check_and_log_hs_ipv6_connectivity 4 3
+}
+
+host_vpn_isolation_tests()
+{
+       local l1="1 2"
+       local l2="3 4"
+       local tmp
+       local i
+       local j
+       local k
+
+       log_section "SRv6 VPN isolation test among hosts"
+
+       for k in 0 1; do
+               for i in ${l1}; do
+                       for j in ${l2}; do
+                               check_and_log_hs_isolation "${i}" "${j}"
+                       done
+               done
+
+               # let us test the reverse path
+               tmp="${l1}"; l1="${l2}"; l2="${tmp}"
+       done
+
+       log_section "SRv6 VPN isolation test among hosts (h2 <-> h4, IPv4 only)"
+
+       check_and_log_hs_ipv4_isolation 2 4
+       check_and_log_hs_ipv4_isolation 4 2
+}
+
+test_iproute2_supp_or_ksft_skip()
+{
+       if ! ip route help 2>&1 | grep -qo "encap.red"; then
+               echo "SKIP: Missing SRv6 encap.red support in iproute2"
+               exit "${ksft_skip}"
+       fi
+}
+
+test_vrf_or_ksft_skip()
+{
+       modprobe vrf &>/dev/null || true
+       if [ ! -e /proc/sys/net/vrf/strict_mode ]; then
+               echo "SKIP: vrf sysctl does not exist"
+               exit "${ksft_skip}"
+       fi
+}
+
+if [ "$(id -u)" -ne 0 ]; then
+       echo "SKIP: Need root privileges"
+       exit "${ksft_skip}"
+fi
+
+# required programs to carry out this selftest
+test_command_or_ksft_skip ip
+test_command_or_ksft_skip ping
+test_command_or_ksft_skip sysctl
+test_command_or_ksft_skip grep
+
+test_iproute2_supp_or_ksft_skip
+test_vrf_or_ksft_skip
+
+set -e
+trap cleanup EXIT
+
+setup
+set +e
+
+router_tests
+host2gateway_tests
+host_vpn_tests
+host_vpn_isolation_tests
+
+print_log_test_results
diff --git a/tools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh b/tools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh
new file mode 100755 (executable)
index 0000000..cb4177d
--- /dev/null
@@ -0,0 +1,821 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# author: Andrea Mayer <andrea.mayer@uniroma2.it>
+#
+# This script is designed for testing the SRv6 H.L2Encaps.Red behavior.
+#
+# Below is depicted the IPv6 network of an operator which offers L2 VPN
+# services to hosts, enabling them to communicate with each other.
+# In this example, hosts hs-1 and hs-2 are connected through an L2 VPN service.
+# Currently, the SRv6 subsystem in Linux allows hosts hs-1 and hs-2 to exchange
+# full L2 frames as long as they carry IPv4/IPv6.
+#
+# Routers rt-1,rt-2,rt-3 and rt-4 implement L2 VPN services
+# leveraging the SRv6 architecture. The key components for such VPNs are:
+#
+#   i) The SRv6 H.L2Encaps.Red behavior applies SRv6 Policies on traffic
+#      received by connected hosts, initiating the VPN tunnel. Such a behavior
+#      is an optimization of the SRv6 H.L2Encap aiming to reduce the
+#      length of the SID List carried in the pushed SRH. Specifically, the
+#      H.L2Encaps.Red removes the first SID contained in the SID List (i.e. SRv6
+#      Policy) by storing it into the IPv6 Destination Address. When an SRv6
+#      Policy is made of only one SID, the SRv6 H.L2Encaps.Red behavior omits
+#      the SRH altogether and pushes that SID directly into the IPv6 DA;
+#
+#  ii) The SRv6 End behavior advances the active SID in the SID List
+#      carried by the SRH;
+#
+# iii) The SRv6 End.DX2 behavior is used for removing the SRv6 Policy
+#      and, thus, it terminates the VPN tunnel. The decapsulated L2 frame is
+#      sent over the interface connected with the destination host.
+#
+#               cafe::1                      cafe::2
+#              10.0.0.1                     10.0.0.2
+#             +--------+                   +--------+
+#             |        |                   |        |
+#             |  hs-1  |                   |  hs-2  |
+#             |        |                   |        |
+#             +---+----+                   +--- +---+
+#    cafe::/64    |                             |      cafe::/64
+#  10.0.0.0/24    |                             |    10.0.0.0/24
+#             +---+----+                   +----+---+
+#             |        |  fcf0:0:1:2::/64  |        |
+#             |  rt-1  +-------------------+  rt-2  |
+#             |        |                   |        |
+#             +---+----+                   +----+---+
+#                 |      .               .      |
+#                 |  fcf0:0:1:3::/64   .        |
+#                 |          .       .          |
+#                 |            .   .            |
+# fcf0:0:1:4::/64 |              .              | fcf0:0:2:3::/64
+#                 |            .   .            |
+#                 |          .       .          |
+#                 |  fcf0:0:2:4::/64   .        |
+#                 |      .               .      |
+#             +---+----+                   +----+---+
+#             |        |                   |        |
+#             |  rt-4  +-------------------+  rt-3  |
+#             |        |  fcf0:0:3:4::/64  |        |
+#             +---+----+                   +----+---+
+#
+#
+# Every fcf0:0:x:y::/64 network interconnects the SRv6 routers rt-x with rt-y
+# in the IPv6 operator network.
+#
+# Local SID table
+# ===============
+#
+# Each SRv6 router is configured with a Local SID table in which SIDs are
+# stored. Considering the given SRv6 router rt-x, at least two SIDs are
+# configured in the Local SID table:
+#
+#   Local SID table for SRv6 router rt-x
+#   +----------------------------------------------------------+
+#   |fcff:x::e is associated with the SRv6 End behavior        |
+#   |fcff:x::d2 is associated with the SRv6 End.DX2 behavior   |
+#   +----------------------------------------------------------+
+#
+# The fcff::/16 prefix is reserved by the operator for implementing SRv6 VPN
+# services. Reachability of SIDs is ensured by proper configuration of the IPv6
+# operator's network and SRv6 routers.
+#
+# SRv6 Policies
+# =============
+#
+# An SRv6 ingress router applies SRv6 policies to the traffic received from a
+# connected host. SRv6 policy enforcement consists of encapsulating the
+# received traffic into a new IPv6 packet with a given SID List contained in
+# the SRH.
+#
+# L2 VPN between hs-1 and hs-2
+# ----------------------------
+#
+# Hosts hs-1 and hs-2 are connected using a dedicated L2 VPN.
+# Specifically, packets generated from hs-1 and directed towards hs-2 are
+# handled by rt-1 which applies the following SRv6 Policies:
+#
+#   i.a) L2 traffic, SID List=fcff:2::d2
+#
+# Policy (i.a) steers tunneled L2 traffic through SRv6 router rt-2.
+# The H.L2Encaps.Red omits the SRH altogether, since the SID List
+# consists of only one SID (fcff:2::d2) that can be stored directly in the IPv6
+# DA.
+#
+# On the reverse path (i.e. from hs-2 to hs-1), rt-2 applies the following
+# policies:
+#
+#   i.b) L2 traffic, SID List=fcff:4::e,fcff:3::e,fcff:1::d2
+#
+# Policy (i.b) steers tunneled L2 traffic through the SRv6 routers
+# rt-4,rt-3,rt-2. The H.L2Encaps.Red reduces the SID List in the SRH by
+# removing the first SID (fcff:4::e) and pushing it into the IPv6 DA.
+#
+# In summary:
+#  hs-1->hs-2 |IPv6 DA=fcff:2::d2|eth|...|                              (i.a)
+#  hs-2->hs-1 |IPv6 DA=fcff:4::e|SRH SIDs=fcff:3::e,fcff:1::d2|eth|...| (i.b)
+#
+
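# For reference, a minimal sketch of the two iproute2 commands that realize
# the hs-1 -> hs-2 leg of this L2 VPN, using the names defined below (dum0 is
# DUMMY_DEVNAME, veth-hs is RT2HS_DEVNAME, table 90 is LOCALSID_TABLE_ID);
# they are executed inside the rt-1 and rt-2 network namespaces respectively:
#
#   # rt-1: H.L2Encaps.Red ingress policy towards cafe::2
#   ip -6 route add cafe::2 encap seg6 mode l2encap.red \
#           segs fcff:2::d2 dev dum0
#
#   # rt-2: End.DX2 SID decapsulating the L2 frame onto the host-facing veth
#   ip -6 route add fcff:2::d2 table 90 \
#           encap seg6local action End.DX2 oif veth-hs dev veth-hs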
+# Kselftest framework requirement - SKIP code is 4.
+readonly ksft_skip=4
+
+readonly RDMSUFF="$(mktemp -u XXXXXXXX)"
+readonly DUMMY_DEVNAME="dum0"
+readonly RT2HS_DEVNAME="veth-hs"
+readonly HS_VETH_NAME="veth0"
+readonly LOCALSID_TABLE_ID=90
+readonly IPv6_RT_NETWORK=fcf0:0
+readonly IPv6_HS_NETWORK=cafe
+readonly IPv4_HS_NETWORK=10.0.0
+readonly VPN_LOCATOR_SERVICE=fcff
+readonly MAC_PREFIX=00:00:00:c0:01
+readonly END_FUNC=000e
+readonly DX2_FUNC=00d2
+
+PING_TIMEOUT_SEC=4
+PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
+
+# IDs of routers and hosts are initialized during the setup of the testing
+# network
+ROUTERS=''
+HOSTS=''
+
+SETUP_ERR=1
+
+ret=${ksft_skip}
+nsuccess=0
+nfail=0
+
+log_test()
+{
+       local rc="$1"
+       local expected="$2"
+       local msg="$3"
+
+       if [ "${rc}" -eq "${expected}" ]; then
+               nsuccess=$((nsuccess+1))
+               printf "\n    TEST: %-60s  [ OK ]\n" "${msg}"
+       else
+               ret=1
+               nfail=$((nfail+1))
+               printf "\n    TEST: %-60s  [FAIL]\n" "${msg}"
+               if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+                       echo
+                       echo "hit enter to continue, 'q' to quit"
+                       read a
+                       [ "$a" = "q" ] && exit 1
+               fi
+       fi
+}
+
+print_log_test_results()
+{
+       printf "\nTests passed: %3d\n" "${nsuccess}"
+       printf "Tests failed: %3d\n"   "${nfail}"
+
+       # when a test fails, the value of 'ret' is set to 1 (error code).
+       # Conversely, when all tests are passed successfully, the 'ret' value
+       # is set to 0 (success code).
+       if [ "${ret}" -ne 1 ]; then
+               ret=0
+       fi
+}
+
+log_section()
+{
+       echo
+       echo "################################################################################"
+       echo "TEST SECTION: $*"
+       echo "################################################################################"
+}
+
+test_command_or_ksft_skip()
+{
+       local cmd="$1"
+
+       if [ ! -x "$(command -v "${cmd}")" ]; then
+               echo "SKIP: Could not run test without \"${cmd}\" tool";
+               exit "${ksft_skip}"
+       fi
+}
+
+get_nodename()
+{
+       local name="$1"
+
+       echo "${name}-${RDMSUFF}"
+}
+
+get_rtname()
+{
+       local rtid="$1"
+
+       get_nodename "rt-${rtid}"
+}
+
+get_hsname()
+{
+       local hsid="$1"
+
+       get_nodename "hs-${hsid}"
+}
+
+__create_namespace()
+{
+       local name="$1"
+
+       ip netns add "${name}"
+}
+
+create_router()
+{
+       local rtid="$1"
+       local nsname
+
+       nsname="$(get_rtname "${rtid}")"
+
+       __create_namespace "${nsname}"
+}
+
+create_host()
+{
+       local hsid="$1"
+       local nsname
+
+       nsname="$(get_hsname "${hsid}")"
+
+       __create_namespace "${nsname}"
+}
+
+cleanup()
+{
+       local nsname
+       local i
+
+       # destroy routers
+       for i in ${ROUTERS}; do
+               nsname="$(get_rtname "${i}")"
+
+               ip netns del "${nsname}" &>/dev/null || true
+       done
+
+       # destroy hosts
+       for i in ${HOSTS}; do
+               nsname="$(get_hsname "${i}")"
+
+               ip netns del "${nsname}" &>/dev/null || true
+       done
+
+       # check whether the setup phase was completed successfully or not. In
+       # case of an error during the setup phase of the testing environment,
+       # the selftest is considered as "skipped".
+       if [ "${SETUP_ERR}" -ne 0 ]; then
+               echo "SKIP: Setting up the testing environment failed"
+               exit "${ksft_skip}"
+       fi
+
+       exit "${ret}"
+}
+
+add_link_rt_pairs()
+{
+       local rt="$1"
+       local rt_neighs="$2"
+       local neigh
+       local nsname
+       local neigh_nsname
+
+       nsname="$(get_rtname "${rt}")"
+
+       for neigh in ${rt_neighs}; do
+               neigh_nsname="$(get_rtname "${neigh}")"
+
+               ip link add "veth-rt-${rt}-${neigh}" netns "${nsname}" \
+                       type veth peer name "veth-rt-${neigh}-${rt}" \
+                       netns "${neigh_nsname}"
+       done
+}
+
+get_network_prefix()
+{
+       local rt="$1"
+       local neigh="$2"
+       local p="${rt}"
+       local q="${neigh}"
+
+       if [ "${p}" -gt "${q}" ]; then
+               p="${q}"; q="${rt}"
+       fi
+
+       echo "${IPv6_RT_NETWORK}:${p}:${q}"
+}
+
+# Setup the basic networking for the routers
+setup_rt_networking()
+{
+       local rt="$1"
+       local rt_neighs="$2"
+       local nsname
+       local net_prefix
+       local devname
+       local neigh
+
+       nsname="$(get_rtname "${rt}")"
+
+       for neigh in ${rt_neighs}; do
+               devname="veth-rt-${rt}-${neigh}"
+
+               net_prefix="$(get_network_prefix "${rt}" "${neigh}")"
+
+               ip -netns "${nsname}" addr \
+                       add "${net_prefix}::${rt}/64" dev "${devname}" nodad
+
+               ip -netns "${nsname}" link set "${devname}" up
+       done
+
+       ip -netns "${nsname}" link add "${DUMMY_DEVNAME}" type dummy
+
+       ip -netns "${nsname}" link set "${DUMMY_DEVNAME}" up
+       ip -netns "${nsname}" link set lo up
+
+       ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.all.accept_dad=0
+       ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.default.accept_dad=0
+       ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.all.forwarding=1
+
+       ip netns exec "${nsname}" sysctl -wq net.ipv4.conf.all.rp_filter=0
+       ip netns exec "${nsname}" sysctl -wq net.ipv4.conf.default.rp_filter=0
+       ip netns exec "${nsname}" sysctl -wq net.ipv4.ip_forward=1
+}
+
+# Setup local SIDs for an SRv6 router
+setup_rt_local_sids()
+{
+       local rt="$1"
+       local rt_neighs="$2"
+       local net_prefix
+       local devname
+       local nsname
+       local neigh
+
+       nsname="$(get_rtname "${rt}")"
+
+       for neigh in ${rt_neighs}; do
+               devname="veth-rt-${rt}-${neigh}"
+
+               net_prefix="$(get_network_prefix "${rt}" "${neigh}")"
+
+               # set underlay network routes for SIDs reachability
+               ip -netns "${nsname}" -6 route \
+                       add "${VPN_LOCATOR_SERVICE}:${neigh}::/32" \
+                       table "${LOCALSID_TABLE_ID}" \
+                       via "${net_prefix}::${neigh}" dev "${devname}"
+       done
+
+       # Local End behavior (note that dev "${DUMMY_DEVNAME}" is a dummy
+       # interface)
+       ip -netns "${nsname}" -6 route \
+               add "${VPN_LOCATOR_SERVICE}:${rt}::${END_FUNC}" \
+               table "${LOCALSID_TABLE_ID}" \
+               encap seg6local action End dev "${DUMMY_DEVNAME}"
+
+       # All SIDs for VPNs start with a common locator. Routes and SRv6
+       # Endpoint behavior instances are grouped together in the 'localsid'
+       # table.
+       ip -netns "${nsname}" -6 rule add \
+               to "${VPN_LOCATOR_SERVICE}::/16" \
+               lookup "${LOCALSID_TABLE_ID}" prio 999
+}
+
+# build and install the SRv6 policy into the ingress SRv6 router.
+# args:
+#  $1 - destination host (i.e. cafe::x host)
+#  $2 - SRv6 router configured for enforcing the SRv6 Policy
+#  $3 - SRv6 routers configured for steering traffic (End behaviors)
+#  $4 - SRv6 router configured for removing the SRv6 Policy (router connected
+#       to the destination host)
+#  $5 - encap mode (full or red)
+#  $6 - traffic type (IPv6 or IPv4)
+__setup_rt_policy()
+{
+       local dst="$1"
+       local encap_rt="$2"
+       local end_rts="$3"
+       local dec_rt="$4"
+       local mode="$5"
+       local traffic="$6"
+       local nsname
+       local policy=''
+       local n
+
+       nsname="$(get_rtname "${encap_rt}")"
+
+       for n in ${end_rts}; do
+               policy="${policy}${VPN_LOCATOR_SERVICE}:${n}::${END_FUNC},"
+       done
+
+       policy="${policy}${VPN_LOCATOR_SERVICE}:${dec_rt}::${DX2_FUNC}"
+
+       # add SRv6 policy to incoming traffic sent by connected hosts
+       if [ "${traffic}" -eq 6 ]; then
+               ip -netns "${nsname}" -6 route \
+                       add "${IPv6_HS_NETWORK}::${dst}" \
+                       encap seg6 mode "${mode}" segs "${policy}" \
+                       dev dum0
+       else
+               ip -netns "${nsname}" -4 route \
+                       add "${IPv4_HS_NETWORK}.${dst}" \
+                       encap seg6 mode "${mode}" segs "${policy}" \
+                       dev dum0
+       fi
+}
+
+# see __setup_rt_policy
+setup_rt_policy_ipv6()
+{
+       __setup_rt_policy "$1" "$2" "$3" "$4" "$5" 6
+}
+
+# see __setup_rt_policy
+setup_rt_policy_ipv4()
+{
+       __setup_rt_policy "$1" "$2" "$3" "$4" "$5" 4
+}
+
+setup_decap()
+{
+       local rt="$1"
+       local nsname
+
+       nsname="$(get_rtname "${rt}")"
+
+       # Local End.DX2 behavior
+       ip -netns "${nsname}" -6 route \
+               add "${VPN_LOCATOR_SERVICE}:${rt}::${DX2_FUNC}" \
+               table "${LOCALSID_TABLE_ID}" \
+               encap seg6local action End.DX2 oif "${RT2HS_DEVNAME}" \
+               dev "${RT2HS_DEVNAME}"
+}
+
+setup_hs()
+{
+       local hs="$1"
+       local rt="$2"
+       local hsname
+       local rtname
+
+       hsname="$(get_hsname "${hs}")"
+       rtname="$(get_rtname "${rt}")"
+
+       ip netns exec "${hsname}" sysctl -wq net.ipv6.conf.all.accept_dad=0
+       ip netns exec "${hsname}" sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+       ip -netns "${hsname}" link add "${HS_VETH_NAME}" type veth \
+               peer name "${RT2HS_DEVNAME}" netns "${rtname}"
+
+       ip -netns "${hsname}" addr add "${IPv6_HS_NETWORK}::${hs}/64" \
+               dev "${HS_VETH_NAME}" nodad
+       ip -netns "${hsname}" addr add "${IPv4_HS_NETWORK}.${hs}/24" \
+               dev "${HS_VETH_NAME}"
+
+       ip -netns "${hsname}" link set "${HS_VETH_NAME}" up
+       ip -netns "${hsname}" link set lo up
+
+       ip -netns "${rtname}" addr add "${IPv6_HS_NETWORK}::254/64" \
+               dev "${RT2HS_DEVNAME}" nodad
+       ip -netns "${rtname}" addr \
+               add "${IPv4_HS_NETWORK}.254/24" dev "${RT2HS_DEVNAME}"
+
+       ip -netns "${rtname}" link set "${RT2HS_DEVNAME}" up
+
+       # disable the rp_filter otherwise the kernel gets confused about how
+       # to route decap ipv4 packets.
+       ip netns exec "${rtname}" \
+               sysctl -wq net.ipv4.conf."${RT2HS_DEVNAME}".rp_filter=0
+}
+
+# set an auto-generated mac address
+# args:
+#  $1 - name of the node (e.g.: hs-1, rt-3, etc)
+#  $2 - id of the node (e.g.: 1 for hs-1, 3 for rt-3, etc)
+#  $3 - host part of the IPv6 network address
+#  $4 - name of the network interface to which the generated mac address must
+#       be set.
+set_mac_address()
+{
+       local nodename="$1"
+       local nodeid="$2"
+       local host="$3"
+       local ifname="$4"
+       local nsname
+
+       nsname=$(get_nodename "${nodename}")
+
+       ip -netns "${nsname}" link set dev "${ifname}" down
+
+       ip -netns "${nsname}" link set address "${MAC_PREFIX}:${nodeid}" \
+               dev "${ifname}"
+
+       # the IPv6 address must be set once again after the MAC address has
+       # been changed.
+       ip -netns "${nsname}" addr add "${IPv6_HS_NETWORK}::${host}/64" \
+               dev "${ifname}" nodad
+
+       ip -netns "${nsname}" link set dev "${ifname}" up
+}
+
+set_host_l2peer()
+{
+       local hssrc="$1"
+       local hsdst="$2"
+       local ipprefix="$3"
+       local proto="$4"
+       local hssrc_name
+       local ipaddr
+
+       hssrc_name="$(get_hsname "${hssrc}")"
+
+       if [ "${proto}" -eq 6 ]; then
+               ipaddr="${ipprefix}::${hsdst}"
+       else
+               ipaddr="${ipprefix}.${hsdst}"
+       fi
+
+       ip -netns "${hssrc_name}" route add "${ipaddr}" dev "${HS_VETH_NAME}"
+
+       ip -netns "${hssrc_name}" neigh \
+               add "${ipaddr}" lladdr "${MAC_PREFIX}:${hsdst}" \
+               dev "${HS_VETH_NAME}"
+}
+
+# setup an SRv6 L2 VPN between hosts hs-x and hs-y (currently, the SRv6
+# subsystem only supports L2 frames whose layer-3 is IPv4/IPv6).
+# args:
+#  $1 - source host
+#  $2 - SRv6 routers configured for steering tunneled traffic
+#  $3 - destination host
+setup_l2vpn()
+{
+       local hssrc="$1"
+       local end_rts="$2"
+       local hsdst="$3"
+       local rtsrc="${hssrc}"
+       local rtdst="${hsdst}"
+
+       # set fixed mac for source node and the neigh MAC address
+       set_mac_address "hs-${hssrc}" "${hssrc}" "${hssrc}" "${HS_VETH_NAME}"
+       set_host_l2peer "${hssrc}" "${hsdst}" "${IPv6_HS_NETWORK}" 6
+       set_host_l2peer "${hssrc}" "${hsdst}" "${IPv4_HS_NETWORK}" 4
+
+       # we have to set the mac address of the veth-host (on ingress router)
+       # to the mac address of the remote peer (L2 VPN destination host).
+       # Otherwise, traffic coming from the source host is dropped at the
+       # ingress router.
+       set_mac_address "rt-${rtsrc}" "${hsdst}" 254 "${RT2HS_DEVNAME}"
+
+       # set the SRv6 Policies at the ingress router
+       setup_rt_policy_ipv6 "${hsdst}" "${rtsrc}" "${end_rts}" "${rtdst}" \
+               l2encap.red 6
+       setup_rt_policy_ipv4 "${hsdst}" "${rtsrc}" "${end_rts}" "${rtdst}" \
+               l2encap.red 4
+
+       # set the decap behavior
+       setup_decap "${rtsrc}"
+}
+
+setup()
+{
+       local i
+
+       # create routers
+       ROUTERS="1 2 3 4"; readonly ROUTERS
+       for i in ${ROUTERS}; do
+               create_router "${i}"
+       done
+
+       # create hosts
+       HOSTS="1 2"; readonly HOSTS
+       for i in ${HOSTS}; do
+               create_host "${i}"
+       done
+
+       # set up the links for connecting routers
+       add_link_rt_pairs 1 "2 3 4"
+       add_link_rt_pairs 2 "3 4"
+       add_link_rt_pairs 3 "4"
+
+       # set up the basic connectivity of routers and routes required for
+       # reachability of SIDs.
+       setup_rt_networking 1 "2 3 4"
+       setup_rt_networking 2 "1 3 4"
+       setup_rt_networking 3 "1 2 4"
+       setup_rt_networking 4 "1 2 3"
+
+       # set up the hosts connected to routers
+       setup_hs 1 1
+       setup_hs 2 2
+
+       # set up default SRv6 Endpoints (i.e. SRv6 End and SRv6 End.DX2)
+       setup_rt_local_sids 1 "2 3 4"
+       setup_rt_local_sids 2 "1 3 4"
+       setup_rt_local_sids 3 "1 2 4"
+       setup_rt_local_sids 4 "1 2 3"
+
+       # create an L2 VPN between hs-1 and hs-2.
+       # NB: currently, H.L2Encap* enables tunneling of L2 frames whose
+       # layer-3 is IPv4/IPv6.
+       #
+       # the network path between hs-1 and hs-2 traverses several routers
+       # depending on the direction of traffic.
+       #
+       # Direction hs-1 -> hs-2 (H.L2Encaps.Red)
+       # - rt-2 (SRv6 End.DX2 behavior)
+       #
+       # Direction hs-2 -> hs-1 (H.L2Encaps.Red)
+       #  - rt-4,rt-3 (SRv6 End behaviors)
+       #  - rt-1 (SRv6 End.DX2 behavior)
+       setup_l2vpn 1 "" 2
+       setup_l2vpn 2 "4 3" 1
+
+       # testing environment was set up successfully
+       SETUP_ERR=0
+}
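As a worked example of the reduced encapsulation used above: with H.L2Encaps.Red (RFC 8986), the first SID of the policy travels only in the outer IPv6 destination address and is omitted from the SRH. Under those semantics, packets for the hs-2 -> hs-1 direction configured by setup_l2vpn 2 "4 3" 1 should leave rt-2 shaped roughly as sketched below (a conceptual sketch, not captured traffic; the actual SID values are whatever the script assigns):

    # outer IPv6 destination : End SID of rt-4 (active segment, not repeated in the SRH)
    # SRH, segments still to visit (traversal order): End SID of rt-3, End.DX2 SID of rt-1
    # inner payload          : the original L2 frame sent by hs-2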
+
+check_rt_connectivity()
+{
+       local rtsrc="$1"
+       local rtdst="$2"
+       local prefix
+       local rtsrc_nsname
+
+       rtsrc_nsname="$(get_rtname "${rtsrc}")"
+
+       prefix="$(get_network_prefix "${rtsrc}" "${rtdst}")"
+
+       ip netns exec "${rtsrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+               "${prefix}::${rtdst}" >/dev/null 2>&1
+}
+
+check_and_log_rt_connectivity()
+{
+       local rtsrc="$1"
+       local rtdst="$2"
+
+       check_rt_connectivity "${rtsrc}" "${rtdst}"
+       log_test $? 0 "Routers connectivity: rt-${rtsrc} -> rt-${rtdst}"
+}
+
+check_hs_ipv6_connectivity()
+{
+       local hssrc="$1"
+       local hsdst="$2"
+       local hssrc_nsname
+
+       hssrc_nsname="$(get_hsname "${hssrc}")"
+
+       ip netns exec "${hssrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+               "${IPv6_HS_NETWORK}::${hsdst}" >/dev/null 2>&1
+}
+
+check_hs_ipv4_connectivity()
+{
+       local hssrc="$1"
+       local hsdst="$2"
+       local hssrc_nsname
+
+       hssrc_nsname="$(get_hsname "${hssrc}")"
+
+       ip netns exec "${hssrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+               "${IPv4_HS_NETWORK}.${hsdst}" >/dev/null 2>&1
+}
+
+check_and_log_hs2gw_connectivity()
+{
+       local hssrc="$1"
+
+       check_hs_ipv6_connectivity "${hssrc}" 254
+       log_test $? 0 "IPv6 Hosts connectivity: hs-${hssrc} -> gw"
+
+       check_hs_ipv4_connectivity "${hssrc}" 254
+       log_test $? 0 "IPv4 Hosts connectivity: hs-${hssrc} -> gw"
+}
+
+check_and_log_hs_ipv6_connectivity()
+{
+       local hssrc="$1"
+       local hsdst="$2"
+
+       check_hs_ipv6_connectivity "${hssrc}" "${hsdst}"
+       log_test $? 0 "IPv6 Hosts connectivity: hs-${hssrc} -> hs-${hsdst}"
+}
+
+check_and_log_hs_ipv4_connectivity()
+{
+       local hssrc="$1"
+       local hsdst="$2"
+
+       check_hs_ipv4_connectivity "${hssrc}" "${hsdst}"
+       log_test $? 0 "IPv4 Hosts connectivity: hs-${hssrc} -> hs-${hsdst}"
+}
+
+check_and_log_hs_connectivity()
+{
+       local hssrc="$1"
+       local hsdst="$2"
+
+       check_and_log_hs_ipv4_connectivity "${hssrc}" "${hsdst}"
+       check_and_log_hs_ipv6_connectivity "${hssrc}" "${hsdst}"
+}
+
+router_tests()
+{
+       local i
+       local j
+
+       log_section "IPv6 routers connectivity test"
+
+       for i in ${ROUTERS}; do
+               for j in ${ROUTERS}; do
+                       if [ "${i}" -eq "${j}" ]; then
+                               continue
+                       fi
+
+                       check_and_log_rt_connectivity "${i}" "${j}"
+               done
+       done
+}
+
+host2gateway_tests()
+{
+       local hs
+
+       log_section "IPv4/IPv6 connectivity test among hosts and gateways"
+
+       for hs in ${HOSTS}; do
+               check_and_log_hs2gw_connectivity "${hs}"
+       done
+}
+
+host_vpn_tests()
+{
+       log_section "SRv6 L2 VPN connectivity test among hosts (hs-1 <-> hs-2)"
+
+       check_and_log_hs_connectivity 1 2
+       check_and_log_hs_connectivity 2 1
+}
+
+test_dummy_dev_or_ksft_skip()
+{
+       local test_netns
+
+       test_netns="dummy-$(mktemp -u XXXXXXXX)"
+
+       if ! ip netns add "${test_netns}"; then
+               echo "SKIP: Cannot set up netns for testing dummy dev support"
+               exit "${ksft_skip}"
+       fi
+
+       modprobe dummy &>/dev/null || true
+       if ! ip -netns "${test_netns}" link \
+               add "${DUMMY_DEVNAME}" type dummy; then
+               echo "SKIP: dummy dev not supported"
+
+               ip netns del "${test_netns}"
+               exit "${ksft_skip}"
+       fi
+
+       ip netns del "${test_netns}"
+}
+
+test_iproute2_supp_or_ksft_skip()
+{
+       if ! ip route help 2>&1 | grep -qo "l2encap.red"; then
+               echo "SKIP: Missing SRv6 l2encap.red support in iproute2"
+               exit "${ksft_skip}"
+       fi
+}
+
+if [ "$(id -u)" -ne 0 ]; then
+       echo "SKIP: Need root privileges"
+       exit "${ksft_skip}"
+fi
+
+# required programs to carry out this selftest
+test_command_or_ksft_skip ip
+test_command_or_ksft_skip ping
+test_command_or_ksft_skip sysctl
+test_command_or_ksft_skip grep
+
+test_iproute2_supp_or_ksft_skip
+test_dummy_dev_or_ksft_skip
+
+set -e
+trap cleanup EXIT
+
+setup
+set +e
+
+router_tests
+host2gateway_tests
+host_vpn_tests
+
+print_log_test_results
index 4ecbac1..2cbb127 100644
@@ -644,12 +644,14 @@ TEST_F(tls, splice_from_pipe2)
        int p2[2];
        int p[2];
 
+       memrnd(mem_send, sizeof(mem_send));
+
        ASSERT_GE(pipe(p), 0);
        ASSERT_GE(pipe(p2), 0);
-       EXPECT_GE(write(p[1], mem_send, 8000), 0);
-       EXPECT_GE(splice(p[0], NULL, self->fd, NULL, 8000, 0), 0);
-       EXPECT_GE(write(p2[1], mem_send + 8000, 8000), 0);
-       EXPECT_GE(splice(p2[0], NULL, self->fd, NULL, 8000, 0), 0);
+       EXPECT_EQ(write(p[1], mem_send, 8000), 8000);
+       EXPECT_EQ(splice(p[0], NULL, self->fd, NULL, 8000, 0), 8000);
+       EXPECT_EQ(write(p2[1], mem_send + 8000, 8000), 8000);
+       EXPECT_EQ(splice(p2[0], NULL, self->fd, NULL, 8000, 0), 8000);
        EXPECT_EQ(recv(self->cfd, mem_recv, send_len, MSG_WAITALL), send_len);
        EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
 }
@@ -683,10 +685,12 @@ TEST_F(tls, splice_to_pipe)
        char mem_recv[TLS_PAYLOAD_MAX_LEN];
        int p[2];
 
+       memrnd(mem_send, sizeof(mem_send));
+
        ASSERT_GE(pipe(p), 0);
-       EXPECT_GE(send(self->fd, mem_send, send_len, 0), 0);
-       EXPECT_GE(splice(self->cfd, NULL, p[1], NULL, send_len, 0), 0);
-       EXPECT_GE(read(p[0], mem_recv, send_len), 0);
+       EXPECT_EQ(send(self->fd, mem_send, send_len, 0), send_len);
+       EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), send_len);
+       EXPECT_EQ(read(p[0], mem_recv, send_len), send_len);
        EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
 }
 
@@ -875,6 +879,8 @@ TEST_F(tls, multiple_send_single_recv)
        char recv_mem[2 * 10];
        char send_mem[10];
 
+       memrnd(send_mem, sizeof(send_mem));
+
        EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0);
        EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0);
        memset(recv_mem, 0, total_len);
@@ -891,6 +897,8 @@ TEST_F(tls, single_send_multiple_recv_non_align)
        char recv_mem[recv_len * 2];
        char send_mem[total_len];
 
+       memrnd(send_mem, sizeof(send_mem));
+
        EXPECT_GE(send(self->fd, send_mem, total_len, 0), 0);
        memset(recv_mem, 0, total_len);
 
@@ -936,10 +944,10 @@ TEST_F(tls, recv_peek)
        char buf[15];
 
        EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
-       EXPECT_NE(recv(self->cfd, buf, send_len, MSG_PEEK), -1);
+       EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_PEEK), send_len);
        EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
        memset(buf, 0, sizeof(buf));
-       EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
+       EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len);
        EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
 }