Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
author Jakub Kicinski <kuba@kernel.org>
Tue, 22 Mar 2022 17:36:56 +0000 (10:36 -0700)
committer Jakub Kicinski <kuba@kernel.org>
Tue, 22 Mar 2022 18:18:49 +0000 (11:18 -0700)
Alexei Starovoitov says:

====================
pull-request: bpf-next 2022-03-21 v2

We've added 137 non-merge commits during the last 17 day(s) which contain
a total of 143 files changed, 7123 insertions(+), 1092 deletions(-).

The main changes are:

1) Custom SEC() handling in libbpf, from Andrii.

2) Subskeleton support, from Delyan.

3) Use btf_tag to recognize __percpu pointers in the verifier, from Hao.

4) Fix net.core.bpf_jit_harden race, from Hou.

5) Fix bpf_sk_lookup remote_port on big-endian, from Jakub.

6) Introduce fprobe (multi kprobe) _without_ arch bits, from Masami.
The arch-specific bits will come later.

7) Introduce multi_kprobe bpf programs on top of fprobe, from Jiri
(see the sketch after this list).

8) Enable non-atomic allocations in local storage, from Joanne.

9) Various var_off ptr_to_btf_id fixes, from Kumar.

10) bpf_ima_file_hash helper, from Roberto.

11) Add "live packet" mode for XDP in BPF_PROG_RUN, from Toke.
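
A minimal user-space sketch of item 11 (illustrative only; it assumes a
loaded XDP program fd supplied by the caller, and uses libbpf's
bpf_prog_test_run_opts() API with the BPF_F_TEST_XDP_LIVE_FRAMES flag
added by this series; the repeat count is an arbitrary example):

  #include <bpf/bpf.h>
  #include <linux/bpf.h>

  /* Sketch: inject a raw Ethernet frame through an XDP program in
   * "live packet" mode. XDP_TX/XDP_REDIRECT verdicts transmit the
   * frame for real instead of capturing it in data_out. */
  static int run_xdp_live(int prog_fd, void *frame, __u32 frame_len)
  {
          LIBBPF_OPTS(bpf_test_run_opts, opts,
                  .data_in = frame,
                  .data_size_in = frame_len,
                  .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
                  .repeat = 64,   /* run the program 64 times */
          );

          return bpf_prog_test_run_opts(prog_fd, &opts);
  }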

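And a hedged BPF-side sketch of items 6-7: one program attached across a
glob of kernel functions via a single fprobe-backed multi-kprobe link
(the "kprobe.multi" section name follows the selftests in this series;
the glob and program name are made-up examples):

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  char LICENSE[] SEC("license") = "GPL";

  /* libbpf expands the glob and creates one multi-kprobe link that
   * covers every matching kernel function. */
  SEC("kprobe.multi/tcp_send*")
  int trace_tcp_send(struct pt_regs *ctx)
  {
          /* bpf_get_func_ip() reports which matched function fired. */
          bpf_printk("entered %lx", bpf_get_func_ip(ctx));
          return 0;
  }
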
* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (137 commits)
  selftests/bpf: Fix kprobe_multi test.
  Revert "rethook: x86: Add rethook x86 implementation"
  Revert "arm64: rethook: Add arm64 rethook implementation"
  Revert "powerpc: Add rethook support"
  Revert "ARM: rethook: Add rethook arm implementation"
  bpftool: Fix a bug in subskeleton code generation
  bpf: Fix bpf_prog_pack when PMU_SIZE is not defined
  bpf: Fix bpf_prog_pack for multi-node setup
  bpf: Fix warning for cast from restricted gfp_t in verifier
  bpf, arm: Fix various typos in comments
  libbpf: Close fd in bpf_object__reuse_map
  bpftool: Fix print error when show bpf map
  bpf: Fix kprobe_multi return probe backtrace
  Revert "bpf: Add support to inline bpf_get_func_ip helper on x86"
  bpf: Simplify check in btf_parse_hdr()
  selftests/bpf/test_lirc_mode2.sh: Exit with proper code
  bpf: Check for NULL return from bpf_get_btf_vmlinux
  selftests/bpf: Test skipping stacktrace
  bpf: Adjust BPF stack helper functions to accommodate skip > 0
  bpf: Select proper size for bpf_prog_pack
  ...
====================

Link: https://lore.kernel.org/r/20220322050159.5507-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
1151 files changed:
.mailmap
Documentation/ABI/testing/sysfs-timecard
Documentation/admin-guide/hw-vuln/spectre.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/mm/pagemap.rst
Documentation/core-api/dma-attributes.rst
Documentation/devicetree/bindings/arm/qcom.yaml
Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml
Documentation/devicetree/bindings/mfd/brcm,cru.yaml
Documentation/devicetree/bindings/mfd/cirrus,lochnagar.yaml
Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
Documentation/devicetree/bindings/net/can/xilinx,can.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/net/can/xilinx_can.txt [deleted file]
Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
Documentation/devicetree/bindings/net/mediatek-dwmac.txt [deleted file]
Documentation/devicetree/bindings/net/mediatek-dwmac.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/net/mscc-miim.txt
Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml
Documentation/devicetree/bindings/phy/fsl,lynx-28g.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/phy/ti,tcan104x-can.yaml
Documentation/devicetree/bindings/pinctrl/cirrus,madera.yaml
Documentation/networking/devlink/index.rst
Documentation/networking/ip-sysctl.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
arch/arm/boot/dts/bcm2711.dtsi
arch/arm/boot/dts/tegra124-nyan-big-fhd.dts
arch/arm/include/asm/assembler.h
arch/arm/include/asm/spectre.h [new file with mode: 0644]
arch/arm/include/asm/vmlinux.lds.h
arch/arm/kernel/Makefile
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/spectre.c [new file with mode: 0644]
arch/arm/kernel/traps.c
arch/arm/mach-mstar/Kconfig
arch/arm/mm/Kconfig
arch/arm/mm/proc-v7-bugs.c
arch/arm64/Kconfig
arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi
arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
arch/arm64/boot/dts/marvell/armada-37xx.dtsi
arch/arm64/boot/dts/mediatek/mt2712-evb.dts
arch/arm64/boot/dts/mediatek/mt2712e.dtsi
arch/arm64/boot/dts/nvidia/tegra194.dtsi
arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
arch/arm64/boot/dts/qcom/sm8350.dtsi
arch/arm64/boot/dts/qcom/sm8450.dtsi
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/fixmap.h
arch/arm64/include/asm/insn.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/mte-kasan.h
arch/arm64/include/asm/pgtable-prot.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/rwonce.h
arch/arm64/include/asm/sections.h
arch/arm64/include/asm/spectre.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/vectors.h [new file with mode: 0644]
arch/arm64/include/uapi/asm/kvm.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/image-vars.h
arch/arm64/kernel/proton-pack.c
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/hyp-entry.S
arch/arm64/kvm/hyp/nvhe/mm.c
arch/arm64/kvm/hyp/vhe/switch.c
arch/arm64/kvm/hypercalls.c
arch/arm64/kvm/psci.c
arch/arm64/mm/mmap.c
arch/arm64/mm/mmu.c
arch/arm64/tools/cpucaps
arch/powerpc/include/asm/book3s/64/mmu.h
arch/powerpc/include/asm/checksum.h
arch/powerpc/include/asm/kexec_ranges.h
arch/powerpc/include/asm/nmi.h
arch/riscv/Kconfig.erratas
arch/riscv/Kconfig.socs
arch/riscv/boot/dts/canaan/k210.dtsi
arch/riscv/include/asm/page.h
arch/riscv/include/asm/pgtable.h
arch/riscv/kernel/module.c
arch/riscv/mm/Makefile
arch/riscv/mm/init.c
arch/riscv/mm/kasan_init.c
arch/riscv/mm/physaddr.c
arch/s390/include/asm/extable.h
arch/s390/include/asm/ftrace.h
arch/s390/include/asm/ptrace.h
arch/s390/kernel/ftrace.c
arch/s390/kernel/mcount.S
arch/s390/kernel/setup.c
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/alternative.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/sgx/encl.c
arch/x86/kernel/e820.c
arch/x86/kernel/kdebugfs.c
arch/x86/kernel/ksysfs.c
arch/x86/kernel/kvm.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/module.c
arch/x86/kernel/setup.c
arch/x86/kernel/traps.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/x86.c
arch/x86/lib/retpoline.S
arch/x86/mm/ioremap.c
arch/x86/net/bpf_jit_comp.c
block/blk-mq.c
drivers/acpi/scan.c
drivers/atm/eni.c
drivers/bcma/driver_chipcommon.c
drivers/bcma/driver_chipcommon_pmu.c
drivers/bcma/driver_gpio.c
drivers/bcma/driver_pci_host.c
drivers/bcma/main.c
drivers/bcma/sprom.c
drivers/block/virtio_blk.c
drivers/block/xen-blkfront.c
drivers/bluetooth/Kconfig
drivers/bluetooth/ath3k.c
drivers/bluetooth/bcm203x.c
drivers/bluetooth/btmtk.c
drivers/bluetooth/btmtk.h
drivers/bluetooth/btmtksdio.c
drivers/bluetooth/btmtkuart.c
drivers/bluetooth/btrtl.c
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_bcm.c
drivers/bluetooth/hci_h5.c
drivers/char/virtio_console.c
drivers/clk/Kconfig
drivers/clk/qcom/dispcc-sc7180.c
drivers/clk/qcom/dispcc-sc7280.c
drivers/clk/qcom/dispcc-sm8250.c
drivers/clk/qcom/gdsc.c
drivers/clk/qcom/gdsc.h
drivers/crypto/qcom-rng.c
drivers/firmware/efi/apple-properties.c
drivers/firmware/efi/efi.c
drivers/gpio/gpio-sim.c
drivers/gpio/gpio-tegra186.c
drivers/gpio/gpio-ts4900.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/arm/Kconfig
drivers/gpu/drm/bridge/ti-sn65dsi86.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/exynos/exynos7_drm_decon.c
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i915/display/intel_psr.c
drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_pch.c
drivers/gpu/drm/i915/intel_pch.h
drivers/gpu/drm/panel/Kconfig
drivers/gpu/drm/sun4i/sun8i_mixer.h
drivers/hid/hid-debug.c
drivers/hid/hid-elo.c
drivers/hid/hid-input.c
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-nintendo.c
drivers/hid/hid-thrustmaster.c
drivers/hid/hid-vivaldi.c
drivers/infiniband/hw/mlx5/cong.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mr.c
drivers/input/keyboard/Kconfig
drivers/input/mouse/elan_i2c_core.c
drivers/input/touchscreen/goodix.c
drivers/iommu/amd/amd_iommu.h
drivers/iommu/amd/amd_iommu_types.h
drivers/iommu/amd/init.c
drivers/iommu/amd/io_pgtable.c
drivers/iommu/amd/iommu.c
drivers/iommu/intel/iommu.c
drivers/iommu/tegra-smmu.c
drivers/isdn/hardware/mISDN/hfcpci.c
drivers/isdn/hardware/mISDN/mISDNipac.c
drivers/isdn/hardware/mISDN/mISDNisar.c
drivers/isdn/mISDN/dsp_pipeline.c
drivers/mmc/core/block.c
drivers/mmc/core/mmc.c
drivers/mmc/core/mmc_ops.c
drivers/mmc/core/mmc_ops.h
drivers/mmc/core/sd.c
drivers/mmc/host/meson-gx-mmc.c
drivers/mtd/nand/raw/Kconfig
drivers/net/bareudp.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_sysfs_slave.c
drivers/net/can/dev/dev.c
drivers/net/can/rcar/rcar_canfd.c
drivers/net/can/slcan.c
drivers/net/can/spi/hi311x.c
drivers/net/can/spi/mcp251x.c
drivers/net/can/spi/mcp251xfd/Makefile
drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c [new file with mode: 0644]
drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c [new file with mode: 0644]
drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h [new file with mode: 0644]
drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
drivers/net/can/spi/mcp251xfd/mcp251xfd.h
drivers/net/can/usb/etas_es58x/es58x_fd.c
drivers/net/can/usb/gs_usb.c
drivers/net/can/usb/ucan.c
drivers/net/can/vcan.c
drivers/net/can/vxcan.c
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/b53/b53_priv.h
drivers/net/dsa/lantiq_gswip.c
drivers/net/dsa/microchip/ksz8.h
drivers/net/dsa/microchip/ksz8795.c
drivers/net/dsa/microchip/ksz8795_reg.h
drivers/net/dsa/microchip/ksz8795_spi.c
drivers/net/dsa/microchip/ksz9477.c
drivers/net/dsa/microchip/ksz9477_reg.h
drivers/net/dsa/microchip/ksz9477_spi.c
drivers/net/dsa/microchip/ksz_common.h
drivers/net/dsa/mt7530.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/chip.h
drivers/net/dsa/mv88e6xxx/devlink.c
drivers/net/dsa/mv88e6xxx/global1.h
drivers/net/dsa/mv88e6xxx/global1_vtu.c
drivers/net/dsa/ocelot/felix.c
drivers/net/dsa/ocelot/felix_vsc9959.c
drivers/net/dsa/qca8k.c
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/ethernet/8390/mcf8390.c
drivers/net/ethernet/arc/emac_mdio.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/ezchip/nps_enet.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
drivers/net/ethernet/freescale/dpaa2/dpmac.c
drivers/net/ethernet/freescale/dpaa2/dpmac.h
drivers/net/ethernet/freescale/enetc/enetc_mdio.c
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/freescale/xgmac_mdio.c
drivers/net/ethernet/fungible/Kconfig
drivers/net/ethernet/fungible/funeth/funeth_ktls.h
drivers/net/ethernet/fungible/funeth/funeth_main.c
drivers/net/ethernet/fungible/funeth/funeth_tx.c
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
drivers/net/ethernet/google/gve/gve_rx.c
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/i825xx/sun3_82586.h
drivers/net/ethernet/intel/e1000e/phy.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/intel/iavf/iavf.h
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
drivers/net/ethernet/intel/ice/Makefile
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_arfs.h
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_common.h
drivers/net/ethernet/intel/ice/ice_dcb.h
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
drivers/net/ethernet/intel/ice/ice_flex_pipe.h
drivers/net/ethernet/intel/ice/ice_flex_type.h
drivers/net/ethernet/intel/ice/ice_flow.c
drivers/net/ethernet/intel/ice/ice_flow.h
drivers/net/ethernet/intel/ice/ice_gnss.c
drivers/net/ethernet/intel/ice/ice_idc_int.h
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_osdep.h
drivers/net/ethernet/intel/ice/ice_protocol_type.h
drivers/net/ethernet/intel/ice/ice_ptp.c
drivers/net/ethernet/intel/ice/ice_repr.c
drivers/net/ethernet/intel/ice/ice_repr.h
drivers/net/ethernet/intel/ice/ice_sriov.c
drivers/net/ethernet/intel/ice/ice_sriov.h
drivers/net/ethernet/intel/ice/ice_switch.c
drivers/net/ethernet/intel/ice/ice_switch.h
drivers/net/ethernet/intel/ice/ice_tc_lib.c
drivers/net/ethernet/intel/ice/ice_tc_lib.h
drivers/net/ethernet/intel/ice/ice_trace.h
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_type.h
drivers/net/ethernet/intel/ice/ice_vf_lib.c [new file with mode: 0644]
drivers/net/ethernet/intel/ice/ice_vf_lib.h [new file with mode: 0644]
drivers/net/ethernet/intel/ice/ice_vf_lib_private.h [new file with mode: 0644]
drivers/net/ethernet/intel/ice/ice_vf_mbx.c [new file with mode: 0644]
drivers/net/ethernet/intel/ice/ice_vf_mbx.h [new file with mode: 0644]
drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
drivers/net/ethernet/intel/ice/ice_virtchnl.c [new file with mode: 0644]
drivers/net/ethernet/intel/ice/ice_virtchnl.h [new file with mode: 0644]
drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c [deleted file]
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h [deleted file]
drivers/net/ethernet/intel/ice/ice_xsk.h
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/intel/ixgbevf/mbx.h
drivers/net/ethernet/intel/ixgbevf/vf.c
drivers/net/ethernet/intel/ixgbevf/vf.h
drivers/net/ethernet/lantiq_xrx200.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/prestera/prestera_acl.c
drivers/net/ethernet/marvell/prestera/prestera_main.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/Makefile
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/cq.c
drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
drivers/net/ethernet/mellanox/mlx5/core/en/params.h
drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mcg.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/mr.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
drivers/net/ethernet/mellanox/mlx5/core/pd.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/rl.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
drivers/net/ethernet/mellanox/mlx5/core/uar.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/minimal.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/microchip/lan743x_ethtool.c
drivers/net/ethernet/microchip/lan743x_main.c
drivers/net/ethernet/microchip/lan743x_main.h
drivers/net/ethernet/microchip/lan743x_ptp.c
drivers/net/ethernet/microchip/lan743x_ptp.h
drivers/net/ethernet/microchip/lan966x/lan966x_main.c
drivers/net/ethernet/microchip/lan966x/lan966x_main.h
drivers/net/ethernet/microchip/sparx5/Makefile
drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
drivers/net/ethernet/microchip/sparx5/sparx5_main.c
drivers/net/ethernet/microchip/sparx5/sparx5_main.h
drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot.h
drivers/net/ethernet/mscc/ocelot_flower.c
drivers/net/ethernet/mscc/ocelot_net.c
drivers/net/ethernet/mscc/ocelot_vcap.c
drivers/net/ethernet/netronome/nfp/Makefile
drivers/net/ethernet/netronome/nfp/flower/action.c
drivers/net/ethernet/netronome/nfp/flower/main.c
drivers/net/ethernet/netronome/nfp/nfd3/dp.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfd3/rings.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfd3/xsk.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfdk/dp.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfdk/rings.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfp_app.c
drivers/net/ethernet/netronome/nfp/nfp_app.h
drivers/net/ethernet/netronome/nfp/nfp_devlink.c
drivers/net/ethernet/netronome/nfp/nfp_main.c
drivers/net/ethernet/netronome/nfp/nfp_main.h
drivers/net/ethernet/netronome/nfp/nfp_net.h
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
drivers/net/ethernet/netronome/nfp/nfp_net_dp.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfp_net_dp.h [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/netronome/nfp/nfp_net_main.c
drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c
drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h
drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
drivers/net/ethernet/netronome/nfp/nfp_port.c
drivers/net/ethernet/netronome/nfp/nfp_port.h
drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h [new file with mode: 0644]
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/packetengines/yellowfin.c
drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qed/qed_vf.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/sun/sunhme.c
drivers/net/ethernet/ti/am65-cpsw-ethtool.c
drivers/net/ethernet/ti/am65-cpsw-nuss.c
drivers/net/ethernet/ti/am65-cpsw-nuss.h
drivers/net/ethernet/ti/cpts.c
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/ethernet/ti/netcp_core.c
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/geneve.c
drivers/net/gtp.c
drivers/net/hamradio/baycom_epp.c
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/ipa/ipa_power.c
drivers/net/ipvlan/ipvlan_core.c
drivers/net/macvlan.c
drivers/net/mctp/mctp-serial.c
drivers/net/mdio/mdio-mscc-miim.c
drivers/net/mdio/mdio-mux.c
drivers/net/net_failover.c
drivers/net/netdevsim/Makefile
drivers/net/netdevsim/dev.c
drivers/net/netdevsim/hwstats.c [new file with mode: 0644]
drivers/net/netdevsim/netdevsim.h
drivers/net/phy/Kconfig
drivers/net/phy/dp83640.c
drivers/net/phy/dp83822.c
drivers/net/phy/marvell.c
drivers/net/phy/meson-gxl.c
drivers/net/phy/micrel.c
drivers/net/phy/microchip_t1.c
drivers/net/phy/mscc/mscc_main.c
drivers/net/phy/mscc/mscc_ptp.c
drivers/net/phy/nxp-c45-tja11xx.c
drivers/net/phy/phy_device.c
drivers/net/phy/sfp-bus.c
drivers/net/plip/plip.c
drivers/net/slip/slip.c
drivers/net/tap.c
drivers/net/tun.c
drivers/net/usb/asix.h
drivers/net/usb/asix_devices.c
drivers/net/usb/smsc95xx.c
drivers/net/vrf.c
drivers/net/vxlan/vxlan_core.c
drivers/net/wireless/ath/ar5523/ar5523.c
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/snoc.c
drivers/net/wireless/ath/ath10k/swap.h
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath11k/ahb.c
drivers/net/wireless/ath/ath11k/ce.h
drivers/net/wireless/ath/ath11k/core.c
drivers/net/wireless/ath/ath11k/core.h
drivers/net/wireless/ath/ath11k/dbring.c
drivers/net/wireless/ath/ath11k/debugfs.c
drivers/net/wireless/ath/ath11k/debugfs.h
drivers/net/wireless/ath/ath11k/dp.h
drivers/net/wireless/ath/ath11k/dp_rx.c
drivers/net/wireless/ath/ath11k/dp_tx.c
drivers/net/wireless/ath/ath11k/hal_desc.h
drivers/net/wireless/ath/ath11k/hal_rx.c
drivers/net/wireless/ath/ath11k/hal_rx.h
drivers/net/wireless/ath/ath11k/hw.c
drivers/net/wireless/ath/ath11k/hw.h
drivers/net/wireless/ath/ath11k/mac.c
drivers/net/wireless/ath/ath11k/mhi.c
drivers/net/wireless/ath/ath11k/pci.c
drivers/net/wireless/ath/ath11k/qmi.c
drivers/net/wireless/ath/ath11k/qmi.h
drivers/net/wireless/ath/ath11k/rx_desc.h
drivers/net/wireless/ath/ath11k/spectral.c
drivers/net/wireless/ath/ath11k/wmi.c
drivers/net/wireless/ath/ath11k/wmi.h
drivers/net/wireless/ath/ath6kl/txrx.c
drivers/net/wireless/ath/ath6kl/usb.c
drivers/net/wireless/ath/ath6kl/wmi.c
drivers/net/wireless/ath/ath6kl/wmi.h
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/mci.c
drivers/net/wireless/ath/ath9k/rng.c
drivers/net/wireless/ath/carl9170/carl9170.h
drivers/net/wireless/ath/carl9170/fwdesc.h
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/carl9170/wlan.h
drivers/net/wireless/ath/regd.c
drivers/net/wireless/ath/spectral_common.h
drivers/net/wireless/ath/wcn36xx/main.c
drivers/net/wireless/ath/wcn36xx/smd.c
drivers/net/wireless/ath/wcn36xx/txrx.c
drivers/net/wireless/ath/wcn36xx/wcn36xx.h
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h
drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
drivers/net/wireless/cisco/airo.c
drivers/net/wireless/intel/iwlwifi/Kconfig
drivers/net/wireless/intel/iwlwifi/cfg/22000.c
drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/dvm/main.c
drivers/net/wireless/intel/iwlwifi/dvm/rx.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.h
drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
drivers/net/wireless/intel/iwlwifi/fw/api/config.h
drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
drivers/net/wireless/intel/iwlwifi/fw/api/power.h
drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h
drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.h
drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
drivers/net/wireless/intel/iwlwifi/fw/file.h
drivers/net/wireless/intel/iwlwifi/fw/img.c
drivers/net/wireless/intel/iwlwifi/fw/img.h
drivers/net/wireless/intel/iwlwifi/fw/init.c
drivers/net/wireless/intel/iwlwifi/fw/paging.c
drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
drivers/net/wireless/intel/iwlwifi/fw/runtime.h
drivers/net/wireless/intel/iwlwifi/fw/smem.c
drivers/net/wireless/intel/iwlwifi/fw/uefi.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.h
drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
drivers/net/wireless/intel/iwlwifi/iwl-io.c
drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
drivers/net/wireless/intel/iwlwifi/iwl-prph.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.c
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mei/main.c
drivers/net/wireless/intel/iwlwifi/mei/net.c
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/quota.c
drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/mvm/rx.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/intel/iwlwifi/queue/tx.c
drivers/net/wireless/intel/iwlwifi/queue/tx.h
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/marvell/libertas/rx.c
drivers/net/wireless/marvell/mwifiex/uap_cmd.c
drivers/net/wireless/marvell/mwifiex/uap_txrx.c
drivers/net/wireless/marvell/mwifiex/util.c
drivers/net/wireless/mediatek/mt76/mac80211.c
drivers/net/wireless/mediatek/mt76/mt76.h
drivers/net/wireless/mediatek/mt76/mt7603/dma.c
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
drivers/net/wireless/mediatek/mt76/mt7615/main.c
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
drivers/net/wireless/mediatek/mt76/mt7615/usb.c
drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
drivers/net/wireless/mediatek/mt76/mt76_connac.h
drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
drivers/net/wireless/mediatek/mt76/mt7915/Makefile
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
drivers/net/wireless/mediatek/mt76/mt7915/dma.c
drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
drivers/net/wireless/mediatek/mt76/mt7915/init.c
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
drivers/net/wireless/mediatek/mt76/mt7915/main.c
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
drivers/net/wireless/mediatek/mt76/mt7915/soc.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
drivers/net/wireless/mediatek/mt76/mt7921/Kconfig
drivers/net/wireless/mediatek/mt76/mt7921/Makefile
drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
drivers/net/wireless/mediatek/mt76/mt7921/dma.c
drivers/net/wireless/mediatek/mt76/mt7921/init.c
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
drivers/net/wireless/mediatek/mt76/mt7921/mac.h
drivers/net/wireless/mediatek/mt76/mt7921/main.c
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
drivers/net/wireless/mediatek/mt76/mt7921/pci.c
drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
drivers/net/wireless/mediatek/mt76/mt7921/regs.h
drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
drivers/net/wireless/mediatek/mt76/mt7921/usb.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
drivers/net/wireless/mediatek/mt76/testmode.c
drivers/net/wireless/mediatek/mt76/usb.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
drivers/net/wireless/realtek/rtw88/coex.c
drivers/net/wireless/realtek/rtw88/coex.h
drivers/net/wireless/realtek/rtw88/debug.c
drivers/net/wireless/realtek/rtw88/debug.h
drivers/net/wireless/realtek/rtw88/fw.c
drivers/net/wireless/realtek/rtw88/fw.h
drivers/net/wireless/realtek/rtw88/mac80211.c
drivers/net/wireless/realtek/rtw88/main.c
drivers/net/wireless/realtek/rtw88/main.h
drivers/net/wireless/realtek/rtw88/rtw8723d.c
drivers/net/wireless/realtek/rtw88/rtw8821c.c
drivers/net/wireless/realtek/rtw88/rtw8822b.c
drivers/net/wireless/realtek/rtw88/rtw8822c.c
drivers/net/wireless/realtek/rtw88/sar.c
drivers/net/wireless/realtek/rtw88/tx.c
drivers/net/wireless/realtek/rtw89/coex.c
drivers/net/wireless/realtek/rtw89/core.c
drivers/net/wireless/realtek/rtw89/core.h
drivers/net/wireless/realtek/rtw89/debug.h
drivers/net/wireless/realtek/rtw89/efuse.c
drivers/net/wireless/realtek/rtw89/fw.c
drivers/net/wireless/realtek/rtw89/fw.h
drivers/net/wireless/realtek/rtw89/mac.c
drivers/net/wireless/realtek/rtw89/mac.h
drivers/net/wireless/realtek/rtw89/mac80211.c
drivers/net/wireless/realtek/rtw89/pci.c
drivers/net/wireless/realtek/rtw89/pci.h
drivers/net/wireless/realtek/rtw89/phy.c
drivers/net/wireless/realtek/rtw89/phy.h
drivers/net/wireless/realtek/rtw89/reg.h
drivers/net/wireless/realtek/rtw89/rtw8852a.c
drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c
drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h
drivers/net/wireless/realtek/rtw89/rtw8852ae.c
drivers/net/wireless/realtek/rtw89/rtw8852c.c [new file with mode: 0644]
drivers/net/wireless/realtek/rtw89/rtw8852c.h [new file with mode: 0644]
drivers/net/wireless/realtek/rtw89/rtw8852ce.c [new file with mode: 0644]
drivers/net/wireless/st/cw1200/queue.c
drivers/net/wireless/zydas/zd1201.c
drivers/net/xen-netfront.c
drivers/nfc/port100.c
drivers/nfc/st21nfca/i2c.c
drivers/phy/freescale/Kconfig
drivers/phy/freescale/Makefile
drivers/phy/freescale/phy-fsl-lynx-28g.c [new file with mode: 0644]
drivers/pinctrl/sunxi/pinctrl-sunxi.c
drivers/ptp/ptp_idt82p33.c
drivers/ptp/ptp_idt82p33.h
drivers/ptp/ptp_ocp.c
drivers/s390/net/ctcm_fsms.c
drivers/s390/net/ctcm_main.c
drivers/s390/net/netiucv.c
drivers/scsi/xen-scsifront.c
drivers/soc/mediatek/mt8192-mmsys.h
drivers/soc/samsung/exynos-chipid.c
drivers/spi/spi.c
drivers/staging/gdm724x/gdm_lte.c
drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
drivers/staging/rtl8723bs/core/rtw_recv.c
drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
drivers/staging/rtl8723bs/core/rtw_xmit.c
drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
drivers/staging/rtl8723bs/include/rtw_mlme.h
drivers/staging/wlan-ng/p80211netdev.c
drivers/thermal/thermal_netlink.c
drivers/usb/host/xen-hcd.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa.c
drivers/vdpa/vdpa_user/iova_domain.c
drivers/vdpa/virtio_pci/vp_vdpa.c
drivers/vhost/iotlb.c
drivers/vhost/vdpa.c
drivers/vhost/vhost.c
drivers/vhost/vsock.c
drivers/virtio/Kconfig
drivers/virtio/virtio.c
drivers/virtio/virtio_vdpa.c
drivers/xen/gntalloc.c
drivers/xen/grant-table.c
drivers/xen/pvcalls-front.c
drivers/xen/xenbus/xenbus_client.c
fs/afs/write.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/qgroup.c
fs/btrfs/relocation.c
fs/btrfs/root-tree.c
fs/btrfs/subpage.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/tree-checker.c
fs/btrfs/tree-log.c
fs/cachefiles/interface.c
fs/cachefiles/xattr.c
fs/fuse/dev.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/fuse/ioctl.c
fs/ocfs2/super.c
fs/pipe.c
fs/proc/task_mmu.c
fs/userfaultfd.c
include/linux/arm-smccc.h
include/linux/bpf.h
include/linux/dma-mapping.h
include/linux/ieee80211.h
include/linux/if_arp.h
include/linux/if_bridge.h
include/linux/if_macvlan.h
include/linux/mfd/idt82p33_reg.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/port.h
include/linux/mm.h
include/linux/mm_inline.h
include/linux/mm_types.h
include/linux/netdevice.h
include/linux/pci_ids.h
include/linux/phy.h
include/linux/ptp_classify.h
include/linux/skbuff.h
include/linux/vdpa.h
include/linux/virtio.h
include/linux/virtio_config.h
include/linux/watch_queue.h
include/net/af_vsock.h
include/net/bluetooth/bluetooth.h
include/net/bluetooth/hci.h
include/net/bonding.h
include/net/cfg80211.h
include/net/checksum.h
include/net/devlink.h
include/net/dsa.h
include/net/esp.h
include/net/flow.h
include/net/flow_offload.h
include/net/gtp.h
include/net/mac80211.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_conntrack_helper.h
include/net/netfilter/nf_tables.h
include/net/netfilter/nft_fib.h
include/net/netfilter/nft_meta.h
include/net/netns/ipv4.h
include/net/netns/xfrm.h
include/net/switchdev.h
include/net/tc_act/tc_vlan.h
include/net/tcp.h
include/net/tls.h
include/net/xfrm.h
include/soc/mscc/ocelot.h
include/soc/mscc/ocelot_vcap.h
include/trace/events/cachefiles.h
include/trace/events/mptcp.h
include/trace/events/skb.h
include/uapi/linux/can/isotp.h
include/uapi/linux/gtp.h
include/uapi/linux/if_bridge.h
include/uapi/linux/if_link.h
include/uapi/linux/if_tunnel.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/magic.h
include/uapi/linux/mptcp.h
include/uapi/linux/nl80211.h
include/uapi/linux/openvswitch.h
include/uapi/linux/pkt_cls.h
include/uapi/linux/rfkill.h
include/uapi/linux/rtnetlink.h
include/xen/grant_table.h
kernel/configs/debug.config
kernel/dma/swiotlb.c
kernel/fork.c
kernel/sys.c
kernel/sysctl.c
kernel/trace/blktrace.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_osnoise.c
kernel/watch_queue.c
mm/gup.c
mm/madvise.c
mm/memfd.c
mm/mempolicy.c
mm/mlock.c
mm/mmap.c
mm/mprotect.c
mm/swap_state.c
mm/util.c
net/8021q/vlanproc.c
net/9p/trans_xen.c
net/ax25/af_ax25.c
net/batman-adv/bridge_loop_avoidance.c
net/bluetooth/6lowpan.c
net/bluetooth/af_bluetooth.c
net/bluetooth/bnep/core.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sync.c
net/bluetooth/l2cap_core.c
net/bluetooth/mgmt.c
net/bluetooth/msft.c
net/bridge/Makefile
net/bridge/br.c
net/bridge/br_input.c
net/bridge/br_mst.c [new file with mode: 0644]
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_stp.c
net/bridge/br_switchdev.c
net/bridge/br_vlan.c
net/bridge/br_vlan_options.c
net/bridge/netfilter/nf_conntrack_bridge.c
net/bridge/netfilter/nft_meta_bridge.c
net/bridge/netfilter/nft_reject_bridge.c
net/can/af_can.c
net/can/isotp.c
net/core/dev.c
net/core/devlink.c
net/core/flow_dissector.c
net/core/gro.c
net/core/gro_cells.c
net/core/ptp_classifier.c
net/core/rtnetlink.c
net/core/sock.c
net/core/xdp.c
net/dsa/dsa.c
net/dsa/dsa2.c
net/dsa/dsa_priv.h
net/dsa/port.c
net/dsa/slave.c
net/dsa/switch.c
net/dsa/tag_dsa.c
net/dsa/tag_rtl8_4.c
net/hsr/hsr_device.c
net/ipv4/esp4.c
net/ipv4/esp4_offload.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/netfilter/nf_nat_h323.c
net/ipv4/netfilter/nft_dup_ipv4.c
net/ipv4/netfilter/nft_fib_ipv4.c
net/ipv4/netfilter/nft_reject_ipv4.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/xfrm4_policy.c
net/ipv6/esp6.c
net/ipv6/esp6_offload.c
net/ipv6/ip6_output.c
net/ipv6/netfilter/nft_dup_ipv6.c
net/ipv6/netfilter/nft_fib_ipv6.c
net/ipv6/netfilter/nft_reject_ipv6.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/xfrm6_output.c
net/ipv6/xfrm6_policy.c
net/key/af_key.c
net/l3mdev/l3mdev.c
net/mac80211/Makefile
net/mac80211/agg-rx.c
net/mac80211/airtime.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/eht.c [new file with mode: 0644]
net/mac80211/ieee80211_i.h
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mlme.c
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/vht.c
net/mptcp/pm_netlink.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/mptcp/subflow.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_proto_udp.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_flow_table_ip.c
net/netfilter/nf_flow_table_offload.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_bitwise.c
net/netfilter/nft_byteorder.c
net/netfilter/nft_cmp.c
net/netfilter/nft_compat.c
net/netfilter/nft_connlimit.c
net/netfilter/nft_counter.c
net/netfilter/nft_ct.c
net/netfilter/nft_dup_netdev.c
net/netfilter/nft_dynset.c
net/netfilter/nft_exthdr.c
net/netfilter/nft_fib.c
net/netfilter/nft_fib_inet.c
net/netfilter/nft_fib_netdev.c
net/netfilter/nft_flow_offload.c
net/netfilter/nft_fwd_netdev.c
net/netfilter/nft_hash.c
net/netfilter/nft_immediate.c
net/netfilter/nft_last.c
net/netfilter/nft_limit.c
net/netfilter/nft_log.c
net/netfilter/nft_lookup.c
net/netfilter/nft_masq.c
net/netfilter/nft_meta.c
net/netfilter/nft_nat.c
net/netfilter/nft_numgen.c
net/netfilter/nft_objref.c
net/netfilter/nft_osf.c
net/netfilter/nft_payload.c
net/netfilter/nft_queue.c
net/netfilter/nft_quota.c
net/netfilter/nft_range.c
net/netfilter/nft_redir.c
net/netfilter/nft_reject_inet.c
net/netfilter/nft_reject_netdev.c
net/netfilter/nft_rt.c
net/netfilter/nft_socket.c
net/netfilter/nft_synproxy.c
net/netfilter/nft_tproxy.c
net/netfilter/nft_tunnel.c
net/netfilter/nft_xfrm.c
net/netlabel/netlabel_kapi.c
net/openvswitch/flow_netlink.c
net/packet/af_packet.c
net/phonet/af_phonet.c
net/rfkill/core.c
net/sched/act_ct.c
net/sched/act_vlan.c
net/sched/cls_flower.c
net/sctp/diag.c
net/smc/Makefile
net/smc/af_smc.c
net/smc/smc_sysctl.c
net/smc/smc_sysctl.h
net/smc/smc_wr.c
net/tipc/bearer.c
net/tipc/link.c
net/tls/tls_device.c
net/tls/tls_main.c
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/virtio_transport.c
net/vmw_vsock/vmci_transport.c
net/wireless/chan.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/util.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_interface.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
sound/soc/codecs/cs4265.c
sound/soc/soc-ops.c
sound/x86/intel_hdmi_audio.c
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/x86/include/asm/cpufeatures.h
tools/perf/bench/epoll-ctl.c
tools/perf/util/parse-events.c
tools/testing/selftests/drivers/net/mlxsw/hw_stats_l3.sh [new file with mode: 0755]
tools/testing/selftests/drivers/net/netdevsim/hw_stats_l3.sh [new file with mode: 0755]
tools/testing/selftests/net/cmsg_sender.c
tools/testing/selftests/net/fcnal-test.sh
tools/testing/selftests/net/forwarding/bridge_locked_port.sh
tools/testing/selftests/net/forwarding/lib.sh
tools/testing/selftests/net/mptcp/mptcp_join.sh
tools/testing/selftests/net/mptcp/pm_nl_ctl.c
tools/testing/selftests/net/pmtu.sh
tools/testing/selftests/net/psock_fanout.c
tools/testing/selftests/net/reuseport_bpf_numa.c
tools/testing/selftests/net/toeplitz.c
tools/testing/selftests/net/txtimestamp.c
tools/testing/selftests/netfilter/nft_nat.sh
tools/testing/selftests/tc-testing/tdc_config.py
tools/testing/selftests/vm/Makefile
tools/testing/selftests/vm/hugepage-mremap.c
tools/testing/selftests/vm/run_vmtests.sh
tools/testing/selftests/vm/userfaultfd.c
tools/testing/vsock/vsock_test.c
tools/virtio/linux/mm_types.h [new file with mode: 0644]
tools/virtio/virtio_test.c

index 10ee110..97ccdf1 100644
--- a/.mailmap
+++ b/.mailmap
@@ -187,6 +187,8 @@ Jiri Slaby <jirislaby@kernel.org> <jslaby@novell.com>
 Jiri Slaby <jirislaby@kernel.org> <jslaby@suse.com>
 Jiri Slaby <jirislaby@kernel.org> <jslaby@suse.cz>
 Jiri Slaby <jirislaby@kernel.org> <xslaby@fi.muni.cz>
+Jisheng Zhang <jszhang@kernel.org> <jszhang@marvell.com>
+Jisheng Zhang <jszhang@kernel.org> <Jisheng.Zhang@synaptics.com>
 Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
 Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
 John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
@@ -216,6 +218,7 @@ Koushik <raghavendra.koushik@neterion.com>
 Krishna Manikandan <quic_mkrishn@quicinc.com> <mkrishn@codeaurora.org>
 Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
 Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
+Krzysztof Kozlowski <krzk@kernel.org> <krzysztof.kozlowski@canonical.com>
 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 Kuogee Hsieh <quic_khsieh@quicinc.com> <khsieh@codeaurora.org>
 Leonardo Bras <leobras.c@gmail.com> <leonardo@linux.ibm.com>
index 5bf7848..2204781 100644
--- a/Documentation/ABI/testing/sysfs-timecard
+++ b/Documentation/ABI/testing/sysfs-timecard
@@ -37,8 +37,15 @@ Description: (RO) Set of available destinations (sinks) for a SMA
                 PPS2   signal is sent to the PPS2 selector
                 TS1    signal is sent to timestamper 1
                 TS2    signal is sent to timestamper 2
+                TS3    signal is sent to timestamper 3
+                TS4    signal is sent to timestamper 4
                 IRIG   signal is sent to the IRIG-B module
                 DCF    signal is sent to the DCF module
+                FREQ1  signal is sent to frequency counter 1
+                FREQ2  signal is sent to frequency counter 2
+                FREQ3  signal is sent to frequency counter 3
+                FREQ4  signal is sent to frequency counter 4
+                None   signal input is disabled
                 =====  ================================================
 
 What:          /sys/class/timecard/ocpN/available_sma_outputs
@@ -50,10 +57,16 @@ Description:        (RO) Set of available sources for a SMA output signal.
                 10Mhz  output is from the 10Mhz reference clock
                 PHC    output PPS is from the PHC clock
                 MAC    output PPS is from the Miniature Atomic Clock
-                GNSS   output PPS is from the GNSS module
+                GNSS1  output PPS is from the first GNSS module
                 GNSS2  output PPS is from the second GNSS module
                 IRIG   output is from the PHC, in IRIG-B format
                 DCF    output is from the PHC, in DCF format
+                GEN1   output is from frequency generator 1
+                GEN2   output is from frequency generator 2
+                GEN3   output is from frequency generator 3
+                GEN4   output is from frequency generator 4
+                GND    output is GND
+                VCC    output is VCC
                 =====  ================================================
 
 What:          /sys/class/timecard/ocpN/clock_source
@@ -75,6 +88,85 @@ Contact:     Jonathan Lemon <jonathan.lemon@gmail.com>
 Description:   (RO) Contains the current offset value used by the firmware
                for internal disciplining of the atomic clock.
 
+What:          /sys/class/timecard/ocpN/freqX
+Date:          March 2022
+Contact:       Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:   (RO) Optional directory containing the sysfs nodes for
+               frequency counter <X>.
+
+What:          /sys/class/timecard/ocpN/freqX/frequency
+Date:          March 2022
+Contact:       Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:   (RO) Contains the measured frequency over the specified
+               measurement period.
+
+What:          /sys/class/timecard/ocpN/freqX/seconds
+Date:          March 2022
+Contact:       Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:   (RW) Specifies the number of seconds from 0-255 that the
+               frequency should be measured over.  Write 0 to disable.
+
+What:          /sys/class/timecard/ocpN/genX
+Date:          March 2022
+Contact:       Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:   (RO) Optional directory containing the sysfs nodes for
+               frequency generator <X>.
+
+What:          /sys/class/timecard/ocpN/genX/duty
+Date:          March 2022
+Contact:       Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:   (RO) Specifies the signal duty cycle as a percentage from 1-99.
+
+What:          /sys/class/timecard/ocpN/genX/period
+Date:          March 2022
+Contact:       Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:   (RO) Specifies the signal period in nanoseconds.
+
+What:          /sys/class/timecard/ocpN/genX/phase
+Date:          March 2022
+Contact:       Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:   (RO) Specifies the signal phase offset in nanoseconds.
+
+What:          /sys/class/timecard/ocpN/genX/polarity
+Date:          March 2022
+Contact:       Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:   (RO) Specifies the signal polarity, either 1 or 0.
+
+What:          /sys/class/timecard/ocpN/genX/running
+Date:          March 2022
+Contact:       Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:   (RO) Either 0 or 1, showing if the signal generator is running.
+
+What:          /sys/class/timecard/ocpN/genX/start
+Date:          March 2022
+Contact:       Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:   (RO) Shows the time in <sec>.<nsec> that the signal generator
+               started running.
+
+What:          /sys/class/timecard/ocpN/genX/signal
+Date:          March 2022
+Contact:       Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:   (RW) Used to start the signal generator, and summarize
+               the current status.
+
+               The signal generator may be started by writing the signal
+               period, followed by the optional signal values.  If the
+               optional values are not provided, they default to the current
+               settings, which may be obtained from the other sysfs nodes.
+
+                   period [duty [phase [polarity]]]
+
+               echo 500000000 > signal       # 1/2 second period
+               echo 1000000 40 100 > signal
+               echo 0 > signal               # turn off generator
+
+               Period and phase are specified in nanoseconds.  Duty cycle is
+               a percentage from 1-99.  Polarity is 1 or 0.
+
+               Reading this node will return:
+
+                   period duty phase polarity start_time
+
 What:          /sys/class/timecard/ocpN/gnss_sync
 Date:          September 2021
 Contact:       Jonathan Lemon <jonathan.lemon@gmail.com>
index a2b22d5..9e95568 100644
--- a/Documentation/admin-guide/hw-vuln/spectre.rst
+++ b/Documentation/admin-guide/hw-vuln/spectre.rst
@@ -60,8 +60,8 @@ privileged data touched during the speculative execution.
 Spectre variant 1 attacks take advantage of speculative execution of
 conditional branches, while Spectre variant 2 attacks use speculative
 execution of indirect branches to leak privileged memory.
-See :ref:`[1] <spec_ref1>` :ref:`[5] <spec_ref5>` :ref:`[7] <spec_ref7>`
-:ref:`[10] <spec_ref10>` :ref:`[11] <spec_ref11>`.
+See :ref:`[1] <spec_ref1>` :ref:`[5] <spec_ref5>` :ref:`[6] <spec_ref6>`
+:ref:`[7] <spec_ref7>` :ref:`[10] <spec_ref10>` :ref:`[11] <spec_ref11>`.
 
 Spectre variant 1 (Bounds Check Bypass)
 ---------------------------------------
@@ -131,6 +131,19 @@ steer its indirect branch speculations to gadget code, and measure the
 speculative execution's side effects left in level 1 cache to infer the
 victim's data.
 
+Yet another variant 2 attack vector is for the attacker to poison the
+Branch History Buffer (BHB) to speculatively steer an indirect branch
+to a specific Branch Target Buffer (BTB) entry, even if the entry isn't
+associated with the source address of the indirect branch. Specifically,
+the BHB might be shared across privilege levels even in the presence of
+Enhanced IBRS.
+
+Currently the only known real-world BHB attack vector is via
+unprivileged eBPF. Therefore, it's highly recommended to not enable
+unprivileged eBPF, especially when eIBRS is used (without retpolines).
+For a full mitigation against BHB attacks, it's recommended to use
+retpolines (or eIBRS combined with retpolines).
+
 Attack scenarios
 ----------------
 
@@ -364,13 +377,15 @@ The possible values in this file are:
 
   - Kernel status:
 
-  ====================================  =================================
-  'Not affected'                        The processor is not vulnerable
-  'Vulnerable'                          Vulnerable, no mitigation
-  'Mitigation: Full generic retpoline'  Software-focused mitigation
-  'Mitigation: Full AMD retpoline'      AMD-specific software mitigation
-  'Mitigation: Enhanced IBRS'           Hardware-focused mitigation
-  ====================================  =================================
+  ========================================  =================================
+  'Not affected'                            The processor is not vulnerable
+  'Mitigation: None'                        Vulnerable, no mitigation
+  'Mitigation: Retpolines'                  Use Retpoline thunks
+  'Mitigation: LFENCE'                      Use LFENCE instructions
+  'Mitigation: Enhanced IBRS'               Hardware-focused mitigation
+  'Mitigation: Enhanced IBRS + Retpolines'  Hardware-focused + Retpolines
+  'Mitigation: Enhanced IBRS + LFENCE'      Hardware-focused + LFENCE
+  ========================================  =================================
 
   - Firmware status: Show if Indirect Branch Restricted Speculation (IBRS) is
     used to protect against Spectre variant 2 attacks when calling firmware (x86 only).
@@ -583,12 +598,13 @@ kernel command line.
 
                Specific mitigations can also be selected manually:
 
-               retpoline
-                                       replace indirect branches
-               retpoline,generic
-                                       google's original retpoline
-               retpoline,amd
-                                       AMD-specific minimal thunk
+                retpoline               auto pick between generic,lfence
+                retpoline,generic       Retpolines
+                retpoline,lfence        LFENCE; indirect branch
+                retpoline,amd           alias for retpoline,lfence
+                eibrs                   enhanced IBRS
+                eibrs,retpoline         enhanced IBRS + Retpolines
+                eibrs,lfence            enhanced IBRS + LFENCE
 
                Not specifying this option is equivalent to
                spectre_v2=auto.
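
For example, booting with the following (illustrative) kernel command line option selects the combined eIBRS + retpoline mitigation from the list above:

        spectre_v2=eibrs,retpoline
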
@@ -599,7 +615,7 @@ kernel command line.
                spectre_v2=off. Spectre variant 1 mitigations
                cannot be disabled.
 
-For spectre_v2_user see :doc:`/admin-guide/kernel-parameters`.
+For spectre_v2_user see Documentation/admin-guide/kernel-parameters.txt
 
 Mitigation selection guide
 --------------------------
@@ -681,7 +697,7 @@ AMD white papers:
 
 .. _spec_ref6:
 
-[6] `Software techniques for managing speculation on AMD processors <https://developer.amd.com/wp-content/resources/90343-B_SoftwareTechniquesforManagingSpeculation_WP_7-18Update_FNL.pdf>`_.
+[6] `Software techniques for managing speculation on AMD processors <https://developer.amd.com/wp-content/resources/Managing-Speculation-on-AMD-Processors.pdf>`_.
 
 ARM white papers:
 
index f5a27f0..7123524 100644 (file)
                        Specific mitigations can also be selected manually:
 
                        retpoline         - replace indirect branches
-                       retpoline,generic - google's original retpoline
-                       retpoline,amd     - AMD-specific minimal thunk
+                       retpoline,generic - Retpolines
+                       retpoline,lfence  - LFENCE; indirect branch
+                       retpoline,amd     - alias for retpoline,lfence
+                       eibrs             - enhanced IBRS
+                       eibrs,retpoline   - enhanced IBRS + Retpolines
+                       eibrs,lfence      - enhanced IBRS + LFENCE
 
                        Not specifying this option is equivalent to
                        spectre_v2=auto.
index bfc2870..6e2e416 100644 (file)
@@ -23,7 +23,7 @@ There are four components to pagemap:
     * Bit  56    page exclusively mapped (since 4.2)
     * Bit  57    pte is uffd-wp write-protected (since 5.13) (see
       :ref:`Documentation/admin-guide/mm/userfaultfd.rst <userfaultfd>`)
-    * Bits 57-60 zero
+    * Bits 58-60 zero
     * Bit  61    page is file-page or shared-anon (since 3.5)
     * Bit  62    page swapped
     * Bit  63    page present
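
The bit layout above can be exercised from userspace by reading /proc/self/pagemap, which holds one 64-bit entry per virtual page; a minimal sketch in C (error handling abbreviated, bit positions as documented above):

        #include <fcntl.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <unistd.h>

        /* Read the pagemap entry for one stack page of the current process
         * and test the documented bits. */
        int main(void)
        {
                char page[1];
                uint64_t entry;
                long pagesize = sysconf(_SC_PAGESIZE);
                int fd = open("/proc/self/pagemap", O_RDONLY);

                page[0] = 1;    /* touch the page so it is actually mapped */
                if (fd < 0)
                        return 1;
                if (pread(fd, &entry, sizeof(entry),
                          ((uintptr_t)page / pagesize) * sizeof(entry)) != sizeof(entry))
                        return 1;
                printf("present=%d swapped=%d file/shared-anon=%d\n",
                       (int)((entry >> 63) & 1), (int)((entry >> 62) & 1),
                       (int)((entry >> 61) & 1));
                close(fd);
                return 0;
        }
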
index 17706dc..1887d92 100644 (file)
@@ -130,11 +130,3 @@ accesses to DMA buffers in both privileged "supervisor" and unprivileged
 subsystem that the buffer is fully accessible at the elevated privilege
 level (and ideally inaccessible or at least read-only at the
 lesser-privileged levels).
-
-DMA_ATTR_OVERWRITE
-------------------
-
-This is a hint to the DMA-mapping subsystem that the device is expected to
-overwrite the entire mapped size, thus the caller does not require any of the
-previous buffer contents to be preserved. This allows bounce-buffering
-implementations to optimise DMA_FROM_DEVICE transfers.
index 370aab2..04ff0b5 100644 (file)
@@ -48,7 +48,6 @@ description: |
         sdx65
         sm7225
         sm8150
-        sdx65
         sm8250
         sm8350
         sm8450
@@ -228,11 +227,6 @@ properties:
               - qcom,sdx65-mtp
           - const: qcom,sdx65
 
-      - items:
-          - enum:
-              - qcom,sdx65-mtp
-          - const: qcom,sdx65
-
       - items:
           - enum:
               - qcom,ipq6018-cp01
index 1d3e88d..25b5ef3 100644 (file)
@@ -91,22 +91,7 @@ properties:
         $ref: /schemas/graph.yaml#/$defs/port-base
         unevaluatedProperties: false
         description:
-          MIPI DSI/DPI input.
-
-        properties:
-          endpoint:
-            $ref: /schemas/media/video-interfaces.yaml#
-            type: object
-            additionalProperties: false
-
-            properties:
-              remote-endpoint: true
-
-              bus-type:
-                enum: [1, 5]
-                default: 1
-
-              data-lanes: true
+          Video port for MIPI DSI input.
 
       port@1:
         $ref: /schemas/graph.yaml#/properties/port
@@ -155,8 +140,6 @@ examples:
                     reg = <0>;
                     anx7625_in: endpoint {
                         remote-endpoint = <&mipi_dsi>;
-                        bus-type = <5>;
-                        data-lanes = <0 1 2 3>;
                     };
                 };
 
index be4a2df..b85819f 100644 (file)
@@ -39,7 +39,7 @@ patternProperties:
   '^phy@[a-f0-9]+$':
     $ref: ../phy/bcm-ns-usb2-phy.yaml
 
-  '^pin-controller@[a-f0-9]+$':
+  '^pinctrl@[a-f0-9]+$':
     $ref: ../pinctrl/brcm,ns-pinmux.yaml
 
   '^syscon@[a-f0-9]+$':
@@ -94,7 +94,7 @@ examples:
             reg = <0x180 0x4>;
         };
 
-        pin-controller@1c0 {
+        pinctrl@1c0 {
             compatible = "brcm,bcm4708-pinmux";
             reg = <0x1c0 0x24>;
             reg-names = "cru_gpio_control";
index c00ad3e..ad285cb 100644 (file)
@@ -126,7 +126,7 @@ properties:
       clock-frequency:
         const: 12288000
 
-  lochnagar-pinctrl:
+  pinctrl:
     type: object
     $ref: /schemas/pinctrl/cirrus,lochnagar.yaml#
 
@@ -255,7 +255,7 @@ required:
   - reg
   - reset-gpios
   - lochnagar-clk
-  - lochnagar-pinctrl
+  - pinctrl
 
 additionalProperties: false
 
@@ -293,7 +293,7 @@ examples:
                 clock-frequency = <32768>;
             };
 
-            lochnagar-pinctrl {
+            pinctrl {
                 compatible = "cirrus,lochnagar-pinctrl";
 
                 gpio-controller;
index 546c6e6..91a3554 100644 (file)
@@ -35,6 +35,8 @@ properties:
               - renesas,r9a07g044-canfd    # RZ/G2{L,LC}
           - const: renesas,rzg2l-canfd     # RZ/G2L family
 
+      - const: renesas,r8a779a0-canfd      # R-Car V3U
+
   reg:
     maxItems: 1
 
diff --git a/Documentation/devicetree/bindings/net/can/xilinx,can.yaml b/Documentation/devicetree/bindings/net/can/xilinx,can.yaml
new file mode 100644 (file)
index 0000000..65af818
--- /dev/null
@@ -0,0 +1,161 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/can/xilinx,can.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title:
+  Xilinx Axi CAN/Zynq CANPS controller
+
+maintainers:
+  - Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+
+properties:
+  compatible:
+    enum:
+      - xlnx,zynq-can-1.0
+      - xlnx,axi-can-1.00.a
+      - xlnx,canfd-1.0
+      - xlnx,canfd-2.0
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    minItems: 1
+    maxItems: 2
+
+  clock-names:
+    maxItems: 2
+
+  power-domains:
+    maxItems: 1
+
+  tx-fifo-depth:
+    $ref: "/schemas/types.yaml#/definitions/uint32"
+    description: CAN Tx fifo depth (Zynq, Axi CAN).
+
+  rx-fifo-depth:
+    $ref: "/schemas/types.yaml#/definitions/uint32"
+    description: CAN Rx fifo depth (Zynq, Axi CAN, CAN FD in sequential Rx mode)
+
+  tx-mailbox-count:
+    $ref: "/schemas/types.yaml#/definitions/uint32"
+    description: CAN Tx mailbox buffer count (CAN FD)
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+
+unevaluatedProperties: false
+
+allOf:
+  - $ref: can-controller.yaml#
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - xlnx,zynq-can-1.0
+
+    then:
+      properties:
+        clock-names:
+          items:
+            - const: can_clk
+            - const: pclk
+      required:
+        - tx-fifo-depth
+        - rx-fifo-depth
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - xlnx,axi-can-1.00.a
+
+    then:
+      properties:
+        clock-names:
+          items:
+            - const: can_clk
+            - const: s_axi_aclk
+      required:
+        - tx-fifo-depth
+        - rx-fifo-depth
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - xlnx,canfd-1.0
+              - xlnx,canfd-2.0
+
+    then:
+      properties:
+        clock-names:
+          items:
+            - const: can_clk
+            - const: s_axi_aclk
+      required:
+        - tx-mailbox-count
+        - rx-fifo-depth
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    can@e0008000 {
+        compatible = "xlnx,zynq-can-1.0";
+        reg = <0xe0008000 0x1000>;
+        clocks = <&clkc 19>, <&clkc 36>;
+        clock-names = "can_clk", "pclk";
+        interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+        interrupt-parent = <&intc>;
+        tx-fifo-depth = <0x40>;
+        rx-fifo-depth = <0x40>;
+    };
+
+  - |
+    can@40000000 {
+        compatible = "xlnx,axi-can-1.00.a";
+        reg = <0x40000000 0x10000>;
+        clocks = <&clkc 0>, <&clkc 1>;
+        clock-names = "can_clk", "s_axi_aclk";
+        interrupt-parent = <&intc>;
+        interrupts = <GIC_SPI 59 IRQ_TYPE_EDGE_RISING>;
+        tx-fifo-depth = <0x40>;
+        rx-fifo-depth = <0x40>;
+    };
+
+  - |
+    can@40000000 {
+        compatible = "xlnx,canfd-1.0";
+        reg = <0x40000000 0x2000>;
+        clocks = <&clkc 0>, <&clkc 1>;
+        clock-names = "can_clk", "s_axi_aclk";
+        interrupt-parent = <&intc>;
+        interrupts = <GIC_SPI 59 IRQ_TYPE_EDGE_RISING>;
+        tx-mailbox-count = <0x20>;
+        rx-fifo-depth = <0x20>;
+    };
+
+  - |
+    can@ff060000 {
+        compatible = "xlnx,canfd-2.0";
+        reg = <0xff060000 0x6000>;
+        clocks = <&clkc 0>, <&clkc 1>;
+        clock-names = "can_clk", "s_axi_aclk";
+        interrupt-parent = <&intc>;
+        interrupts = <GIC_SPI 59 IRQ_TYPE_EDGE_RISING>;
+        tx-mailbox-count = <0x20>;
+        rx-fifo-depth = <0x40>;
+    };
diff --git a/Documentation/devicetree/bindings/net/can/xilinx_can.txt b/Documentation/devicetree/bindings/net/can/xilinx_can.txt
deleted file mode 100644 (file)
index 100cc40..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-Xilinx Axi CAN/Zynq CANPS controller Device Tree Bindings
----------------------------------------------------------
-
-Required properties:
-- compatible           : Should be:
-                         - "xlnx,zynq-can-1.0" for Zynq CAN controllers
-                         - "xlnx,axi-can-1.00.a" for Axi CAN controllers
-                         - "xlnx,canfd-1.0" for CAN FD controllers
-                         - "xlnx,canfd-2.0" for CAN FD 2.0 controllers
-- reg                  : Physical base address and size of the controller
-                         registers map.
-- interrupts           : Property with a value describing the interrupt
-                         number.
-- clock-names          : List of input clock names
-                         - "can_clk", "pclk" (For CANPS),
-                         - "can_clk", "s_axi_aclk" (For AXI CAN and CAN FD).
-                         (See clock bindings for details).
-- clocks               : Clock phandles (see clock bindings for details).
-- tx-fifo-depth                : Can Tx fifo depth (Zynq, Axi CAN).
-- rx-fifo-depth                : Can Rx fifo depth (Zynq, Axi CAN, CAN FD in
-                          sequential Rx mode).
-- tx-mailbox-count     : Can Tx mailbox buffer count (CAN FD).
-- rx-mailbox-count     : Can Rx mailbox buffer count (CAN FD in mailbox Rx
-                         mode).
-
-
-Example:
-
-For Zynq CANPS Dts file:
-       zynq_can_0: can@e0008000 {
-                       compatible = "xlnx,zynq-can-1.0";
-                       clocks = <&clkc 19>, <&clkc 36>;
-                       clock-names = "can_clk", "pclk";
-                       reg = <0xe0008000 0x1000>;
-                       interrupts = <0 28 4>;
-                       interrupt-parent = <&intc>;
-                       tx-fifo-depth = <0x40>;
-                       rx-fifo-depth = <0x40>;
-               };
-For Axi CAN Dts file:
-       axi_can_0: axi-can@40000000 {
-                       compatible = "xlnx,axi-can-1.00.a";
-                       clocks = <&clkc 0>, <&clkc 1>;
-                       clock-names = "can_clk","s_axi_aclk" ;
-                       reg = <0x40000000 0x10000>;
-                       interrupt-parent = <&intc>;
-                       interrupts = <0 59 1>;
-                       tx-fifo-depth = <0x40>;
-                       rx-fifo-depth = <0x40>;
-               };
-For CAN FD Dts file:
-       canfd_0: canfd@40000000 {
-                       compatible = "xlnx,canfd-1.0";
-                       clocks = <&clkc 0>, <&clkc 1>;
-                       clock-names = "can_clk", "s_axi_aclk";
-                       reg = <0x40000000 0x2000>;
-                       interrupt-parent = <&intc>;
-                       interrupts = <0 59 1>;
-                       tx-mailbox-count = <0x20>;
-                       rx-fifo-depth = <0x20>;
-               };
index 691f886..2bf3157 100644 (file)
@@ -5,6 +5,7 @@ Required properties:
        "marvell,armada-370-neta"
        "marvell,armada-xp-neta"
        "marvell,armada-3700-neta"
+       "marvell,armada-ac5-neta"
 - reg: address and length of the register set for the device.
 - interrupts: interrupt for the device
 - phy: See ethernet.txt file in the same directory.
diff --git a/Documentation/devicetree/bindings/net/mediatek-dwmac.txt b/Documentation/devicetree/bindings/net/mediatek-dwmac.txt
deleted file mode 100644 (file)
index afbcaeb..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-MediaTek DWMAC glue layer controller
-
-This file documents platform glue layer for stmmac.
-Please see stmmac.txt for the other unchanged properties.
-
-The device node has following properties.
-
-Required properties:
-- compatible:  Should be "mediatek,mt2712-gmac" for MT2712 SoC
-- reg:  Address and length of the register set for the device
-- interrupts:  Should contain the MAC interrupts
-- interrupt-names: Should contain a list of interrupt names corresponding to
-       the interrupts in the interrupts property, if available.
-       Should be "macirq" for the main MAC IRQ
-- clocks: Must contain a phandle for each entry in clock-names.
-- clock-names: The name of the clock listed in the clocks property. These are
-       "axi", "apb", "mac_main", "ptp_ref", "rmii_internal" for MT2712 SoC.
-- mac-address: See ethernet.txt in the same directory
-- phy-mode: See ethernet.txt in the same directory
-- mediatek,pericfg: A phandle to the syscon node that control ethernet
-       interface and timing delay.
-
-Optional properties:
-- mediatek,tx-delay-ps: TX clock delay macro value. Default is 0.
-       It should be defined for RGMII/MII interface.
-       It should be defined for RMII interface when the reference clock is from MT2712 SoC.
-- mediatek,rx-delay-ps: RX clock delay macro value. Default is 0.
-       It should be defined for RGMII/MII interface.
-       It should be defined for RMII interface.
-Both delay properties need to be a multiple of 170 for RGMII interface,
-or will round down. Range 0~31*170.
-Both delay properties need to be a multiple of 550 for MII/RMII interface,
-or will round down. Range 0~31*550.
-
-- mediatek,rmii-rxc: boolean property, if present indicates that the RMII
-       reference clock, which is from external PHYs, is connected to RXC pin
-       on MT2712 SoC.
-       Otherwise, is connected to TXC pin.
-- mediatek,rmii-clk-from-mac: boolean property, if present indicates that
-       MT2712 SoC provides the RMII reference clock, which outputs to TXC pin only.
-- mediatek,txc-inverse: boolean property, if present indicates that
-       1. tx clock will be inversed in MII/RGMII case,
-       2. tx clock inside MAC will be inversed relative to reference clock
-          which is from external PHYs in RMII case, and it rarely happen.
-       3. the reference clock, which outputs to TXC pin will be inversed in RMII case
-          when the reference clock is from MT2712 SoC.
-- mediatek,rxc-inverse: boolean property, if present indicates that
-       1. rx clock will be inversed in MII/RGMII case.
-       2. reference clock will be inversed when arrived at MAC in RMII case, when
-          the reference clock is from external PHYs.
-       3. the inside clock, which be sent to MAC, will be inversed in RMII case when
-          the reference clock is from MT2712 SoC.
-- assigned-clocks: mac_main and ptp_ref clocks
-- assigned-clock-parents: parent clocks of the assigned clocks
-
-Example:
-       eth: ethernet@1101c000 {
-               compatible = "mediatek,mt2712-gmac";
-               reg = <0 0x1101c000 0 0x1300>;
-               interrupts = <GIC_SPI 237 IRQ_TYPE_LEVEL_LOW>;
-               interrupt-names = "macirq";
-               phy-mode ="rgmii-rxid";
-               mac-address = [00 55 7b b5 7d f7];
-               clock-names = "axi",
-                             "apb",
-                             "mac_main",
-                             "ptp_ref",
-                             "rmii_internal";
-               clocks = <&pericfg CLK_PERI_GMAC>,
-                        <&pericfg CLK_PERI_GMAC_PCLK>,
-                        <&topckgen CLK_TOP_ETHER_125M_SEL>,
-                        <&topckgen CLK_TOP_ETHER_50M_SEL>,
-                        <&topckgen CLK_TOP_ETHER_50M_RMII_SEL>;
-               assigned-clocks = <&topckgen CLK_TOP_ETHER_125M_SEL>,
-                                 <&topckgen CLK_TOP_ETHER_50M_SEL>,
-                                 <&topckgen CLK_TOP_ETHER_50M_RMII_SEL>;
-               assigned-clock-parents = <&topckgen CLK_TOP_ETHERPLL_125M>,
-                                        <&topckgen CLK_TOP_APLL1_D3>,
-                                        <&topckgen CLK_TOP_ETHERPLL_50M>;
-               power-domains = <&scpsys MT2712_POWER_DOMAIN_AUDIO>;
-               mediatek,pericfg = <&pericfg>;
-               mediatek,tx-delay-ps = <1530>;
-               mediatek,rx-delay-ps = <1530>;
-               mediatek,rmii-rxc;
-               mediatek,txc-inverse;
-               mediatek,rxc-inverse;
-               snps,txpbl = <1>;
-               snps,rxpbl = <1>;
-               snps,reset-gpio = <&pio 87 GPIO_ACTIVE_LOW>;
-               snps,reset-active-low;
-       };
diff --git a/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml b/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml
new file mode 100644 (file)
index 0000000..9019446
--- /dev/null
@@ -0,0 +1,175 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/mediatek-dwmac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek DWMAC glue layer controller
+
+maintainers:
+  - Biao Huang <biao.huang@mediatek.com>
+
+description:
+  This file documents the platform glue layer for stmmac.
+
+# We need a select here so we don't match all nodes with 'snps,dwmac'
+select:
+  properties:
+    compatible:
+      contains:
+        enum:
+          - mediatek,mt2712-gmac
+          - mediatek,mt8195-gmac
+  required:
+    - compatible
+
+allOf:
+  - $ref: "snps,dwmac.yaml#"
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+          - enum:
+              - mediatek,mt2712-gmac
+          - const: snps,dwmac-4.20a
+      - items:
+          - enum:
+              - mediatek,mt8195-gmac
+          - const: snps,dwmac-5.10a
+
+  clocks:
+    minItems: 5
+    items:
+      - description: AXI clock
+      - description: APB clock
+      - description: MAC Main clock
+      - description: PTP clock
+      - description: RMII reference clock provided by MAC
+      - description: MAC clock gate
+
+  clock-names:
+    minItems: 5
+    items:
+      - const: axi
+      - const: apb
+      - const: mac_main
+      - const: ptp_ref
+      - const: rmii_internal
+      - const: mac_cg
+
+  mediatek,pericfg:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description:
+      The phandle to the syscon node that controls the ethernet
+      interface and timing delay.
+
+  mediatek,tx-delay-ps:
+    description:
+      The internal TX clock delay (provided by this driver) in picoseconds.
+      For the MT2712 RGMII interface, the allowed value needs to be a multiple
+      of 170, or it will be rounded down. Range 0~31*170.
+      For the MT2712 RMII/MII interface, the allowed value needs to be a
+      multiple of 550, or it will be rounded down. Range 0~31*550.
+      For the MT8195 RGMII/RMII/MII interface, the allowed value needs to be
+      a multiple of 290, or it will be rounded down. Range 0~31*290.
+
+  mediatek,rx-delay-ps:
+    description:
+      The internal RX clock delay (provided by this driver) in picoseconds.
+      For the MT2712 RGMII interface, the allowed value needs to be a multiple
+      of 170, or it will be rounded down. Range 0~31*170.
+      For the MT2712 RMII/MII interface, the allowed value needs to be a
+      multiple of 550, or it will be rounded down. Range 0~31*550.
+      For the MT8195 RGMII/RMII/MII interface, the allowed value needs to be
+      a multiple of 290, or it will be rounded down. Range 0~31*290.
+
+  mediatek,rmii-rxc:
+    type: boolean
+    description:
+      If present, indicates that the RMII reference clock, which comes from
+      external PHYs, is connected to the RXC pin. Otherwise, it is connected
+      to the TXC pin.
+
+  mediatek,rmii-clk-from-mac:
+    type: boolean
+    description:
+      If present, indicates that the MAC provides the RMII reference clock,
+      which is output on the TXC pin only.
+
+  mediatek,txc-inverse:
+    type: boolean
+    description:
+      If present, indicates that
+      1. the tx clock will be inverted in the MII/RGMII case,
+      2. the tx clock inside the MAC will be inverted relative to the
+         reference clock coming from external PHYs in the RMII case (this
+         rarely happens),
+      3. the reference clock output on the TXC pin will be inverted in the
+         RMII case when the reference clock comes from the MAC.
+
+  mediatek,rxc-inverse:
+    type: boolean
+    description:
+      If present, indicates that
+      1. the rx clock will be inverted in the MII/RGMII case,
+      2. the reference clock will be inverted when it arrives at the MAC in
+         the RMII case, when the reference clock comes from external PHYs,
+      3. the internal clock sent to the MAC will be inverted in the RMII case
+         when the reference clock comes from the MAC.
+
+  mediatek,mac-wol:
+    type: boolean
+    description:
+      If present, indicates that the MAC supports WOL (Wake-on-LAN) and that
+      MAC WOL will be enabled. Otherwise, PHY WOL is preferred.
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - interrupt-names
+  - clocks
+  - clock-names
+  - phy-mode
+  - mediatek,pericfg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/mt2712-clk.h>
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/power/mt2712-power.h>
+
+    eth: ethernet@1101c000 {
+        compatible = "mediatek,mt2712-gmac", "snps,dwmac-4.20a";
+        reg = <0x1101c000 0x1300>;
+        interrupts = <GIC_SPI 237 IRQ_TYPE_LEVEL_LOW>;
+        interrupt-names = "macirq";
+        phy-mode = "rgmii-rxid";
+        mac-address = [00 55 7b b5 7d f7];
+        clock-names = "axi",
+                      "apb",
+                      "mac_main",
+                      "ptp_ref",
+                      "rmii_internal";
+        clocks = <&pericfg CLK_PERI_GMAC>,
+                 <&pericfg CLK_PERI_GMAC_PCLK>,
+                 <&topckgen CLK_TOP_ETHER_125M_SEL>,
+                 <&topckgen CLK_TOP_ETHER_50M_SEL>,
+                 <&topckgen CLK_TOP_ETHER_50M_RMII_SEL>;
+        assigned-clocks = <&topckgen CLK_TOP_ETHER_125M_SEL>,
+                          <&topckgen CLK_TOP_ETHER_50M_SEL>,
+                          <&topckgen CLK_TOP_ETHER_50M_RMII_SEL>;
+        assigned-clock-parents = <&topckgen CLK_TOP_ETHERPLL_125M>,
+                                 <&topckgen CLK_TOP_APLL1_D3>,
+                                 <&topckgen CLK_TOP_ETHERPLL_50M>;
+        power-domains = <&scpsys MT2712_POWER_DOMAIN_AUDIO>;
+        mediatek,pericfg = <&pericfg>;
+        mediatek,tx-delay-ps = <1530>;
+        snps,txpbl = <1>;
+        snps,rxpbl = <1>;
+        snps,reset-gpio = <&pio 87 GPIO_ACTIVE_LOW>;
+        snps,reset-delays-us = <0 10000 10000>;
+    };
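
The rounding rule in the mediatek,tx-delay-ps / mediatek,rx-delay-ps descriptions above is plain integer arithmetic; a small C sketch (the function name and explicit clamp are illustrative, the per-SoC step being 170, 550 or 290 ps as listed):

        /* A requested delay in picoseconds rounds down to a whole number of
         * per-SoC steps, and the range caps at 31 steps. */
        static unsigned int mtk_delay_ps_to_steps(unsigned int delay_ps,
                                                  unsigned int step_ps)
        {
                unsigned int steps = delay_ps / step_ps;   /* rounds down */

                return steps > 31 ? 31 : steps;            /* range 0~31*step */
        }

With the MT2712 RGMII step of 170 ps, the example value of 1530 above maps to exactly 9 steps.
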
index 7104679..70e0cb1 100644 (file)
@@ -2,7 +2,7 @@ Microsemi MII Management Controller (MIIM) / MDIO
 =================================================
 
 Properties:
-- compatible: must be "mscc,ocelot-miim"
+- compatible: must be "mscc,ocelot-miim" or "microchip,lan966x-miim"
 - reg: The base address of the MDIO bus controller register bank. Optionally, a
   second register bank can be defined if there is an associated reset register
   for internal PHYs
index 025f424..3216c99 100644 (file)
@@ -18,7 +18,7 @@ description: |
   wireless device. The node is expected to be specified as a child
   node of the PCI controller to which the wireless chip is connected.
   Alternatively, it can specify the wireless part of the MT7628/MT7688
-  or MT7622 SoC.
+  or MT7622/MT7986 SoC.
 
 allOf:
   - $ref: ieee80211.yaml#
@@ -29,9 +29,13 @@ properties:
       - mediatek,mt76
       - mediatek,mt7628-wmac
       - mediatek,mt7622-wmac
+      - mediatek,mt7986-wmac
 
   reg:
-    maxItems: 1
+    minItems: 1
+    maxItems: 3
+    description:
+      MT7986 should contain 3 regions: consys, dcm, and sku, in this order.
 
   interrupts:
     maxItems: 1
@@ -39,6 +43,17 @@ properties:
   power-domains:
     maxItems: 1
 
+  memory-region:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+    description:
+      Specify the consys reset for mt7986.
+
+  reset-names:
+    const: consys
+
   mediatek,infracfg:
     $ref: /schemas/types.yaml#/definitions/phandle
     description:
@@ -174,7 +189,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
@@ -240,3 +255,15 @@ examples:
 
       power-domains = <&scpsys 3>;
     };
+
+  - |
+    wifi@18000000 {
+        compatible = "mediatek,mt7986-wmac";
+        resets = <&watchdog 23>;
+        reset-names = "consys";
+        reg = <0x18000000 0x1000000>,
+              <0x10003000 0x1000>,
+              <0x11d10000 0x1000>;
+        interrupts = <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>;
+        memory-region = <&wmcpu_emi>;
+    };
diff --git a/Documentation/devicetree/bindings/phy/fsl,lynx-28g.yaml b/Documentation/devicetree/bindings/phy/fsl,lynx-28g.yaml
new file mode 100644 (file)
index 0000000..4d91e2f
--- /dev/null
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/fsl,lynx-28g.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Lynx 28G SerDes PHY binding
+
+maintainers:
+  - Ioana Ciornei <ioana.ciornei@nxp.com>
+
+properties:
+  compatible:
+    enum:
+      - fsl,lynx-28g
+
+  reg:
+    maxItems: 1
+
+  "#phy-cells":
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    soc {
+      #address-cells = <2>;
+      #size-cells = <2>;
+      serdes_1: phy@1ea0000 {
+        compatible = "fsl,lynx-28g";
+        reg = <0x0 0x1ea0000 0x0 0x1e30>;
+        #phy-cells = <1>;
+      };
+    };
index 6107880..02b76f1 100644 (file)
@@ -37,6 +37,12 @@ properties:
       max bit rate supported in bps
     minimum: 1
 
+  mux-states:
+    description:
+      Mux controller node used to route the signals from the controller to
+      the transceiver.
+    maxItems: 1
+
 required:
   - compatible
   - '#phy-cells'
@@ -53,4 +59,5 @@ examples:
       max-bitrate = <5000000>;
       standby-gpios = <&wakeup_gpio1 16 GPIO_ACTIVE_LOW>;
       enable-gpios = <&main_gpio1 67 GPIO_ACTIVE_HIGH>;
+      mux-states = <&mux0 1>;
     };
index c85f759..8a90d82 100644 (file)
@@ -107,9 +107,6 @@ properties:
 
     additionalProperties: false
 
-allOf:
-  - $ref: "pinctrl.yaml#"
-
 required:
   - pinctrl-0
   - pinctrl-names
index 4431237..c17cdb0 100644 (file)
@@ -4,6 +4,22 @@ Linux Devlink Documentation
 devlink is an API to expose device information and resources not directly
 related to any device class, such as chip-wide/switch-ASIC-wide configuration.
 
+Locking
+-------
+
+Driver facing APIs are currently transitioning to allow more explicit
+locking. Drivers can use the existing ``devlink_*`` set of APIs, or
+new APIs prefixed by ``devl_*``. The older APIs handle all the locking
+in devlink core, but don't allow registration of most sub-objects once
+the main devlink object is itself registered. The newer ``devl_*`` APIs assume
+the devlink instance lock is already held. Drivers can take the instance
+lock by calling ``devl_lock()``. It is also held in most of the callbacks.
+Eventually all callbacks will be invoked under the devlink instance lock;
+refer to the use of the ``DEVLINK_NL_FLAG_NO_LOCK`` flag in devlink core
+to find out which callbacks are not yet converted.
+
+Drivers are encouraged to use the devlink instance lock for their own needs.
+
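
A hedged sketch of the newer calling convention described above, in kernel-style C: devl_lock()/devl_unlock() are the instance-lock helpers named in the text, while devl_register_subobject() stands in for whichever lock-assuming devl_* call a driver actually needs (it is illustrative, not a real devlink API):

        #include <net/devlink.h>

        /* The driver takes the instance lock itself, then calls devl_* APIs
         * that assume the lock is already held. */
        static int driver_setup_subobjects(struct devlink *devlink)
        {
                int err;

                devl_lock(devlink);                      /* take the instance lock */
                err = devl_register_subobject(devlink);  /* illustrative devl_* call */
                devl_unlock(devlink);

                return err;
        }
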
 Interface documentation
 -----------------------
 
index 2572eec..b0024aa 100644 (file)
@@ -878,6 +878,29 @@ tcp_min_tso_segs - INTEGER
 
        Default: 2
 
+tcp_tso_rtt_log - INTEGER
+       Adjustment of TSO packet sizes based on min_rtt
+
+       Starting from linux-5.18, TCP autosizing can be tweaked
+       for flows with a small RTT.
+
+       The old autosizing split the pacing budget so as to send 1024 TSO
+       packets per second.
+
+       tso_packet_size = sk->sk_pacing_rate / 1024;
+
+       With the new mechanism, we increase this TSO sizing using:
+
+       distance = min_rtt_usec / (2^tcp_tso_rtt_log)
+       tso_packet_size += gso_max_size >> distance;
+
+       This means that flows between very close hosts can use bigger
+       TSO packets, reducing their CPU costs.
+
+       If you want to use the old autosizing, set this sysctl to 0.
+
+       Default: 9  (2^9 = 512 usec)
+
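
Putting the two formulas above together, the new sizing reduces to a division and two shifts; a self-contained C sketch (identifiers are illustrative, not the kernel's, and the shift is guarded to stay well defined):

        #include <stdint.h>

        /* tcp_tso_rtt_log == 0 selects the old behaviour. */
        static uint64_t tso_autosize(uint64_t pacing_rate, uint32_t min_rtt_usec,
                                     uint32_t tcp_tso_rtt_log, uint32_t gso_max_size)
        {
                uint64_t size = pacing_rate / 1024;   /* old: 1024 TSO packets/sec */

                if (tcp_tso_rtt_log) {
                        uint32_t distance = min_rtt_usec >> tcp_tso_rtt_log;

                        if (distance < 32)            /* >=32 would shift everything out */
                                size += gso_max_size >> distance;
                }
                return size;
        }
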
 tcp_pacing_ss_ratio - INTEGER
        sk->sk_pacing_rate is set by TCP stack using a ratio applied
        to current rate. (current_rate = cwnd * mss / srtt)
index cb75c5d..e5ce4f6 100644 (file)
@@ -2572,7 +2572,7 @@ F:        sound/soc/rockchip/
 N:     rockchip
 
 ARM/SAMSUNG S3C, S5P AND EXYNOS ARM ARCHITECTURES
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 R:     Alim Akhtar <alim.akhtar@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org
@@ -2739,7 +2739,7 @@ N:        stm32
 N:     stm
 
 ARM/Synaptics SoC support
-M:     Jisheng Zhang <Jisheng.Zhang@synaptics.com>
+M:     Jisheng Zhang <jszhang@kernel.org>
 M:     Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -3821,9 +3821,6 @@ BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
 M:     Arend van Spriel <aspriel@gmail.com>
 M:     Franky Lin <franky.lin@broadcom.com>
 M:     Hante Meuleman <hante.meuleman@broadcom.com>
-M:     Chi-hsien Lin <chi-hsien.lin@infineon.com>
-M:     Wright Feng <wright.feng@infineon.com>
-M:     Chung-hsien Hsu <chung-hsien.hsu@infineon.com>
 L:     linux-wireless@vger.kernel.org
 L:     brcm80211-dev-list.pdl@broadcom.com
 L:     SHA-cyfmac-dev-list@infineon.com
@@ -3907,7 +3904,7 @@ M:        Scott Branden <sbranden@broadcom.com>
 M:     bcm-kernel-feedback-list@broadcom.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-T:     git git://github.com/broadcom/cygnus-linux.git
+T:     git git://github.com/broadcom/stblinux.git
 F:     arch/arm64/boot/dts/broadcom/northstar2/*
 F:     arch/arm64/boot/dts/broadcom/stingray/*
 F:     drivers/clk/bcm/clk-ns*
@@ -11336,6 +11333,13 @@ S:     Maintained
 W:     http://linux-test-project.github.io/
 T:     git git://github.com/linux-test-project/ltp.git
 
+LYNX 28G SERDES PHY DRIVER
+M:     Ioana Ciornei <ioana.ciornei@nxp.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     Documentation/devicetree/bindings/phy/fsl,lynx-28g.yaml
+F:     drivers/phy/freescale/phy-fsl-lynx-28g.c
+
 LYNX PCS MODULE
 M:     Ioana Ciornei <ioana.ciornei@nxp.com>
 L:     netdev@vger.kernel.org
@@ -11683,7 +11687,7 @@ F:      drivers/iio/proximity/mb1232.c
 
 MAXIM MAX17040 FAMILY FUEL GAUGE DRIVERS
 R:     Iskren Chernev <iskren.chernev@gmail.com>
-R:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+R:     Krzysztof Kozlowski <krzk@kernel.org>
 R:     Marek Szyprowski <m.szyprowski@samsung.com>
 R:     Matheus Castello <matheus@castello.eng.br>
 L:     linux-pm@vger.kernel.org
@@ -11693,7 +11697,7 @@ F:      drivers/power/supply/max17040_battery.c
 
 MAXIM MAX17042 FAMILY FUEL GAUGE DRIVERS
 R:     Hans de Goede <hdegoede@redhat.com>
-R:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+R:     Krzysztof Kozlowski <krzk@kernel.org>
 R:     Marek Szyprowski <m.szyprowski@samsung.com>
 R:     Sebastian Krzyszkowiak <sebastian.krzyszkowiak@puri.sm>
 R:     Purism Kernel Team <kernel@puri.sm>
@@ -11738,7 +11742,7 @@ F:      Documentation/devicetree/bindings/power/supply/maxim,max77976.yaml
 F:     drivers/power/supply/max77976_charger.c
 
 MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-pm@vger.kernel.org
 S:     Supported
@@ -11747,7 +11751,7 @@ F:      drivers/power/supply/max77693_charger.c
 
 MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
 M:     Chanwoo Choi <cw00.choi@samsung.com>
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-kernel@vger.kernel.org
 S:     Supported
@@ -12173,6 +12177,7 @@ R:      Shayne Chen <shayne.chen@mediatek.com>
 R:     Sean Wang <sean.wang@mediatek.com>
 L:     linux-wireless@vger.kernel.org
 S:     Maintained
+F:     Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml
 F:     drivers/net/wireless/mediatek/mt76/
 
 MEDIATEK MT7601U WIRELESS LAN DRIVER
@@ -12436,7 +12441,7 @@ F:      include/linux/memblock.h
 F:     mm/memblock.c
 
 MEMORY CONTROLLER DRIVERS
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux-mem-ctrl.git
@@ -13384,6 +13389,7 @@ F:      net/core/drop_monitor.c
 NETWORKING DRIVERS
 M:     "David S. Miller" <davem@davemloft.net>
 M:     Jakub Kicinski <kuba@kernel.org>
+M:     Paolo Abeni <pabeni@redhat.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 Q:     https://patchwork.kernel.org/project/netdevbpf/list/
@@ -13430,6 +13436,7 @@ F:      tools/testing/selftests/drivers/net/dsa/
 NETWORKING [GENERAL]
 M:     "David S. Miller" <davem@davemloft.net>
 M:     Jakub Kicinski <kuba@kernel.org>
+M:     Paolo Abeni <pabeni@redhat.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 Q:     https://patchwork.kernel.org/project/netdevbpf/list/
@@ -13573,7 +13580,7 @@ F:      include/uapi/linux/nexthop.h
 F:     net/ipv4/nexthop.c
 
 NFC SUBSYSTEM
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 L:     linux-nfc@lists.01.org (subscribers-only)
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -13887,7 +13894,7 @@ F:      Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
 F:     drivers/regulator/pf8x00-regulator.c
 
 NXP PTN5150A CC LOGIC AND EXTCON DRIVER
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
@@ -15313,7 +15320,7 @@ F:      drivers/pinctrl/renesas/
 
 PIN CONTROLLER - SAMSUNG
 M:     Tomasz Figa <tomasz.figa@gmail.com>
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 R:     Alim Akhtar <alim.akhtar@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -15931,8 +15938,8 @@ M:      Kalle Valo <kvalo@kernel.org>
 L:     ath11k@lists.infradead.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
+F:     Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
 F:     drivers/net/wireless/ath/ath11k/
-F:     Documentation/devicetree/bindings/net/wireless/qcom,ath11k.txt
 
 QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
 M:     Toke Høiland-Jørgensen <toke@toke.dk>
@@ -16378,7 +16385,7 @@ REALTEK RTL83xx SMI DSA ROUTER CHIPS
 M:     Linus Walleij <linus.walleij@linaro.org>
M:     Alvin Šipraga <alsi@bang-olufsen.dk>
 S:     Maintained
-F:     Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
+F:     Documentation/devicetree/bindings/net/dsa/realtek.yaml
 F:     drivers/net/dsa/realtek/*
 
 REALTEK WIRELESS DRIVER (rtlwifi family)
@@ -16954,7 +16961,7 @@ W:      http://www.ibm.com/developerworks/linux/linux390/
 F:     drivers/s390/scsi/zfcp_*
 
 S3C ADC BATTERY DRIVER
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 L:     linux-samsung-soc@vger.kernel.org
 S:     Odd Fixes
 F:     drivers/power/supply/s3c_adc_battery.c
@@ -16999,7 +17006,7 @@ F:      Documentation/admin-guide/LSM/SafeSetID.rst
 F:     security/safesetid/
 
 SAMSUNG AUDIO (ASoC) DRIVERS
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Supported
@@ -17007,7 +17014,7 @@ F:      Documentation/devicetree/bindings/sound/samsung*
 F:     sound/soc/samsung/
 
 SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 L:     linux-crypto@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
 S:     Maintained
@@ -17042,7 +17049,7 @@ S:      Maintained
 F:     drivers/platform/x86/samsung-laptop.c
 
 SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-kernel@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
@@ -17068,7 +17075,7 @@ F:      drivers/media/platform/s3c-camif/
 F:     include/media/drv-intf/s3c_camif.h
 
 SAMSUNG S3FWRN5 NFC DRIVER
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 M:     Krzysztof Opasiak <k.opasiak@samsung.com>
 L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Maintained
@@ -17090,7 +17097,7 @@ S:      Supported
 F:     drivers/media/i2c/s5k5baf.c
 
 SAMSUNG S5P Security SubSystem (SSS) DRIVER
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 M:     Vladimir Zapolskiy <vz@mleia.com>
 L:     linux-crypto@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
@@ -17125,7 +17132,7 @@ F:      include/linux/clk/samsung.h
 F:     include/linux/platform_data/clk-s3c2410.h
 
 SAMSUNG SPI DRIVERS
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 M:     Andi Shyti <andi@etezian.org>
 L:     linux-spi@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
@@ -21475,7 +21482,6 @@ THE REST
 M:     Linus Torvalds <torvalds@linux-foundation.org>
 L:     linux-kernel@vger.kernel.org
 S:     Buried alive in reporters
-Q:     http://patchwork.kernel.org/project/LKML/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 F:     *
 F:     */
index daeb5c8..55a30ca 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc8
 NAME = Superb Owl
 
 # *DOCUMENTATION*
index 6dde51c..e4775bb 100644 (file)
        };
 
        pinctrl_fwqspid_default: fwqspid_default {
-               function = "FWQSPID";
+               function = "FWSPID";
                groups = "FWQSPID";
        };
 
index dff18fc..21294f7 100644 (file)
 
                hvs: hvs@7e400000 {
                        compatible = "brcm,bcm2711-hvs";
+                       reg = <0x7e400000 0x8000>;
                        interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
                };
 
index d35fb79..4db4332 100644 (file)
@@ -5,7 +5,13 @@
 
 / {
        /* Version of Nyan Big with 1080p panel */
-       panel {
-               compatible = "auo,b133htn01";
+       host1x@50000000 {
+               dpaux@545c0000 {
+                       aux-bus {
+                               panel: panel {
+                                       compatible = "auo,b133htn01";
+                               };
+                       };
+               };
        };
 };
index 6fe6796..aee73ef 100644 (file)
        .endm
 #endif
 
+#if __LINUX_ARM_ARCH__ < 7
+       .macro  dsb, args
+       mcr     p15, 0, r0, c7, c10, 4
+       .endm
+
+       .macro  isb, args
+       mcr     p15, 0, r0, c7, c5, 4
+       .endm
+#endif
+
        .macro asm_trace_hardirqs_off, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
        .if \save
diff --git a/arch/arm/include/asm/spectre.h b/arch/arm/include/asm/spectre.h
new file mode 100644 (file)
index 0000000..85f9e53
--- /dev/null
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_SPECTRE_H
+#define __ASM_SPECTRE_H
+
+enum {
+       SPECTRE_UNAFFECTED,
+       SPECTRE_MITIGATED,
+       SPECTRE_VULNERABLE,
+};
+
+enum {
+       __SPECTRE_V2_METHOD_BPIALL,
+       __SPECTRE_V2_METHOD_ICIALLU,
+       __SPECTRE_V2_METHOD_SMC,
+       __SPECTRE_V2_METHOD_HVC,
+       __SPECTRE_V2_METHOD_LOOP8,
+};
+
+enum {
+       SPECTRE_V2_METHOD_BPIALL = BIT(__SPECTRE_V2_METHOD_BPIALL),
+       SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU),
+       SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC),
+       SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC),
+       SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8),
+};
+
+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
+void spectre_v2_update_state(unsigned int state, unsigned int methods);
+#else
+static inline void spectre_v2_update_state(unsigned int state,
+                                          unsigned int methods)
+{}
+#endif
+
+int spectre_bhb_update_vectors(unsigned int method);
+
+#endif
index 4a91428..fad45c8 100644 (file)
 #define ARM_MMU_DISCARD(x)     x
 #endif
 
+/*
+ * ld.lld does not support NOCROSSREFS:
+ * https://github.com/ClangBuiltLinux/linux/issues/1609
+ */
+#ifdef CONFIG_LD_IS_LLD
+#define NOCROSSREFS
+#endif
+
+/* Set start/end symbol names to the LMA for the section */
+#define ARM_LMA(sym, section)                                          \
+       sym##_start = LOADADDR(section);                                \
+       sym##_end = LOADADDR(section) + SIZEOF(section)
+
 #define PROC_INFO                                                      \
                . = ALIGN(4);                                           \
                __proc_info_begin = .;                                  \
  * only thing that matters is their relative offsets
  */
 #define ARM_VECTORS                                                    \
-       __vectors_start = .;                                            \
-       .vectors 0xffff0000 : AT(__vectors_start) {                     \
-               *(.vectors)                                             \
+       __vectors_lma = .;                                              \
+       OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) {            \
+               .vectors {                                              \
+                       *(.vectors)                                     \
+               }                                                       \
+               .vectors.bhb.loop8 {                                    \
+                       *(.vectors.bhb.loop8)                           \
+               }                                                       \
+               .vectors.bhb.bpiall {                                   \
+                       *(.vectors.bhb.bpiall)                          \
+               }                                                       \
        }                                                               \
-       . = __vectors_start + SIZEOF(.vectors);                         \
-       __vectors_end = .;                                              \
+       ARM_LMA(__vectors, .vectors);                                   \
+       ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8);               \
+       ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall);             \
+       . = __vectors_lma + SIZEOF(.vectors) +                          \
+               SIZEOF(.vectors.bhb.loop8) +                            \
+               SIZEOF(.vectors.bhb.bpiall);                            \
                                                                        \
-       __stubs_start = .;                                              \
-       .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {            \
+       __stubs_lma = .;                                                \
+       .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) {              \
                *(.stubs)                                               \
        }                                                               \
-       . = __stubs_start + SIZEOF(.stubs);                             \
-       __stubs_end = .;                                                \
+       ARM_LMA(__stubs, .stubs);                                       \
+       . = __stubs_lma + SIZEOF(.stubs);                               \
                                                                        \
        PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
 
index ae295a3..6ef3b53 100644 (file)
@@ -106,4 +106,6 @@ endif
 
 obj-$(CONFIG_HAVE_ARM_SMCCC)   += smccc-call.o
 
+obj-$(CONFIG_GENERIC_CPU_VULNERABILITIES) += spectre.o
+
 extra-y := $(head-y) vmlinux.lds
index 5cd0578..ee3f7a5 100644 (file)
@@ -1002,12 +1002,11 @@ vector_\name:
        sub     lr, lr, #\correction
        .endif
 
-       @
-       @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
-       @ (parent CPSR)
-       @
+       @ Save r0, lr_<exception> (parent PC)
        stmia   sp, {r0, lr}            @ save r0, lr
-       mrs     lr, spsr
+
+       @ Save spsr_<exception> (parent CPSR)
+2:     mrs     lr, spsr
        str     lr, [sp, #8]            @ save spsr
 
        @
@@ -1028,6 +1027,44 @@ vector_\name:
        movs    pc, lr                  @ branch to handler in SVC mode
 ENDPROC(vector_\name)
 
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+       .subsection 1
+       .align 5
+vector_bhb_loop8_\name:
+       .if \correction
+       sub     lr, lr, #\correction
+       .endif
+
+       @ Save r0, lr_<exception> (parent PC)
+       stmia   sp, {r0, lr}
+
+       @ bhb workaround
+       mov     r0, #8
+3:     b       . + 4
+       subs    r0, r0, #1
+       bne     3b
+       dsb
+       isb
+       b       2b
+ENDPROC(vector_bhb_loop8_\name)
+
+vector_bhb_bpiall_\name:
+       .if \correction
+       sub     lr, lr, #\correction
+       .endif
+
+       @ Save r0, lr_<exception> (parent PC)
+       stmia   sp, {r0, lr}
+
+       @ bhb workaround
+       mcr     p15, 0, r0, c7, c5, 6   @ BPIALL
+       @ isb not needed due to "movs pc, lr" in the vector stub
+       @ which gives a "context synchronisation".
+       b       2b
+ENDPROC(vector_bhb_bpiall_\name)
+       .previous
+#endif
+
        .align  2
        @ handler addresses follow this label
 1:
@@ -1036,6 +1073,10 @@ ENDPROC(vector_\name)
        .section .stubs, "ax", %progbits
        @ This must be the first word
        .word   vector_swi
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+       .word   vector_bhb_loop8_swi
+       .word   vector_bhb_bpiall_swi
+#endif
 
 vector_rst:
  ARM(  swi     SYS_ERROR0      )
@@ -1150,8 +1191,10 @@ vector_addrexcptn:
  * FIQ "NMI" handler
  *-----------------------------------------------------------------------------
  * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
- * systems.
+ * systems. This must be the last vector stub, so let's place it in its own
+ * subsection.
  */
+       .subsection 2
        vector_stub     fiq, FIQ_MODE, 4
 
        .long   __fiq_usr                       @  0  (USR_26 / USR_32)
@@ -1184,6 +1227,30 @@ vector_addrexcptn:
        W(b)    vector_irq
        W(b)    vector_fiq
 
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+       .section .vectors.bhb.loop8, "ax", %progbits
+.L__vectors_bhb_loop8_start:
+       W(b)    vector_rst
+       W(b)    vector_bhb_loop8_und
+       W(ldr)  pc, .L__vectors_bhb_loop8_start + 0x1004
+       W(b)    vector_bhb_loop8_pabt
+       W(b)    vector_bhb_loop8_dabt
+       W(b)    vector_addrexcptn
+       W(b)    vector_bhb_loop8_irq
+       W(b)    vector_bhb_loop8_fiq
+
+       .section .vectors.bhb.bpiall, "ax", %progbits
+.L__vectors_bhb_bpiall_start:
+       W(b)    vector_rst
+       W(b)    vector_bhb_bpiall_und
+       W(ldr)  pc, .L__vectors_bhb_bpiall_start + 0x1008
+       W(b)    vector_bhb_bpiall_pabt
+       W(b)    vector_bhb_bpiall_dabt
+       W(b)    vector_addrexcptn
+       W(b)    vector_bhb_bpiall_irq
+       W(b)    vector_bhb_bpiall_fiq
+#endif
+
        .data
        .align  2
 
index ac86c34..dbc1913 100644 (file)
@@ -153,6 +153,29 @@ ENDPROC(ret_from_fork)
  *-----------------------------------------------------------------------------
  */
 
+       .align  5
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+ENTRY(vector_bhb_loop8_swi)
+       sub     sp, sp, #PT_REGS_SIZE
+       stmia   sp, {r0 - r12}
+       mov     r8, #8
+1:     b       2f
+2:     subs    r8, r8, #1
+       bne     1b
+       dsb
+       isb
+       b       3f
+ENDPROC(vector_bhb_loop8_swi)
+
+       .align  5
+ENTRY(vector_bhb_bpiall_swi)
+       sub     sp, sp, #PT_REGS_SIZE
+       stmia   sp, {r0 - r12}
+       mcr     p15, 0, r8, c7, c5, 6   @ BPIALL
+       isb
+       b       3f
+ENDPROC(vector_bhb_bpiall_swi)
+#endif
        .align  5
 ENTRY(vector_swi)
 #ifdef CONFIG_CPU_V7M
@@ -160,6 +183,7 @@ ENTRY(vector_swi)
 #else
        sub     sp, sp, #PT_REGS_SIZE
        stmia   sp, {r0 - r12}                  @ Calling r0 - r12
+3:
  ARM(  add     r8, sp, #S_PC           )
  ARM(  stmdb   r8, {sp, lr}^           )       @ Calling sp, lr
  THUMB(        mov     r8, sp                  )
diff --git a/arch/arm/kernel/spectre.c b/arch/arm/kernel/spectre.c
new file mode 100644 (file)
index 0000000..0dcefc3
--- /dev/null
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/bpf.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+
+#include <asm/spectre.h>
+
+static bool _unprivileged_ebpf_enabled(void)
+{
+#ifdef CONFIG_BPF_SYSCALL
+       return !sysctl_unprivileged_bpf_disabled;
+#else
+       return false;
+#endif
+}
+
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+static unsigned int spectre_v2_state;
+static unsigned int spectre_v2_methods;
+
+void spectre_v2_update_state(unsigned int state, unsigned int method)
+{
+       if (state > spectre_v2_state)
+               spectre_v2_state = state;
+       spectre_v2_methods |= method;
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       const char *method;
+
+       if (spectre_v2_state == SPECTRE_UNAFFECTED)
+               return sprintf(buf, "%s\n", "Not affected");
+
+       if (spectre_v2_state != SPECTRE_MITIGATED)
+               return sprintf(buf, "%s\n", "Vulnerable");
+
+       if (_unprivileged_ebpf_enabled())
+               return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
+
+       switch (spectre_v2_methods) {
+       case SPECTRE_V2_METHOD_BPIALL:
+               method = "Branch predictor hardening";
+               break;
+
+       case SPECTRE_V2_METHOD_ICIALLU:
+               method = "I-cache invalidation";
+               break;
+
+       case SPECTRE_V2_METHOD_SMC:
+       case SPECTRE_V2_METHOD_HVC:
+               method = "Firmware call";
+               break;
+
+       case SPECTRE_V2_METHOD_LOOP8:
+               method = "History overwrite";
+               break;
+
+       default:
+               method = "Multiple mitigations";
+               break;
+       }
+
+       return sprintf(buf, "Mitigation: %s\n", method);
+}
index da04ed8..cae4a74 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/exception.h>
+#include <asm/spectre.h>
 #include <asm/unistd.h>
 #include <asm/traps.h>
 #include <asm/ptrace.h>
@@ -789,10 +790,59 @@ static inline void __init kuser_init(void *vectors)
 }
 #endif
 
+#ifndef CONFIG_CPU_V7M
+static void copy_from_lma(void *vma, void *lma_start, void *lma_end)
+{
+       memcpy(vma, lma_start, lma_end - lma_start);
+}
+
+static void flush_vectors(void *vma, size_t offset, size_t size)
+{
+       unsigned long start = (unsigned long)vma + offset;
+       unsigned long end = start + size;
+
+       flush_icache_range(start, end);
+}
+
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+int spectre_bhb_update_vectors(unsigned int method)
+{
+       extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
+       extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
+       void *vec_start, *vec_end;
+
+       if (system_state >= SYSTEM_FREEING_INITMEM) {
+               pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
+                      smp_processor_id());
+               return SPECTRE_VULNERABLE;
+       }
+
+       switch (method) {
+       case SPECTRE_V2_METHOD_LOOP8:
+               vec_start = __vectors_bhb_loop8_start;
+               vec_end = __vectors_bhb_loop8_end;
+               break;
+
+       case SPECTRE_V2_METHOD_BPIALL:
+               vec_start = __vectors_bhb_bpiall_start;
+               vec_end = __vectors_bhb_bpiall_end;
+               break;
+
+       default:
+               pr_err("CPU%u: unknown Spectre BHB state %d\n",
+                      smp_processor_id(), method);
+               return SPECTRE_VULNERABLE;
+       }
+
+       copy_from_lma(vectors_page, vec_start, vec_end);
+       flush_vectors(vectors_page, 0, vec_end - vec_start);
+
+       return SPECTRE_MITIGATED;
+}
+#endif
+
 void __init early_trap_init(void *vectors_base)
 {
-#ifndef CONFIG_CPU_V7M
-       unsigned long vectors = (unsigned long)vectors_base;
        extern char __stubs_start[], __stubs_end[];
        extern char __vectors_start[], __vectors_end[];
        unsigned i;
@@ -813,17 +863,20 @@ void __init early_trap_init(void *vectors_base)
         * into the vector page, mapped at 0xffff0000, and ensure these
         * are visible to the instruction stream.
         */
-       memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
-       memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
+       copy_from_lma(vectors_base, __vectors_start, __vectors_end);
+       copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end);
 
        kuser_init(vectors_base);
 
-       flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
+       flush_vectors(vectors_base, 0, PAGE_SIZE * 2);
+}
 #else /* ifndef CONFIG_CPU_V7M */
+void __init early_trap_init(void *vectors_base)
+{
        /*
         * on V7-M there is no need to copy the vector table to a dedicated
         * memory area. The address is configurable and so a table in the kernel
         * image can be used.
         */
-#endif
 }
+#endif
index cd300ee..0bf4d31 100644 (file)
@@ -3,6 +3,7 @@ menuconfig ARCH_MSTARV7
        depends on ARCH_MULTI_V7
        select ARM_GIC
        select ARM_HEAVY_MB
+       select HAVE_ARM_ARCH_TIMER
        select MST_IRQ
        select MSTAR_MSC313_MPLL
        help
index 58afba3..9724c16 100644 (file)
@@ -830,6 +830,7 @@ config CPU_BPREDICT_DISABLE
 
 config CPU_SPECTRE
        bool
+       select GENERIC_CPU_VULNERABILITIES
 
 config HARDEN_BRANCH_PREDICTOR
        bool "Harden the branch predictor against aliasing attacks" if EXPERT
@@ -850,6 +851,16 @@ config HARDEN_BRANCH_PREDICTOR
 
           If unsure, say Y.
 
+config HARDEN_BRANCH_HISTORY
+       bool "Harden Spectre style attacks against branch history" if EXPERT
+       depends on CPU_SPECTRE
+       default y
+       help
+         Speculation attacks against some high-performance processors can
+         make use of branch history to influence future speculation. When
+         taking an exception, a sequence of branches overwrites the branch
+         history, or branch history is invalidated.
+
 config TLS_REG_EMUL
        bool
        select NEED_KUSER_HELPERS
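
HARDEN_BRANCH_HISTORY is a plain bool symbol, so C code can gate on it at compile time with IS_ENABLED(), exactly as the arm64 side later does for its MITIGATE_SPECTRE_BRANCH_HISTORY option. A hedged sketch; the helper name is illustrative:

	#include <linux/kconfig.h>
	#include <linux/types.h>

	/* IS_ENABLED() folds to a compile-time 0/1 for bool Kconfig options. */
	static bool bhb_mitigation_compiled_in(void)
	{
		return IS_ENABLED(CONFIG_HARDEN_BRANCH_HISTORY);
	}
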
index 114c05a..06dbfb9 100644 (file)
@@ -6,8 +6,35 @@
 #include <asm/cp15.h>
 #include <asm/cputype.h>
 #include <asm/proc-fns.h>
+#include <asm/spectre.h>
 #include <asm/system_misc.h>
 
+#ifdef CONFIG_ARM_PSCI
+static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
+{
+       struct arm_smccc_res res;
+
+       arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                            ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+
+       switch ((int)res.a0) {
+       case SMCCC_RET_SUCCESS:
+               return SPECTRE_MITIGATED;
+
+       case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+               return SPECTRE_UNAFFECTED;
+
+       default:
+               return SPECTRE_VULNERABLE;
+       }
+}
+#else
+static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
+{
+       return SPECTRE_VULNERABLE;
+}
+#endif
+
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
 
@@ -36,13 +63,61 @@ static void __maybe_unused call_hvc_arch_workaround_1(void)
        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
 }
 
-static void cpu_v7_spectre_init(void)
+static unsigned int spectre_v2_install_workaround(unsigned int method)
 {
        const char *spectre_v2_method = NULL;
        int cpu = smp_processor_id();
 
        if (per_cpu(harden_branch_predictor_fn, cpu))
-               return;
+               return SPECTRE_MITIGATED;
+
+       switch (method) {
+       case SPECTRE_V2_METHOD_BPIALL:
+               per_cpu(harden_branch_predictor_fn, cpu) =
+                       harden_branch_predictor_bpiall;
+               spectre_v2_method = "BPIALL";
+               break;
+
+       case SPECTRE_V2_METHOD_ICIALLU:
+               per_cpu(harden_branch_predictor_fn, cpu) =
+                       harden_branch_predictor_iciallu;
+               spectre_v2_method = "ICIALLU";
+               break;
+
+       case SPECTRE_V2_METHOD_HVC:
+               per_cpu(harden_branch_predictor_fn, cpu) =
+                       call_hvc_arch_workaround_1;
+               cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
+               spectre_v2_method = "hypervisor";
+               break;
+
+       case SPECTRE_V2_METHOD_SMC:
+               per_cpu(harden_branch_predictor_fn, cpu) =
+                       call_smc_arch_workaround_1;
+               cpu_do_switch_mm = cpu_v7_smc_switch_mm;
+               spectre_v2_method = "firmware";
+               break;
+       }
+
+       if (spectre_v2_method)
+               pr_info("CPU%u: Spectre v2: using %s workaround\n",
+                       smp_processor_id(), spectre_v2_method);
+
+       return SPECTRE_MITIGATED;
+}
+#else
+static unsigned int spectre_v2_install_workaround(unsigned int method)
+{
+       pr_info("CPU%u: Spectre v2: workarounds disabled by configuration\n",
+               smp_processor_id());
+
+       return SPECTRE_VULNERABLE;
+}
+#endif
+
+static void cpu_v7_spectre_v2_init(void)
+{
+       unsigned int state, method = 0;
 
        switch (read_cpuid_part()) {
        case ARM_CPU_PART_CORTEX_A8:
@@ -51,69 +126,133 @@ static void cpu_v7_spectre_init(void)
        case ARM_CPU_PART_CORTEX_A17:
        case ARM_CPU_PART_CORTEX_A73:
        case ARM_CPU_PART_CORTEX_A75:
-               per_cpu(harden_branch_predictor_fn, cpu) =
-                       harden_branch_predictor_bpiall;
-               spectre_v2_method = "BPIALL";
+               state = SPECTRE_MITIGATED;
+               method = SPECTRE_V2_METHOD_BPIALL;
                break;
 
        case ARM_CPU_PART_CORTEX_A15:
        case ARM_CPU_PART_BRAHMA_B15:
-               per_cpu(harden_branch_predictor_fn, cpu) =
-                       harden_branch_predictor_iciallu;
-               spectre_v2_method = "ICIALLU";
+               state = SPECTRE_MITIGATED;
+               method = SPECTRE_V2_METHOD_ICIALLU;
                break;
 
-#ifdef CONFIG_ARM_PSCI
        case ARM_CPU_PART_BRAHMA_B53:
                /* Requires no workaround */
+               state = SPECTRE_UNAFFECTED;
                break;
+
        default:
                /* Other ARM CPUs require no workaround */
-               if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
+               if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) {
+                       state = SPECTRE_UNAFFECTED;
                        break;
+               }
+
                fallthrough;
-               /* Cortex A57/A72 require firmware workaround */
-       case ARM_CPU_PART_CORTEX_A57:
-       case ARM_CPU_PART_CORTEX_A72: {
-               struct arm_smccc_res res;
 
-               arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
-                                    ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-               if ((int)res.a0 != 0)
-                       return;
+       /* Cortex A57/A72 require firmware workaround */
+       case ARM_CPU_PART_CORTEX_A57:
+       case ARM_CPU_PART_CORTEX_A72:
+               state = spectre_v2_get_cpu_fw_mitigation_state();
+               if (state != SPECTRE_MITIGATED)
+                       break;
 
                switch (arm_smccc_1_1_get_conduit()) {
                case SMCCC_CONDUIT_HVC:
-                       per_cpu(harden_branch_predictor_fn, cpu) =
-                               call_hvc_arch_workaround_1;
-                       cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
-                       spectre_v2_method = "hypervisor";
+                       method = SPECTRE_V2_METHOD_HVC;
                        break;
 
                case SMCCC_CONDUIT_SMC:
-                       per_cpu(harden_branch_predictor_fn, cpu) =
-                               call_smc_arch_workaround_1;
-                       cpu_do_switch_mm = cpu_v7_smc_switch_mm;
-                       spectre_v2_method = "firmware";
+                       method = SPECTRE_V2_METHOD_SMC;
                        break;
 
                default:
+                       state = SPECTRE_VULNERABLE;
                        break;
                }
        }
-#endif
+
+       if (state == SPECTRE_MITIGATED)
+               state = spectre_v2_install_workaround(method);
+
+       spectre_v2_update_state(state, method);
+}
+
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+static int spectre_bhb_method;
+
+static const char *spectre_bhb_method_name(int method)
+{
+       switch (method) {
+       case SPECTRE_V2_METHOD_LOOP8:
+               return "loop";
+
+       case SPECTRE_V2_METHOD_BPIALL:
+               return "BPIALL";
+
+       default:
+               return "unknown";
        }
+}
 
-       if (spectre_v2_method)
-               pr_info("CPU%u: Spectre v2: using %s workaround\n",
-                       smp_processor_id(), spectre_v2_method);
+static int spectre_bhb_install_workaround(int method)
+{
+       if (spectre_bhb_method != method) {
+               if (spectre_bhb_method) {
+                       pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
+                              smp_processor_id());
+
+                       return SPECTRE_VULNERABLE;
+               }
+
+               if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
+                       return SPECTRE_VULNERABLE;
+
+               spectre_bhb_method = method;
+       }
+
+       pr_info("CPU%u: Spectre BHB: using %s workaround\n",
+               smp_processor_id(), spectre_bhb_method_name(method));
+
+       return SPECTRE_MITIGATED;
 }
 #else
-static void cpu_v7_spectre_init(void)
+static int spectre_bhb_install_workaround(int method)
 {
+       return SPECTRE_VULNERABLE;
 }
 #endif
 
+static void cpu_v7_spectre_bhb_init(void)
+{
+       unsigned int state, method = 0;
+
+       switch (read_cpuid_part()) {
+       case ARM_CPU_PART_CORTEX_A15:
+       case ARM_CPU_PART_BRAHMA_B15:
+       case ARM_CPU_PART_CORTEX_A57:
+       case ARM_CPU_PART_CORTEX_A72:
+               state = SPECTRE_MITIGATED;
+               method = SPECTRE_V2_METHOD_LOOP8;
+               break;
+
+       case ARM_CPU_PART_CORTEX_A73:
+       case ARM_CPU_PART_CORTEX_A75:
+               state = SPECTRE_MITIGATED;
+               method = SPECTRE_V2_METHOD_BPIALL;
+               break;
+
+       default:
+               state = SPECTRE_UNAFFECTED;
+               break;
+       }
+
+       if (state == SPECTRE_MITIGATED)
+               state = spectre_bhb_install_workaround(method);
+
+       spectre_v2_update_state(state, method);
+}
+
 static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
                                                  u32 mask, const char *msg)
 {
@@ -142,16 +281,17 @@ static bool check_spectre_auxcr(bool *warned, u32 bit)
 void cpu_v7_ca8_ibe(void)
 {
        if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
-               cpu_v7_spectre_init();
+               cpu_v7_spectre_v2_init();
 }
 
 void cpu_v7_ca15_ibe(void)
 {
        if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
-               cpu_v7_spectre_init();
+               cpu_v7_spectre_v2_init();
 }
 
 void cpu_v7_bugs_init(void)
 {
-       cpu_v7_spectre_init();
+       cpu_v7_spectre_v2_init();
+       cpu_v7_spectre_bhb_init();
 }
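
Because CPU_SPECTRE now selects GENERIC_CPU_VULNERABILITIES, the state recorded by cpu_v7_spectre_v2_init() and cpu_v7_spectre_bhb_init() is reported through the standard sysfs vulnerabilities files. A small user-space sketch that reads the result:

	#include <stdio.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* e.g. "Mitigation: Branch predictor hardening" */
		fclose(f);
		return 0;
	}
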
index 09b885c..c842878 100644 (file)
@@ -1252,9 +1252,6 @@ config HW_PERF_EVENTS
        def_bool y
        depends on ARM_PMU
 
-config ARCH_HAS_FILTER_PGPROT
-       def_bool y
-
 # Supported by clang >= 7.0
 config CC_HAVE_SHADOW_CALL_STACK
        def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18)
@@ -1383,6 +1380,15 @@ config UNMAP_KERNEL_AT_EL0
 
          If unsure, say Y.
 
+config MITIGATE_SPECTRE_BRANCH_HISTORY
+       bool "Mitigate Spectre style attacks against branch history" if EXPERT
+       default y
+       help
+         Speculation attacks against some high-performance processors can
+         make use of branch history to influence future speculation.
+         When taking an exception from user-space, a sequence of branches
+         or a firmware call overwrites the branch history.
+
 config RODATA_FULL_DEFAULT_ENABLED
        bool "Apply r/o permissions of VM areas also to their linear aliases"
        default y
index 17f8e73..41702e7 100644 (file)
 &dpmac7 {
        sfp = <&sfp0>;
        managed = "in-band-status";
+       phys = <&serdes_1 3>;
 };
 
 &dpmac8 {
        sfp = <&sfp1>;
        managed = "in-band-status";
+       phys = <&serdes_1 2>;
 };
 
 &dpmac9 {
        sfp = <&sfp2>;
        managed = "in-band-status";
+       phys = <&serdes_1 1>;
 };
 
 &dpmac10 {
        sfp = <&sfp3>;
        managed = "in-band-status";
+       phys = <&serdes_1 0>;
 };
 
 &emdio2 {
index 7032505..92a8813 100644 (file)
                ranges;
                dma-ranges = <0x0 0x0 0x0 0x0 0x10000 0x00000000>;
 
+               serdes_1: phy@1ea0000 {
+                       compatible = "fsl,lynx-28g";
+                       reg = <0x0 0x1ea0000 0x0 0x1e30>;
+                       #phy-cells = <1>;
+               };
+
                crypto: crypto@8000000 {
                        compatible = "fsl,sec-v5.0", "fsl,sec-v4.0";
                        fsl,sec-era = <10>;
index 04da07a..1cee264 100644 (file)
@@ -18,6 +18,7 @@
 
        aliases {
                spi0 = &spi0;
+               ethernet0 = &eth0;
                ethernet1 = &eth1;
                mmc0 = &sdhci0;
                mmc1 = &sdhci1;
        /*
         * U-Boot port for Turris Mox has a bug which always expects that "ranges" DT property
         * contains exactly 2 ranges with 3 (child) address cells, 2 (parent) address cells and
-        * 2 size cells and also expects that the second range starts at 16 MB offset. If these
+        * 2 size cells and also expects that the second range starts at 16 MB offset. It also
+        * expects that the first range uses the same address for the PCI (child) and CPU (parent)
+        * cells (i.e. no remapping) and that this address is the lowest of all specified ranges. If these
         * conditions are not met, U-Boot crashes while loading the kernel DTB file. PCIe address
         * space is 128 MB long, so the best split between MEM and IO is to use fixed 16 MB window
         * for IO and the rest 112 MB (64+32+16) for MEM, despite that maximal IO size is just 64 kB.
         * https://source.denx.de/u-boot/u-boot/-/commit/cb2ddb291ee6fcbddd6d8f4ff49089dfe580f5d7
         * https://source.denx.de/u-boot/u-boot/-/commit/c64ac3b3185aeb3846297ad7391fc6df8ecd73bf
         * https://source.denx.de/u-boot/u-boot/-/commit/4a82fca8e330157081fc132a591ebd99ba02ee33
+        * The bug related to the requirement of equal child and parent addresses for the first
+        * range is fixed in U-Boot version 2022.04 by the following commit:
+        * https://source.denx.de/u-boot/u-boot/-/commit/1fd54253bca7d43d046bba4853fe5fafd034bc17
         */
        #address-cells = <3>;
        #size-cells = <2>;
index 673f490..fb78ef6 100644 (file)
                         * (totaling 127 MiB) for MEM.
                         */
                        ranges = <0x82000000 0 0xe8000000   0 0xe8000000   0 0x07f00000   /* Port 0 MEM */
-                                 0x81000000 0 0xefff0000   0 0xefff0000   0 0x00010000>; /* Port 0 IO */
+                                 0x81000000 0 0x00000000   0 0xefff0000   0 0x00010000>; /* Port 0 IO */
                        interrupt-map-mask = <0 0 0 7>;
                        interrupt-map = <0 0 0 1 &pcie_intc 0>,
                                        <0 0 0 2 &pcie_intc 1>,
index 7d369fd..11aa135 100644 (file)
        phy-handle = <&ethernet_phy0>;
        mediatek,tx-delay-ps = <1530>;
        snps,reset-gpio = <&pio 87 GPIO_ACTIVE_LOW>;
+       snps,reset-delays-us = <0 10000 10000>;
        pinctrl-names = "default", "sleep";
        pinctrl-0 = <&eth_default>;
        pinctrl-1 = <&eth_sleep>;
index de16c0d..a27b762 100644 (file)
        };
 
        eth: ethernet@1101c000 {
-               compatible = "mediatek,mt2712-gmac";
+               compatible = "mediatek,mt2712-gmac", "snps,dwmac-4.20a";
                reg = <0 0x1101c000 0 0x1300>;
                interrupts = <GIC_SPI 237 IRQ_TYPE_LEVEL_LOW>;
                interrupt-names = "macirq";
                clock-names = "axi",
                              "apb",
                              "mac_main",
-                             "ptp_ref";
+                             "ptp_ref",
+                             "rmii_internal";
                clocks = <&pericfg CLK_PERI_GMAC>,
                         <&pericfg CLK_PERI_GMAC_PCLK>,
                         <&topckgen CLK_TOP_ETHER_125M_SEL>,
-                        <&topckgen CLK_TOP_ETHER_50M_SEL>;
+                        <&topckgen CLK_TOP_ETHER_50M_SEL>,
+                        <&topckgen CLK_TOP_ETHER_50M_RMII_SEL>;
                assigned-clocks = <&topckgen CLK_TOP_ETHER_125M_SEL>,
-                                 <&topckgen CLK_TOP_ETHER_50M_SEL>;
+                                 <&topckgen CLK_TOP_ETHER_50M_SEL>,
+                                 <&topckgen CLK_TOP_ETHER_50M_RMII_SEL>;
                assigned-clock-parents = <&topckgen CLK_TOP_ETHERPLL_125M>,
-                                        <&topckgen CLK_TOP_APLL1_D3>;
+                                        <&topckgen CLK_TOP_APLL1_D3>,
+                                        <&topckgen CLK_TOP_ETHERPLL_50M>;
                power-domains = <&scpsys MT2712_POWER_DOMAIN_AUDIO>;
                mediatek,pericfg = <&pericfg>;
                snps,axi-config = <&stmmac_axi_setup>;
index 2d48c37..aaa00da 100644 (file)
                        #iommu-cells = <1>;
 
                        nvidia,memory-controller = <&mc>;
-                       status = "okay";
+                       status = "disabled";
                };
 
                smmu: iommu@12000000 {
index 58845a1..e2b9ec1 100644 (file)
 
        qcom,snoc-host-cap-8bit-quirk;
 };
+
+&crypto {
+       /* FIXME: qce_start triggers an SError */
+       status = "disabled";
+};
index 53b39e7..4b19744 100644 (file)
                        clock-frequency = <32000>;
                        #clock-cells = <0>;
                };
+
+               ufs_phy_rx_symbol_0_clk: ufs-phy-rx-symbol-0 {
+                       compatible = "fixed-clock";
+                       clock-frequency = <1000>;
+                       #clock-cells = <0>;
+               };
+
+               ufs_phy_rx_symbol_1_clk: ufs-phy-rx-symbol-1 {
+                       compatible = "fixed-clock";
+                       clock-frequency = <1000>;
+                       #clock-cells = <0>;
+               };
+
+               ufs_phy_tx_symbol_0_clk: ufs-phy-tx-symbol-0 {
+                       compatible = "fixed-clock";
+                       clock-frequency = <1000>;
+                       #clock-cells = <0>;
+               };
        };
 
        cpus {
                                 <0>,
                                 <0>,
                                 <0>,
-                                <0>,
-                                <0>,
-                                <0>,
+                                <&ufs_phy_rx_symbol_0_clk>,
+                                <&ufs_phy_rx_symbol_1_clk>,
+                                <&ufs_phy_tx_symbol_0_clk>,
                                 <0>,
                                 <0>;
                };
                                <75000000 300000000>,
                                <0 0>,
                                <0 0>,
-                               <75000000 300000000>,
-                               <75000000 300000000>;
+                               <0 0>,
+                               <0 0>;
                        status = "disabled";
                };
 
index 10c25ad..02b97e8 100644 (file)
                        compatible = "qcom,sm8450-smmu-500", "arm,mmu-500";
                        reg = <0 0x15000000 0 0x100000>;
                        #iommu-cells = <2>;
-                       #global-interrupts = <2>;
+                       #global-interrupts = <1>;
                        interrupts =    <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
                                        <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
                                        <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
                                        <GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>,
                                        <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
                                        <GIC_SPI 707 IRQ_TYPE_LEVEL_HIGH>,
+                                       <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
                                        <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
                                        <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
                                        <GIC_SPI 690 IRQ_TYPE_LEVEL_HIGH>,
                                 <&gcc GCC_USB30_PRIM_MASTER_CLK>,
                                 <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
                                 <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
-                                <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
+                                <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+                                <&gcc GCC_USB3_0_CLKREF_EN>;
                        clock-names = "cfg_noc", "core", "iface", "mock_utmi",
-                                     "sleep";
+                                     "sleep", "xo";
 
                        assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_PRIM_MASTER_CLK>;
index e8bd0af..6ebdc0f 100644 (file)
        hint    #20
        .endm
 
+/*
+ * Clear Branch History instruction
+ */
+       .macro clearbhb
+       hint    #22
+       .endm
+
 /*
  * Speculation barrier
  */
@@ -850,4 +857,50 @@ alternative_endif
 
 #endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */
 
+       .macro __mitigate_spectre_bhb_loop      tmp
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+alternative_cb  spectre_bhb_patch_loop_iter
+       mov     \tmp, #32               // Patched to correct the immediate
+alternative_cb_end
+.Lspectre_bhb_loop\@:
+       b       . + 4
+       subs    \tmp, \tmp, #1
+       b.ne    .Lspectre_bhb_loop\@
+       sb
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+       .endm
+
+       .macro mitigate_spectre_bhb_loop        tmp
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+alternative_cb spectre_bhb_patch_loop_mitigation_enable
+       b       .L_spectre_bhb_loop_done\@      // Patched to NOP
+alternative_cb_end
+       __mitigate_spectre_bhb_loop     \tmp
+.L_spectre_bhb_loop_done\@:
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+       .endm
+
+       /* Save/restores x0-x3 to the stack */
+       .macro __mitigate_spectre_bhb_fw
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+       stp     x0, x1, [sp, #-16]!
+       stp     x2, x3, [sp, #-16]!
+       mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_3
+alternative_cb smccc_patch_fw_mitigation_conduit
+       nop                                     // Patched to SMC/HVC #0
+alternative_cb_end
+       ldp     x2, x3, [sp], #16
+       ldp     x0, x1, [sp], #16
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+       .endm
+
+       .macro mitigate_spectre_bhb_clear_insn
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+alternative_cb spectre_bhb_patch_clearbhb
+       /* Patched to NOP when not supported */
+       clearbhb
+       isb
+alternative_cb_end
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+       .endm
 #endif /* __ASM_ASSEMBLER_H */
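
The #32 immediate in __mitigate_spectre_bhb_loop is only a placeholder; spectre_bhb_patch_loop_iter rewrites the MOV with the per-CPU iteration count at boot. Conceptually the patched sequence behaves like this C model (purely illustrative; the real mitigation is the b/subs/b.ne loop plus the sb barrier above):

	/* Run k taken branches so that earlier, potentially attacker-controlled
	 * branch outcomes age out of the branch history buffer. */
	static void bhb_loop_model(unsigned int k)	/* k is patched to 8, 24 or 32 */
	{
		while (k--)
			asm volatile("b 1f\n1:" ::: "memory");	/* one taken branch */
	}
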
index ef6be92..a77b5f4 100644 (file)
@@ -637,6 +637,35 @@ static inline bool cpu_supports_mixed_endian_el0(void)
        return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
 }
 
+
+static inline bool supports_csv2p3(int scope)
+{
+       u64 pfr0;
+       u8 csv2_val;
+
+       if (scope == SCOPE_LOCAL_CPU)
+               pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
+       else
+               pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+       csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
+                                                       ID_AA64PFR0_CSV2_SHIFT);
+       return csv2_val == 3;
+}
+
+static inline bool supports_clearbhb(int scope)
+{
+       u64 isar2;
+
+       if (scope == SCOPE_LOCAL_CPU)
+               isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
+       else
+               isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
+
+       return cpuid_feature_extract_unsigned_field(isar2,
+                                                   ID_AA64ISAR2_CLEARBHB_SHIFT);
+}
+
 const struct cpumask *system_32bit_el0_cpumask(void);
 DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
 
index 999b914..bfbf0c4 100644 (file)
 #define ARM_CPU_PART_CORTEX_A76                0xD0B
 #define ARM_CPU_PART_NEOVERSE_N1       0xD0C
 #define ARM_CPU_PART_CORTEX_A77                0xD0D
+#define ARM_CPU_PART_NEOVERSE_V1       0xD40
+#define ARM_CPU_PART_CORTEX_A78                0xD41
+#define ARM_CPU_PART_CORTEX_X1         0xD44
 #define ARM_CPU_PART_CORTEX_A510       0xD46
 #define ARM_CPU_PART_CORTEX_A710       0xD47
 #define ARM_CPU_PART_CORTEX_X2         0xD48
 #define ARM_CPU_PART_NEOVERSE_N2       0xD49
+#define ARM_CPU_PART_CORTEX_A78C       0xD4B
 
 #define APM_CPU_PART_POTENZA           0x000
 
 #define MIDR_CORTEX_A76        MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
 #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
 #define MIDR_CORTEX_A77        MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+#define MIDR_NEOVERSE_V1       MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
+#define MIDR_CORTEX_A78        MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
+#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
 #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
 #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
 #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
 #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
+#define MIDR_CORTEX_A78C       MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
 #define MIDR_THUNDERX  MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
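
The new MIDR_* constants follow the usual MIDR_EL1 layout: implementer in bits [31:24] and part number in bits [15:4], which is what MIDR_CPU_MODEL() packs together. An illustrative model of that packing:

	#include <stdint.h>

	/* Model of MIDR_CPU_MODEL(): implementer in [31:24], part number in
	 * [15:4]; the variant and revision fields are left as zero. */
	static inline uint32_t midr_cpu_model(uint32_t implementer, uint32_t partnum)
	{
		return (implementer << 24) | (partnum << 4);
	}
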
index 4335800..daff882 100644 (file)
@@ -62,9 +62,11 @@ enum fixed_addresses {
 #endif /* CONFIG_ACPI_APEI_GHES */
 
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+       FIX_ENTRY_TRAMP_TEXT3,
+       FIX_ENTRY_TRAMP_TEXT2,
+       FIX_ENTRY_TRAMP_TEXT1,
        FIX_ENTRY_TRAMP_DATA,
-       FIX_ENTRY_TRAMP_TEXT,
-#define TRAMP_VALIAS           (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
+#define TRAMP_VALIAS           (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT1))
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
        __end_of_permanent_fixed_addresses,
 
index 0b6b313..1e5760d 100644 (file)
@@ -65,6 +65,7 @@ enum aarch64_insn_hint_cr_op {
        AARCH64_INSN_HINT_PSB  = 0x11 << 5,
        AARCH64_INSN_HINT_TSB  = 0x12 << 5,
        AARCH64_INSN_HINT_CSDB = 0x14 << 5,
+       AARCH64_INSN_HINT_CLEARBHB = 0x16 << 5,
 
        AARCH64_INSN_HINT_BTI   = 0x20 << 5,
        AARCH64_INSN_HINT_BTIC  = 0x22 << 5,
index 5bc01e6..031e3a2 100644 (file)
@@ -714,6 +714,11 @@ static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
        ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
 }
 
+static inline bool kvm_system_needs_idmapped_vectors(void)
+{
+       return cpus_have_const_cap(ARM64_SPECTRE_V3A);
+}
+
 void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
 
 static inline void kvm_arch_hardware_unsetup(void) {}
index e4704a4..a857bca 100644 (file)
@@ -5,6 +5,7 @@
 #ifndef __ASM_MTE_KASAN_H
 #define __ASM_MTE_KASAN_H
 
+#include <asm/compiler.h>
 #include <asm/mte-def.h>
 
 #ifndef __ASSEMBLY__
index 7032f04..b1e1b74 100644 (file)
@@ -92,7 +92,7 @@ extern bool arm64_use_ng_mappings;
 #define __P001  PAGE_READONLY
 #define __P010  PAGE_READONLY
 #define __P011  PAGE_READONLY
-#define __P100  PAGE_EXECONLY
+#define __P100  PAGE_READONLY_EXEC     /* PAGE_EXECONLY if Enhanced PAN */
 #define __P101  PAGE_READONLY_EXEC
 #define __P110  PAGE_READONLY_EXEC
 #define __P111  PAGE_READONLY_EXEC
@@ -101,7 +101,7 @@ extern bool arm64_use_ng_mappings;
 #define __S001  PAGE_READONLY
 #define __S010  PAGE_SHARED
 #define __S011  PAGE_SHARED
-#define __S100  PAGE_EXECONLY
+#define __S100  PAGE_READONLY_EXEC     /* PAGE_EXECONLY if Enhanced PAN */
 #define __S101  PAGE_READONLY_EXEC
 #define __S110  PAGE_SHARED_EXEC
 #define __S111  PAGE_SHARED_EXEC
index c4ba047..94e147e 100644 (file)
@@ -1017,17 +1017,6 @@ static inline bool arch_wants_old_prefaulted_pte(void)
 }
 #define arch_wants_old_prefaulted_pte  arch_wants_old_prefaulted_pte
 
-static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
-{
-       if (cpus_have_const_cap(ARM64_HAS_EPAN))
-               return prot;
-
-       if (pgprot_val(prot) != pgprot_val(PAGE_EXECONLY))
-               return prot;
-
-       return PAGE_READONLY_EXEC;
-}
-
 static inline bool pud_sect_supported(void)
 {
        return PAGE_SIZE == SZ_4K;
index 1bce62f..56f7b1d 100644 (file)
@@ -5,7 +5,7 @@
 #ifndef __ASM_RWONCE_H
 #define __ASM_RWONCE_H
 
-#ifdef CONFIG_LTO
+#if defined(CONFIG_LTO) && !defined(__ASSEMBLY__)
 
 #include <linux/compiler_types.h>
 #include <asm/alternative-macros.h>
@@ -66,7 +66,7 @@
 })
 
 #endif /* !BUILD_VDSO */
-#endif /* CONFIG_LTO */
+#endif /* CONFIG_LTO && !__ASSEMBLY__ */
 
 #include <asm-generic/rwonce.h>
 
index 152cb35..40971ac 100644 (file)
@@ -23,4 +23,9 @@ extern char __mmuoff_data_start[], __mmuoff_data_end[];
 extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
 extern char __relocate_new_kernel_start[], __relocate_new_kernel_end[];
 
+static inline size_t entry_tramp_text_size(void)
+{
+       return __entry_tramp_text_end - __entry_tramp_text_start;
+}
+
 #endif /* __ASM_SECTIONS_H */
index f62ca39..86e0cc9 100644 (file)
@@ -93,5 +93,9 @@ void spectre_v4_enable_task_mitigation(struct task_struct *tsk);
 
 enum mitigation_state arm64_get_meltdown_state(void);
 
+enum mitigation_state arm64_get_spectre_bhb_state(void);
+bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
+u8 spectre_bhb_loop_affected(int scope);
+void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_SPECTRE_H */
index 898bee0..932d45b 100644 (file)
 #define ID_AA64ISAR1_GPI_IMP_DEF               0x1
 
 /* id_aa64isar2 */
+#define ID_AA64ISAR2_CLEARBHB_SHIFT    28
 #define ID_AA64ISAR2_RPRES_SHIFT       4
 #define ID_AA64ISAR2_WFXT_SHIFT                0
 
 #endif
 
 /* id_aa64mmfr1 */
+#define ID_AA64MMFR1_ECBHB_SHIFT       60
 #define ID_AA64MMFR1_AFP_SHIFT         44
 #define ID_AA64MMFR1_ETS_SHIFT         36
 #define ID_AA64MMFR1_TWED_SHIFT                32
diff --git a/arch/arm64/include/asm/vectors.h b/arch/arm64/include/asm/vectors.h
new file mode 100644 (file)
index 0000000..f64613a
--- /dev/null
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 ARM Ltd.
+ */
+#ifndef __ASM_VECTORS_H
+#define __ASM_VECTORS_H
+
+#include <linux/bug.h>
+#include <linux/percpu.h>
+
+#include <asm/fixmap.h>
+
+extern char vectors[];
+extern char tramp_vectors[];
+extern char __bp_harden_el1_vectors[];
+
+/*
+ * Note: the order of this enum corresponds to two arrays in entry.S:
+ * tramp_vecs and __bp_harden_el1_vectors. By default the canonical
+ * 'full fat' vectors are used directly.
+ */
+enum arm64_bp_harden_el1_vectors {
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+       /*
+        * Perform the BHB loop mitigation, before branching to the canonical
+        * vectors.
+        */
+       EL1_VECTOR_BHB_LOOP,
+
+       /*
+        * Make the SMC call for firmware mitigation, before branching to the
+        * canonical vectors.
+        */
+       EL1_VECTOR_BHB_FW,
+
+       /*
+        * Use the ClearBHB instruction, before branching to the canonical
+        * vectors.
+        */
+       EL1_VECTOR_BHB_CLEAR_INSN,
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+
+       /*
+        * Remap the kernel before branching to the canonical vectors.
+        */
+       EL1_VECTOR_KPTI,
+};
+
+#ifndef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+#define EL1_VECTOR_BHB_LOOP            -1
+#define EL1_VECTOR_BHB_FW              -1
+#define EL1_VECTOR_BHB_CLEAR_INSN      -1
+#endif /* !CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+
+/* The vectors to use on return from EL0. e.g. to remap the kernel */
+DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector);
+
+#ifndef CONFIG_UNMAP_KERNEL_AT_EL0
+#define TRAMP_VALIAS   0
+#endif
+
+static inline const char *
+arm64_get_bp_hardening_vector(enum arm64_bp_harden_el1_vectors slot)
+{
+       if (arm64_kernel_unmapped_at_el0())
+               return (char *)TRAMP_VALIAS + SZ_2K * slot;
+
+       WARN_ON_ONCE(slot == EL1_VECTOR_KPTI);
+
+       return __bp_harden_el1_vectors + SZ_2K * slot;
+}
+
+#endif /* __ASM_VECTORS_H */
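
The SZ_2K stride in arm64_get_bp_hardening_vector() works because one complete vector table is exactly 2KiB: 16 ventries (4 exception types for each of 4 source/width combinations) at 128 bytes each. A trivial stand-alone check of that arithmetic:

	#include <assert.h>

	#define VENTRY_SIZE	128	/* each ventry is .align 7, i.e. 128 bytes */
	#define VENTRIES	16	/* 4 entries x 4 groups per vector table */
	#define SZ_2K		0x800

	int main(void)
	{
		/* One full vector table fits exactly in one 2KiB slot. */
		assert(VENTRY_SIZE * VENTRIES == SZ_2K);
		return 0;
	}
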
index b3edde6..323e251 100644 (file)
@@ -281,6 +281,11 @@ struct kvm_arm_copy_mte_tags {
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED       3
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED            (1U << 4)
 
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3    KVM_REG_ARM_FW_REG(3)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL          0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL              1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED       2
+
 /* SVE registers */
 #define KVM_REG_ARM64_SVE              (0x15 << KVM_REG_ARM_COPROC_SHIFT)
 
index b217941..a401180 100644 (file)
@@ -502,6 +502,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                .matches = has_spectre_v4,
                .cpu_enable = spectre_v4_enable_mitigation,
        },
+       {
+               .desc = "Spectre-BHB",
+               .capability = ARM64_SPECTRE_BHB,
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .matches = is_spectre_bhb_affected,
+               .cpu_enable = spectre_bhb_enable_mitigation,
+       },
 #ifdef CONFIG_ARM64_ERRATUM_1418040
        {
                .desc = "ARM erratum 1418040",
index e5f23da..d336876 100644 (file)
@@ -73,6 +73,8 @@
 #include <linux/mm.h>
 #include <linux/cpu.h>
 #include <linux/kasan.h>
+#include <linux/percpu.h>
+
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
@@ -85,6 +87,7 @@
 #include <asm/smp.h>
 #include <asm/sysreg.h>
 #include <asm/traps.h>
+#include <asm/vectors.h>
 #include <asm/virt.h>
 
 /* Kernel representation of AT_HWCAP and AT_HWCAP2 */
@@ -110,6 +113,8 @@ DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
 bool arm64_use_ng_mappings = false;
 EXPORT_SYMBOL(arm64_use_ng_mappings);
 
+DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
+
 /*
  * Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs
  * support it?
@@ -226,6 +231,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0),
        ARM64_FTR_END,
 };
@@ -1590,6 +1596,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 
        int cpu = smp_processor_id();
 
+       if (__this_cpu_read(this_cpu_vector) == vectors) {
+               const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);
+
+               __this_cpu_write(this_cpu_vector, v);
+       }
+
        /*
         * We don't need to rewrite the page-tables if either we've done
         * it already or we have KASLR enabled and therefore have not
index 772ec2e..4a3a653 100644 (file)
 
        .macro kernel_ventry, el:req, ht:req, regsize:req, label:req
        .align 7
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+.Lventry_start\@:
        .if     \el == 0
-alternative_if ARM64_UNMAP_KERNEL_AT_EL0
+       /*
+        * This must be the first instruction of the EL0 vector entries. It is
+        * skipped by the trampoline vectors, to trigger the cleanup.
+        */
+       b       .Lskip_tramp_vectors_cleanup\@
        .if     \regsize == 64
        mrs     x30, tpidrro_el0
        msr     tpidrro_el0, xzr
        .else
        mov     x30, xzr
        .endif
-alternative_else_nop_endif
+.Lskip_tramp_vectors_cleanup\@:
        .endif
-#endif
 
        sub     sp, sp, #PT_REGS_SIZE
 #ifdef CONFIG_VMAP_STACK
@@ -95,11 +98,15 @@ alternative_else_nop_endif
        mrs     x0, tpidrro_el0
 #endif
        b       el\el\ht\()_\regsize\()_\label
+.org .Lventry_start\@ + 128    // Did we overflow the ventry slot?
        .endm
 
-       .macro tramp_alias, dst, sym
+       .macro tramp_alias, dst, sym, tmp
        mov_q   \dst, TRAMP_VALIAS
-       add     \dst, \dst, #(\sym - .entry.tramp.text)
+       adr_l   \tmp, \sym
+       add     \dst, \dst, \tmp
+       adr_l   \tmp, .entry.tramp.text
+       sub     \dst, \dst, \tmp
        .endm
 
        /*
@@ -116,7 +123,7 @@ alternative_cb_end
        tbnz    \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
        mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2
        mov     w1, #\state
-alternative_cb spectre_v4_patch_fw_mitigation_conduit
+alternative_cb smccc_patch_fw_mitigation_conduit
        nop                                     // Patched to SMC/HVC #0
 alternative_cb_end
 .L__asm_ssbd_skip\@:
@@ -413,21 +420,26 @@ alternative_else_nop_endif
        ldp     x24, x25, [sp, #16 * 12]
        ldp     x26, x27, [sp, #16 * 13]
        ldp     x28, x29, [sp, #16 * 14]
-       ldr     lr, [sp, #S_LR]
-       add     sp, sp, #PT_REGS_SIZE           // restore sp
 
        .if     \el == 0
-alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
+alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
+       ldr     lr, [sp, #S_LR]
+       add     sp, sp, #PT_REGS_SIZE           // restore sp
+       eret
+alternative_else_nop_endif
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
        bne     4f
-       msr     far_el1, x30
-       tramp_alias     x30, tramp_exit_native
+       msr     far_el1, x29
+       tramp_alias     x30, tramp_exit_native, x29
        br      x30
 4:
-       tramp_alias     x30, tramp_exit_compat
+       tramp_alias     x30, tramp_exit_compat, x29
        br      x30
 #endif
        .else
+       ldr     lr, [sp, #S_LR]
+       add     sp, sp, #PT_REGS_SIZE           // restore sp
+
        /* Ensure any device/NC reads complete */
        alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
 
@@ -594,12 +606,6 @@ SYM_CODE_END(ret_to_user)
 
        .popsection                             // .entry.text
 
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-/*
- * Exception vectors trampoline.
- */
-       .pushsection ".entry.tramp.text", "ax"
-
        // Move from tramp_pg_dir to swapper_pg_dir
        .macro tramp_map_kernel, tmp
        mrs     \tmp, ttbr1_el1
@@ -633,12 +639,47 @@ alternative_else_nop_endif
         */
        .endm
 
-       .macro tramp_ventry, regsize = 64
+       .macro tramp_data_page  dst
+       adr_l   \dst, .entry.tramp.text
+       sub     \dst, \dst, PAGE_SIZE
+       .endm
+
+       .macro tramp_data_read_var      dst, var
+#ifdef CONFIG_RANDOMIZE_BASE
+       tramp_data_page         \dst
+       add     \dst, \dst, #:lo12:__entry_tramp_data_\var
+       ldr     \dst, [\dst]
+#else
+       ldr     \dst, =\var
+#endif
+       .endm
+
+#define BHB_MITIGATION_NONE    0
+#define BHB_MITIGATION_LOOP    1
+#define BHB_MITIGATION_FW      2
+#define BHB_MITIGATION_INSN    3
+
+       .macro tramp_ventry, vector_start, regsize, kpti, bhb
        .align  7
 1:
        .if     \regsize == 64
        msr     tpidrro_el0, x30        // Restored in kernel_ventry
        .endif
+
+       .if     \bhb == BHB_MITIGATION_LOOP
+       /*
+        * This sequence must appear before the first indirect branch. i.e. the
+        * ret out of tramp_ventry. It appears here because x30 is free.
+        */
+       __mitigate_spectre_bhb_loop     x30
+       .endif // \bhb == BHB_MITIGATION_LOOP
+
+       .if     \bhb == BHB_MITIGATION_INSN
+       clearbhb
+       isb
+       .endif // \bhb == BHB_MITIGATION_INSN
+
+       .if     \kpti == 1
        /*
         * Defend against branch aliasing attacks by pushing a dummy
         * entry onto the return stack and using a RET instruction to
@@ -648,46 +689,75 @@ alternative_else_nop_endif
        b       .
 2:
        tramp_map_kernel        x30
-#ifdef CONFIG_RANDOMIZE_BASE
-       adr     x30, tramp_vectors + PAGE_SIZE
 alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
-       ldr     x30, [x30]
-#else
-       ldr     x30, =vectors
-#endif
+       tramp_data_read_var     x30, vectors
 alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
-       prfm    plil1strm, [x30, #(1b - tramp_vectors)]
+       prfm    plil1strm, [x30, #(1b - \vector_start)]
 alternative_else_nop_endif
+
        msr     vbar_el1, x30
-       add     x30, x30, #(1b - tramp_vectors)
        isb
+       .else
+       ldr     x30, =vectors
+       .endif // \kpti == 1
+
+       .if     \bhb == BHB_MITIGATION_FW
+       /*
+        * The firmware sequence must appear before the first indirect branch.
+        * i.e. the ret out of tramp_ventry. But it also needs the stack to be
+        * mapped to save/restore the registers the SMC clobbers.
+        */
+       __mitigate_spectre_bhb_fw
+       .endif // \bhb == BHB_MITIGATION_FW
+
+       add     x30, x30, #(1b - \vector_start + 4)
        ret
+.org 1b + 128  // Did we overflow the ventry slot?
        .endm
 
        .macro tramp_exit, regsize = 64
-       adr     x30, tramp_vectors
+       tramp_data_read_var     x30, this_cpu_vector
+       get_this_cpu_offset x29
+       ldr     x30, [x30, x29]
+
        msr     vbar_el1, x30
-       tramp_unmap_kernel      x30
+       ldr     lr, [sp, #S_LR]
+       tramp_unmap_kernel      x29
        .if     \regsize == 64
-       mrs     x30, far_el1
+       mrs     x29, far_el1
        .endif
+       add     sp, sp, #PT_REGS_SIZE           // restore sp
        eret
        sb
        .endm
 
-       .align  11
-SYM_CODE_START_NOALIGN(tramp_vectors)
+       .macro  generate_tramp_vector,  kpti, bhb
+.Lvector_start\@:
        .space  0x400
 
-       tramp_ventry
-       tramp_ventry
-       tramp_ventry
-       tramp_ventry
+       .rept   4
+       tramp_ventry    .Lvector_start\@, 64, \kpti, \bhb
+       .endr
+       .rept   4
+       tramp_ventry    .Lvector_start\@, 32, \kpti, \bhb
+       .endr
+       .endm
 
-       tramp_ventry    32
-       tramp_ventry    32
-       tramp_ventry    32
-       tramp_ventry    32
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+/*
+ * Exception vectors trampoline.
+ * The order must match __bp_harden_el1_vectors and the
+ * arm64_bp_harden_el1_vectors enum.
+ */
+       .pushsection ".entry.tramp.text", "ax"
+       .align  11
+SYM_CODE_START_NOALIGN(tramp_vectors)
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+       generate_tramp_vector   kpti=1, bhb=BHB_MITIGATION_LOOP
+       generate_tramp_vector   kpti=1, bhb=BHB_MITIGATION_FW
+       generate_tramp_vector   kpti=1, bhb=BHB_MITIGATION_INSN
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+       generate_tramp_vector   kpti=1, bhb=BHB_MITIGATION_NONE
 SYM_CODE_END(tramp_vectors)
 
 SYM_CODE_START(tramp_exit_native)
@@ -704,12 +774,56 @@ SYM_CODE_END(tramp_exit_compat)
        .pushsection ".rodata", "a"
        .align PAGE_SHIFT
 SYM_DATA_START(__entry_tramp_data_start)
+__entry_tramp_data_vectors:
        .quad   vectors
+#ifdef CONFIG_ARM_SDE_INTERFACE
+__entry_tramp_data___sdei_asm_handler:
+       .quad   __sdei_asm_handler
+#endif /* CONFIG_ARM_SDE_INTERFACE */
+__entry_tramp_data_this_cpu_vector:
+       .quad   this_cpu_vector
 SYM_DATA_END(__entry_tramp_data_start)
        .popsection                             // .rodata
 #endif /* CONFIG_RANDOMIZE_BASE */
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
+/*
+ * Exception vectors for spectre mitigations on entry from EL1 when
+ * kpti is not in use.
+ */
+       .macro generate_el1_vector, bhb
+.Lvector_start\@:
+       kernel_ventry   1, t, 64, sync          // Synchronous EL1t
+       kernel_ventry   1, t, 64, irq           // IRQ EL1t
+       kernel_ventry   1, t, 64, fiq           // FIQ EL1t
+       kernel_ventry   1, t, 64, error         // Error EL1t
+
+       kernel_ventry   1, h, 64, sync          // Synchronous EL1h
+       kernel_ventry   1, h, 64, irq           // IRQ EL1h
+       kernel_ventry   1, h, 64, fiq           // FIQ EL1h
+       kernel_ventry   1, h, 64, error         // Error EL1h
+
+       .rept   4
+       tramp_ventry    .Lvector_start\@, 64, 0, \bhb
+       .endr
+       .rept 4
+       tramp_ventry    .Lvector_start\@, 32, 0, \bhb
+       .endr
+       .endm
+
+/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
+       .pushsection ".entry.text", "ax"
+       .align  11
+SYM_CODE_START(__bp_harden_el1_vectors)
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+       generate_el1_vector     bhb=BHB_MITIGATION_LOOP
+       generate_el1_vector     bhb=BHB_MITIGATION_FW
+       generate_el1_vector     bhb=BHB_MITIGATION_INSN
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+SYM_CODE_END(__bp_harden_el1_vectors)
+       .popsection
+
+
 /*
  * Register switch for AArch64. The callee-saved registers need to be saved
  * and restored. On entry:
@@ -835,14 +949,7 @@ SYM_CODE_START(__sdei_asm_entry_trampoline)
         * Remember whether to unmap the kernel on exit.
         */
 1:     str     x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
-
-#ifdef CONFIG_RANDOMIZE_BASE
-       adr     x4, tramp_vectors + PAGE_SIZE
-       add     x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
-       ldr     x4, [x4]
-#else
-       ldr     x4, =__sdei_asm_handler
-#endif
+       tramp_data_read_var     x4, __sdei_asm_handler
        br      x4
 SYM_CODE_END(__sdei_asm_entry_trampoline)
 NOKPROBE(__sdei_asm_entry_trampoline)
@@ -865,13 +972,6 @@ SYM_CODE_END(__sdei_asm_exit_trampoline)
 NOKPROBE(__sdei_asm_exit_trampoline)
        .ltorg
 .popsection            // .entry.tramp.text
-#ifdef CONFIG_RANDOMIZE_BASE
-.pushsection ".rodata", "a"
-SYM_DATA_START(__sdei_asm_trampoline_next_handler)
-       .quad   __sdei_asm_handler
-SYM_DATA_END(__sdei_asm_trampoline_next_handler)
-.popsection            // .rodata
-#endif /* CONFIG_RANDOMIZE_BASE */
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 /*
@@ -981,7 +1081,7 @@ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
 alternative_else_nop_endif
 
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-       tramp_alias     dst=x5, sym=__sdei_asm_exit_trampoline
+       tramp_alias     dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
        br      x5
 #endif
 SYM_CODE_END(__sdei_asm_handler)
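
The reworked tramp_alias no longer encodes the symbol's offset as an add immediate; it derives it with a scratch register, so any symbol inside .entry.tramp.text can be aliased. The address arithmetic it performs reduces to the following (names illustrative):

	#include <stdint.h>

	/* Model of tramp_alias: map a trampoline symbol to its TRAMP_VALIAS
	 * fixmap alias via its offset inside .entry.tramp.text. */
	static inline uintptr_t tramp_alias_addr(uintptr_t tramp_valias,
						 uintptr_t sym,
						 uintptr_t tramp_text_start)
	{
		return tramp_valias + (sym - tramp_text_start);
	}
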
index 7eaf1f7..55a1ced 100644 (file)
@@ -66,6 +66,10 @@ KVM_NVHE_ALIAS(kvm_patch_vector_branch);
 KVM_NVHE_ALIAS(kvm_update_va_mask);
 KVM_NVHE_ALIAS(kvm_get_kimage_voffset);
 KVM_NVHE_ALIAS(kvm_compute_final_ctr_el0);
+KVM_NVHE_ALIAS(spectre_bhb_patch_loop_iter);
+KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable);
+KVM_NVHE_ALIAS(spectre_bhb_patch_wa3);
+KVM_NVHE_ALIAS(spectre_bhb_patch_clearbhb);
 
 /* Global kernel state accessed by nVHE hyp code. */
 KVM_NVHE_ALIAS(kvm_vgic_global_state);
index 902e408..6d45c63 100644 (file)
  */
 
 #include <linux/arm-smccc.h>
+#include <linux/bpf.h>
 #include <linux/cpu.h>
 #include <linux/device.h>
 #include <linux/nospec.h>
 #include <linux/prctl.h>
 #include <linux/sched/task_stack.h>
 
+#include <asm/debug-monitors.h>
 #include <asm/insn.h>
 #include <asm/spectre.h>
 #include <asm/traps.h>
+#include <asm/vectors.h>
 #include <asm/virt.h>
 
 /*
@@ -96,14 +99,51 @@ static bool spectre_v2_mitigations_off(void)
        return ret;
 }
 
+static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
+{
+       switch (bhb_state) {
+       case SPECTRE_UNAFFECTED:
+               return "";
+       default:
+       case SPECTRE_VULNERABLE:
+               return ", but not BHB";
+       case SPECTRE_MITIGATED:
+               return ", BHB";
+       }
+}
+
+static bool _unprivileged_ebpf_enabled(void)
+{
+#ifdef CONFIG_BPF_SYSCALL
+       return !sysctl_unprivileged_bpf_disabled;
+#else
+       return false;
+#endif
+}
+
 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
                            char *buf)
 {
+       enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
+       const char *bhb_str = get_bhb_affected_string(bhb_state);
+       const char *v2_str = "Branch predictor hardening";
+
        switch (spectre_v2_state) {
        case SPECTRE_UNAFFECTED:
-               return sprintf(buf, "Not affected\n");
+               if (bhb_state == SPECTRE_UNAFFECTED)
+                       return sprintf(buf, "Not affected\n");
+
+               /*
+                * Platforms affected by Spectre-BHB can't report
+                * "Not affected" for Spectre-v2.
+                */
+               v2_str = "CSV2";
+               fallthrough;
        case SPECTRE_MITIGATED:
-               return sprintf(buf, "Mitigation: Branch predictor hardening\n");
+               if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
+                       return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
+
+               return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
        case SPECTRE_VULNERABLE:
                fallthrough;
        default:
@@ -554,9 +594,9 @@ void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
  * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
  * to call into firmware to adjust the mitigation state.
  */
-void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
-                                                  __le32 *origptr,
-                                                  __le32 *updptr, int nr_inst)
+void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
+                                              __le32 *origptr,
+                                              __le32 *updptr, int nr_inst)
 {
        u32 insn;
 
@@ -770,3 +810,344 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
                return -ENODEV;
        }
 }
+
+/*
+ * Spectre BHB.
+ *
+ * A CPU is either:
+ * - Mitigated by a branchy loop run a CPU-specific number of times, and listed
+ *   in our "loop mitigated list".
+ * - Mitigated in software by the firmware Spectre v2 call.
+ * - Has the ClearBHB instruction to perform the mitigation.
+ * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
+ *   software mitigation in the vectors is needed.
+ * - Has CSV2.3, so is unaffected.
+ */
+static enum mitigation_state spectre_bhb_state;
+
+enum mitigation_state arm64_get_spectre_bhb_state(void)
+{
+       return spectre_bhb_state;
+}
+
+enum bhb_mitigation_bits {
+       BHB_LOOP,
+       BHB_FW,
+       BHB_HW,
+       BHB_INSN,
+};
+static unsigned long system_bhb_mitigations;
+
+/*
+ * This must be called with SCOPE_LOCAL_CPU for each type of CPU before any
+ * SCOPE_SYSTEM call will give the right answer.
+ */
+u8 spectre_bhb_loop_affected(int scope)
+{
+       u8 k = 0;
+       static u8 max_bhb_k;
+
+       if (scope == SCOPE_LOCAL_CPU) {
+               static const struct midr_range spectre_bhb_k32_list[] = {
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
+                       MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+                       MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+                       {},
+               };
+               static const struct midr_range spectre_bhb_k24_list[] = {
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
+                       MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+                       {},
+               };
+               static const struct midr_range spectre_bhb_k8_list[] = {
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+                       {},
+               };
+
+               if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
+                       k = 32;
+               else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
+                       k = 24;
+               else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
+                       k =  8;
+
+               max_bhb_k = max(max_bhb_k, k);
+       } else {
+               k = max_bhb_k;
+       }
+
+       return k;
+}
+
+static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
+{
+       int ret;
+       struct arm_smccc_res res;
+
+       arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                            ARM_SMCCC_ARCH_WORKAROUND_3, &res);
+
+       ret = res.a0;
+       switch (ret) {
+       case SMCCC_RET_SUCCESS:
+               return SPECTRE_MITIGATED;
+       case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+               return SPECTRE_UNAFFECTED;
+       default:
+               fallthrough;
+       case SMCCC_RET_NOT_SUPPORTED:
+               return SPECTRE_VULNERABLE;
+       }
+}
+
+static bool is_spectre_bhb_fw_affected(int scope)
+{
+       static bool system_affected;
+       enum mitigation_state fw_state;
+       bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
+       static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+               {},
+       };
+       bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
+                                        spectre_bhb_firmware_mitigated_list);
+
+       if (scope != SCOPE_LOCAL_CPU)
+               return system_affected;
+
+       fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
+       if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
+               system_affected = true;
+               return true;
+       }
+
+       return false;
+}
+
+static bool supports_ecbhb(int scope)
+{
+       u64 mmfr1;
+
+       if (scope == SCOPE_LOCAL_CPU)
+               mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
+       else
+               mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+
+       return cpuid_feature_extract_unsigned_field(mmfr1,
+                                                   ID_AA64MMFR1_ECBHB_SHIFT);
+}
+
+bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
+                            int scope)
+{
+       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+       if (supports_csv2p3(scope))
+               return false;
+
+       if (supports_clearbhb(scope))
+               return true;
+
+       if (spectre_bhb_loop_affected(scope))
+               return true;
+
+       if (is_spectre_bhb_fw_affected(scope))
+               return true;
+
+       return false;
+}
+
+static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
+{
+       const char *v = arm64_get_bp_hardening_vector(slot);
+
+       if (slot < 0)
+               return;
+
+       __this_cpu_write(this_cpu_vector, v);
+
+       /*
+        * When KPTI is in use, the vectors are switched when exiting to
+        * user-space.
+        */
+       if (arm64_kernel_unmapped_at_el0())
+               return;
+
+       write_sysreg(v, vbar_el1);
+       isb();
+}
+
+void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
+{
+       bp_hardening_cb_t cpu_cb;
+       enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
+       struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
+
+       if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
+               return;
+
+       if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
+               /* No point mitigating Spectre-BHB alone. */
+       } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
+               pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
+       } else if (cpu_mitigations_off()) {
+               pr_info_once("spectre-bhb mitigation disabled by command line option\n");
+       } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
+               state = SPECTRE_MITIGATED;
+               set_bit(BHB_HW, &system_bhb_mitigations);
+       } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
+               /*
+                * Ensure KVM uses the indirect vector which will have ClearBHB
+                * added.
+                */
+               if (!data->slot)
+                       data->slot = HYP_VECTOR_INDIRECT;
+
+               this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
+               state = SPECTRE_MITIGATED;
+               set_bit(BHB_INSN, &system_bhb_mitigations);
+       } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
+               /*
+                * Ensure KVM uses the indirect vector which will have the
+                * branchy-loop added. A57/A72-r0 will already have selected
+                * the spectre-indirect vector, which is sufficient for BHB
+                * too.
+                */
+               if (!data->slot)
+                       data->slot = HYP_VECTOR_INDIRECT;
+
+               this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
+               state = SPECTRE_MITIGATED;
+               set_bit(BHB_LOOP, &system_bhb_mitigations);
+       } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
+               fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
+               if (fw_state == SPECTRE_MITIGATED) {
+                       /*
+                        * Ensure KVM uses one of the spectre bp_hardening
+                        * vectors. The indirect vector doesn't include the EL3
+                        * call, so needs upgrading to
+                        * HYP_VECTOR_SPECTRE_INDIRECT.
+                        */
+                       if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
+                               data->slot += 1;
+
+                       this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
+
+                       /*
+                        * The WA3 call in the vectors supersedes the WA1 call
+                        * made during context-switch. Uninstall any firmware
+                        * bp_hardening callback.
+                        */
+                       cpu_cb = spectre_v2_get_sw_mitigation_cb();
+                       if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
+                               __this_cpu_write(bp_hardening_data.fn, NULL);
+
+                       state = SPECTRE_MITIGATED;
+                       set_bit(BHB_FW, &system_bhb_mitigations);
+               }
+       }
+
+       update_mitigation_state(&spectre_bhb_state, state);
+}
+
+/* Patched to NOP when enabled */
+void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
+                                                     __le32 *origptr,
+                                                     __le32 *updptr, int nr_inst)
+{
+       BUG_ON(nr_inst != 1);
+
+       if (test_bit(BHB_LOOP, &system_bhb_mitigations))
+               *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
+}
+
+/* Patched to NOP when enabled */
+void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
+                                                  __le32 *origptr,
+                                                  __le32 *updptr, int nr_inst)
+{
+       BUG_ON(nr_inst != 1);
+
+       if (test_bit(BHB_FW, &system_bhb_mitigations))
+               *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
+}
+
+/* Patched to correct the immediate */
+void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
+                                  __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+       u8 rd;
+       u32 insn;
+       u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
+
+       BUG_ON(nr_inst != 1); /* MOV -> MOV */
+
+       if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
+               return;
+
+       insn = le32_to_cpu(*origptr);
+       rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
+       insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
+                                        AARCH64_INSN_VARIANT_64BIT,
+                                        AARCH64_INSN_MOVEWIDE_ZERO);
+       *updptr++ = cpu_to_le32(insn);
+}
+
+/* Patched to mov WA3 when supported */
+void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
+                                  __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+       u8 rd;
+       u32 insn;
+
+       BUG_ON(nr_inst != 1); /* MOV -> MOV */
+
+       if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
+           !test_bit(BHB_FW, &system_bhb_mitigations))
+               return;
+
+       insn = le32_to_cpu(*origptr);
+       rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
+
+       insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
+                                                 AARCH64_INSN_VARIANT_32BIT,
+                                                 AARCH64_INSN_REG_ZR, rd,
+                                                 ARM_SMCCC_ARCH_WORKAROUND_3);
+       if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
+               return;
+
+       *updptr++ = cpu_to_le32(insn);
+}
+
+/* Patched to NOP when not supported */
+void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt,
+                                  __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+       BUG_ON(nr_inst != 2);
+
+       if (test_bit(BHB_INSN, &system_bhb_mitigations))
+               return;
+
+       *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
+       *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
+}
+
+#ifdef CONFIG_BPF_SYSCALL
+#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
+void unpriv_ebpf_notify(int new_state)
+{
+       if (spectre_v2_state == SPECTRE_VULNERABLE ||
+           spectre_bhb_state != SPECTRE_MITIGATED)
+               return;
+
+       if (!new_state)
+               pr_err("WARNING: %s", EBPF_WARN);
+}
+#endif
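
Context for the hook above, as a hedged sketch (the pairing is not shown in this diff): the generic bpf sysctl code is expected to provide unpriv_ebpf_notify() as a weak no-op which this arch definition overrides, roughly:

/* sketch of the generic side; the arch override above replaces this
 * weak symbol at link time, and the sysctl handler for
 * kernel.unprivileged_bpf_disabled calls it with the new value */
void __weak unpriv_ebpf_notify(int new_state)
{
}

With that wiring, flipping unprivileged eBPF back on at runtime is what triggers the pr_err() above on an otherwise mitigated system.
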
index 50bab18..edaf0fa 100644 (file)
@@ -341,7 +341,7 @@ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
        <= SZ_4K, "Hibernate exit text too big or misaligned")
 #endif
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
+ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE,
        "Entry trampoline text too big")
 #endif
 #ifdef CONFIG_KVM
index ecc5958..4dca6ff 100644 (file)
@@ -1491,10 +1491,7 @@ static int kvm_init_vector_slots(void)
        base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
        kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
 
-       if (!cpus_have_const_cap(ARM64_SPECTRE_V3A))
-               return 0;
-
-       if (!has_vhe()) {
+       if (kvm_system_needs_idmapped_vectors() && !has_vhe()) {
                err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
                                               __BP_HARDEN_HYP_VECS_SZ, &base);
                if (err)
index b6b6801..7839d07 100644 (file)
@@ -62,6 +62,10 @@ el1_sync:                            // Guest trapped into EL2
        /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
        eor     w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
                          ARM_SMCCC_ARCH_WORKAROUND_2)
+       cbz     w1, wa_epilogue
+
+       eor     w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
+                         ARM_SMCCC_ARCH_WORKAROUND_3)
        cbnz    w1, el1_trap
 
 wa_epilogue:
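
The eor chain above is a constant-comparison trick: each eor with (A ^ B) turns a register holding (id ^ A) into (id ^ B), so one register is matched against WA1, WA2 and then WA3 without reloading immediates. A hedged C rendering, assuming w1 arrives holding func_id ^ ARM_SMCCC_ARCH_WORKAROUND_1 from the WA1 check just before this hunk (WA_EPILOGUE and EL1_TRAP are hypothetical stand-ins for the branch targets):

static int wa_dispatch(u32 func_id)
{
        u32 w1 = func_id ^ ARM_SMCCC_ARCH_WORKAROUND_1;

        if (!w1)
                return WA_EPILOGUE;     /* WORKAROUND_1 */
        w1 ^= ARM_SMCCC_ARCH_WORKAROUND_1 ^ ARM_SMCCC_ARCH_WORKAROUND_2;
        if (!w1)
                return WA_EPILOGUE;     /* WORKAROUND_2 */
        w1 ^= ARM_SMCCC_ARCH_WORKAROUND_2 ^ ARM_SMCCC_ARCH_WORKAROUND_3;
        if (w1)
                return EL1_TRAP;        /* any other function id */
        return WA_EPILOGUE;             /* WORKAROUND_3 */
}
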
@@ -192,7 +196,10 @@ SYM_CODE_END(__kvm_hyp_vector)
        sub     sp, sp, #(8 * 4)
        stp     x2, x3, [sp, #(8 * 0)]
        stp     x0, x1, [sp, #(8 * 2)]
+       alternative_cb spectre_bhb_patch_wa3
+       /* Patched to mov WA3 when supported */
        mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_1
+       alternative_cb_end
        smc     #0
        ldp     x2, x3, [sp, #(8 * 0)]
        add     sp, sp, #(8 * 2)
@@ -205,6 +212,8 @@ SYM_CODE_END(__kvm_hyp_vector)
        spectrev2_smccc_wa1_smc
        .else
        stp     x0, x1, [sp, #-16]!
+       mitigate_spectre_bhb_loop       x0
+       mitigate_spectre_bhb_clear_insn
        .endif
        .if \indirect != 0
        alternative_cb  kvm_patch_vector_branch
index 526a7d6..cdbe8e2 100644 (file)
@@ -148,8 +148,10 @@ int hyp_map_vectors(void)
        phys_addr_t phys;
        void *bp_base;
 
-       if (!cpus_have_const_cap(ARM64_SPECTRE_V3A))
+       if (!kvm_system_needs_idmapped_vectors()) {
+               __hyp_bp_vect_base = __bp_harden_hyp_vecs;
                return 0;
+       }
 
        phys = __hyp_pa(__bp_harden_hyp_vecs);
        bp_base = (void *)__pkvm_create_private_mapping(phys,
index 11d053f..54af470 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/kvm_host.h>
 #include <linux/types.h>
 #include <linux/jump_label.h>
+#include <linux/percpu.h>
 #include <uapi/linux/psci.h>
 
 #include <kvm/arm_psci.h>
@@ -24,6 +25,8 @@
 #include <asm/fpsimd.h>
 #include <asm/debug-monitors.h>
 #include <asm/processor.h>
+#include <asm/thread_info.h>
+#include <asm/vectors.h>
 
 /* VHE specific context */
 DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
@@ -67,7 +70,7 @@ NOKPROBE_SYMBOL(__activate_traps);
 
 static void __deactivate_traps(struct kvm_vcpu *vcpu)
 {
-       extern char vectors[];  /* kernel exception vectors */
+       const char *host_vectors = vectors;
 
        ___deactivate_traps(vcpu);
 
@@ -81,7 +84,10 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
        asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 
        write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
-       write_sysreg(vectors, vbar_el1);
+
+       if (!arm64_kernel_unmapped_at_el0())
+               host_vectors = __this_cpu_read(this_cpu_vector);
+       write_sysreg(host_vectors, vbar_el1);
 }
 NOKPROBE_SYMBOL(__deactivate_traps);
 
index 30da78f..202b8c4 100644 (file)
@@ -107,6 +107,18 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
                                break;
                        }
                        break;
+               case ARM_SMCCC_ARCH_WORKAROUND_3:
+                       switch (arm64_get_spectre_bhb_state()) {
+                       case SPECTRE_VULNERABLE:
+                               break;
+                       case SPECTRE_MITIGATED:
+                               val[0] = SMCCC_RET_SUCCESS;
+                               break;
+                       case SPECTRE_UNAFFECTED:
+                               val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
+                               break;
+                       }
+                       break;
                case ARM_SMCCC_HV_PV_TIME_FEATURES:
                        val[0] = SMCCC_RET_SUCCESS;
                        break;
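
For reference, a hedged sketch of the guest-side probe this handler answers; it mirrors the SMCCC discovery sequence the host's own spectre_bhb_get_cpu_fw_mitigation_state() performs (identifiers from include/linux/arm-smccc.h and asm/spectre.h):

static enum mitigation_state probe_wa3(void)
{
        struct arm_smccc_res res;

        arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                             ARM_SMCCC_ARCH_WORKAROUND_3, &res);
        switch ((int)res.a0) {
        case SMCCC_RET_SUCCESS:
                return SPECTRE_MITIGATED;       /* WA3 is callable */
        case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
                return SPECTRE_UNAFFECTED;
        default:
                return SPECTRE_VULNERABLE;      /* NOT_SUPPORTED etc. */
        }
}
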
index 2ce60fe..5918095 100644 (file)
@@ -405,7 +405,7 @@ int kvm_psci_call(struct kvm_vcpu *vcpu)
 
 int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
 {
-       return 3;               /* PSCI version and two workaround registers */
+       return 4;               /* PSCI version and three workaround registers */
 }
 
 int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
@@ -419,6 +419,9 @@ int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
        if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++))
                return -EFAULT;
 
+       if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3, uindices++))
+               return -EFAULT;
+
        return 0;
 }
 
@@ -458,6 +461,17 @@ static int get_kernel_wa_level(u64 regid)
                case SPECTRE_VULNERABLE:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
                }
+               break;
+       case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
+               switch (arm64_get_spectre_bhb_state()) {
+               case SPECTRE_VULNERABLE:
+                       return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
+               case SPECTRE_MITIGATED:
+                       return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL;
+               case SPECTRE_UNAFFECTED:
+                       return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED;
+               }
+               return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
        }
 
        return -EINVAL;
@@ -474,6 +488,7 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
                break;
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
+       case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
                val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
                break;
        default:
@@ -519,6 +534,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        }
 
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
+       case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
                if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
                        return -EINVAL;
 
index a38f54c..77ada00 100644 (file)
@@ -7,8 +7,10 @@
 
 #include <linux/io.h>
 #include <linux/memblock.h>
+#include <linux/mm.h>
 #include <linux/types.h>
 
+#include <asm/cpufeature.h>
 #include <asm/page.h>
 
 /*
@@ -38,3 +40,18 @@ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 {
        return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
 }
+
+static int __init adjust_protection_map(void)
+{
+       /*
+        * With Enhanced PAN we can honour the execute-only permissions as
+        * there is no PAN override with such mappings.
+        */
+       if (cpus_have_const_cap(ARM64_HAS_EPAN)) {
+               protection_map[VM_EXEC] = PAGE_EXECONLY;
+               protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
+       }
+
+       return 0;
+}
+arch_initcall(adjust_protection_map);
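
A hedged userspace illustration of what this change enables (not part of the diff): with EPAN, PROT_EXEC without PROT_READ now yields genuinely execute-only memory, and data loads from it fault instead of being silently readable:

#include <sys/mman.h>

int main(void)
{
        /* execute-only anonymous page on EPAN-capable hardware */
        void *p = mmap(NULL, 4096, PROT_EXEC,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        return p == MAP_FAILED;
}
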
index acfae9b..49abbf4 100644 (file)
@@ -617,6 +617,8 @@ early_param("rodata", parse_rodata);
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 static int __init map_entry_trampoline(void)
 {
+       int i;
+
        pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
        phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
 
@@ -625,11 +627,15 @@ static int __init map_entry_trampoline(void)
 
        /* Map only the text into the trampoline page table */
        memset(tramp_pg_dir, 0, PGD_SIZE);
-       __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
-                            prot, __pgd_pgtable_alloc, 0);
+       __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
+                            entry_tramp_text_size(), prot,
+                            __pgd_pgtable_alloc, NO_BLOCK_MAPPINGS);
 
        /* Map both the text and data into the kernel page table */
-       __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
+       for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
+               __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
+                            pa_start + i * PAGE_SIZE, prot);
+
        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
                extern char __entry_tramp_data_start[];
 
index 9c65b1e..cea7533 100644 (file)
@@ -44,6 +44,7 @@ MTE_ASYMM
 SPECTRE_V2
 SPECTRE_V3A
 SPECTRE_V4
+SPECTRE_BHB
 SSBS
 SVE
 UNMAP_KERNEL_AT_EL0
index ba5b1be..006cbec 100644 (file)
@@ -202,7 +202,6 @@ static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
 /*
  * The current system page and segment sizes
  */
-extern int mmu_linear_psize;
 extern int mmu_virtual_psize;
 extern int mmu_vmalloc_psize;
 extern int mmu_io_psize;
@@ -213,6 +212,7 @@ extern int mmu_io_psize;
 #define mmu_virtual_psize MMU_PAGE_4K
 #endif
 #endif
+extern int mmu_linear_psize;
 extern int mmu_vmemmap_psize;
 
 /* MMU initialization */
index 350de8f..ab3832b 100644 (file)
@@ -112,6 +112,13 @@ static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
 #endif
 }
 
+#define HAVE_ARCH_CSUM_SHIFT
+static __always_inline __wsum csum_shift(__wsum sum, int offset)
+{
+       /* rotate sum to align it with a 16b boundary */
+       return (__force __wsum)rol32((__force u32)sum, (offset & 1) << 3);
+}
+
 /*
  * This is a version of ip_compute_csum() optimized for IP headers,
  * which always checksum on 4 octet boundaries.  ihl is the number
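
Why a plain rotate is sufficient, as a standalone sketch (not from the diff): the folded 16-bit one's-complement sum is invariant under byte-rotation of the 32-bit accumulator, because rotating by 8 multiplies the sum by 256 modulo 65535, which is exactly what shifting the data by one byte does; rol32 and ror32 therefore agree after folding:

#include <assert.h>
#include <stdint.h>

static uint32_t rol32(uint32_t x, int r) { return (x << r) | (x >> (32 - r)); }

/* fold a 32-bit accumulator down to the final 16-bit sum */
static uint16_t csum_fold(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return sum;
}

/* accumulate big-endian 16-bit words, starting at byte parity 'odd' */
static uint32_t csum_bytes(const uint8_t *p, int len, int odd)
{
        uint32_t sum = 0;

        for (int i = 0; i < len; i++)
                sum += (uint32_t)p[i] << (((i + odd) & 1) ? 0 : 8);
        return sum;
}

int main(void)
{
        const uint8_t buf[] = { 0x12, 0x34, 0x56, 0x78, 0x9a };
        uint32_t even = csum_bytes(buf, sizeof(buf), 0);
        uint32_t odd  = csum_bytes(buf, sizeof(buf), 1);

        /* shifting the data one byte == rotating the sum by 8 bits */
        assert(csum_fold(rol32(even, 8)) == csum_fold(odd));
        return 0;
}
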
index 7a90000..f83866a 100644 (file)
@@ -9,7 +9,7 @@ struct crash_mem *realloc_mem_ranges(struct crash_mem **mem_ranges);
 int add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size);
 int add_tce_mem_ranges(struct crash_mem **mem_ranges);
 int add_initrd_mem_range(struct crash_mem **mem_ranges);
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 int add_htab_mem_range(struct crash_mem **mem_ranges);
 #else
 static inline int add_htab_mem_range(struct crash_mem **mem_ranges)
index 160abcb..ea0e487 100644 (file)
@@ -9,7 +9,7 @@ long soft_nmi_interrupt(struct pt_regs *regs);
 static inline void arch_touch_nmi_watchdog(void) {}
 #endif
 
-#if defined(CONFIG_NMI_IPI) && defined(CONFIG_STACKTRACE)
+#ifdef CONFIG_NMI_IPI
 extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
                                           bool exclude_self);
 #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
index b44d6ec..0aacd70 100644 (file)
@@ -2,6 +2,7 @@ menu "CPU errata selection"
 
 config RISCV_ERRATA_ALTERNATIVE
        bool "RISC-V alternative scheme"
+       depends on !XIP_KERNEL
        default y
        help
          This Kconfig allows the kernel to automatically patch the
index 6ec44a2..c112ab2 100644 (file)
@@ -14,8 +14,8 @@ config SOC_SIFIVE
        select CLK_SIFIVE
        select CLK_SIFIVE_PRCI
        select SIFIVE_PLIC
-       select RISCV_ERRATA_ALTERNATIVE
-       select ERRATA_SIFIVE
+       select RISCV_ERRATA_ALTERNATIVE if !XIP_KERNEL
+       select ERRATA_SIFIVE if !XIP_KERNEL
        help
          This enables support for SiFive SoC platform hardware.
 
index 56f5711..44d3385 100644 (file)
                        compatible = "canaan,k210-plic", "sifive,plic-1.0.0";
                        reg = <0xC000000 0x4000000>;
                        interrupt-controller;
-                       interrupts-extended = <&cpu0_intc 11>, <&cpu1_intc 11>;
+                       interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>,
+                                             <&cpu1_intc 11>, <&cpu1_intc 9>;
                        riscv,ndev = <65>;
                };
 
index 160e3a1..004372f 100644 (file)
@@ -119,7 +119,7 @@ extern phys_addr_t phys_ram_base;
        ((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
 
 #define is_linear_mapping(x)   \
-       ((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < kernel_map.virt_addr))
+       ((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE))
 
 #define linear_mapping_pa_to_va(x)     ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
 #define kernel_mapping_pa_to_va(y)     ({                                              \
index 7e949f2..e3549e5 100644 (file)
@@ -13,6 +13,7 @@
 
 #ifndef CONFIG_MMU
 #define KERNEL_LINK_ADDR       PAGE_OFFSET
+#define KERN_VIRT_SIZE         (UL(-1))
 #else
 
 #define ADDRESS_SPACE_END      (UL(-1))
index 68a9e3d..4a48287 100644 (file)
 #include <linux/pgtable.h>
 #include <asm/sections.h>
 
+/*
+ * The auipc+jalr instruction pair can reach any PC-relative offset
+ * in the range [-2^31 - 2^11, 2^31 - 2^11)
+ */
+static bool riscv_insn_valid_32bit_offset(ptrdiff_t val)
+{
+#ifdef CONFIG_32BIT
+       return true;
+#else
+       return (-(1L << 31) - (1L << 11)) <= val && val < ((1L << 31) - (1L << 11));
+#endif
+}
+
 static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
 {
        if (v != (u32)v) {
@@ -95,7 +108,7 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
        ptrdiff_t offset = (void *)v - (void *)location;
        s32 hi20;
 
-       if (offset != (s32)offset) {
+       if (!riscv_insn_valid_32bit_offset(offset)) {
                pr_err(
                  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
                  me->name, (long long)v, location);
@@ -197,10 +210,9 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
                                       Elf_Addr v)
 {
        ptrdiff_t offset = (void *)v - (void *)location;
-       s32 fill_v = offset;
        u32 hi20, lo12;
 
-       if (offset != fill_v) {
+       if (!riscv_insn_valid_32bit_offset(offset)) {
                /* Only emit the plt entry if offset over 32-bit range */
                if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
                        offset = module_emit_plt_entry(me, v);
@@ -224,10 +236,9 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location,
                                   Elf_Addr v)
 {
        ptrdiff_t offset = (void *)v - (void *)location;
-       s32 fill_v = offset;
        u32 hi20, lo12;
 
-       if (offset != fill_v) {
+       if (!riscv_insn_valid_32bit_offset(offset)) {
                pr_err(
                  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
                  me->name, (long long)v, location);
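
For reference, a hedged sketch of the hi20/lo12 split behind that range (mirroring the rounding the PCREL_HI20 relocation in this file performs): the auipc immediate is rounded so the remaining jalr offset fits a signed 12-bit field, and since the sign-extended hi20 covers [-2^31, 2^31 - 2^12], adding lo12 in [-2^11, 2^11 - 1] gives exactly [-2^31 - 2^11, 2^31 - 2^11):

#include <assert.h>
#include <stdint.h>

static void split_pcrel(int64_t val, int32_t *hi20, int32_t *lo12)
{
        int64_t hi = (val + 0x800) & ~0xfffLL;  /* round to 4 KiB */

        *hi20 = (int32_t)hi;            /* auipc immediate, pre-shifted */
        *lo12 = (int32_t)(val - hi);    /* jalr immediate */
}

int main(void)
{
        int32_t hi20, lo12;

        /* the extremes of the documented range still split cleanly */
        split_pcrel(-(1LL << 31) - (1 << 11), &hi20, &lo12);
        assert(lo12 == -2048);
        split_pcrel((1LL << 31) - (1 << 11) - 1, &hi20, &lo12);
        assert(lo12 == 2047);
        return 0;
}
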
index 7ebaef1..ac7a252 100644 (file)
@@ -24,6 +24,9 @@ obj-$(CONFIG_KASAN)   += kasan_init.o
 ifdef CONFIG_KASAN
 KASAN_SANITIZE_kasan_init.o := n
 KASAN_SANITIZE_init.o := n
+ifdef CONFIG_DEBUG_VIRTUAL
+KASAN_SANITIZE_physaddr.o := n
+endif
 endif
 
 obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
index c272941..0d58803 100644 (file)
@@ -125,7 +125,6 @@ void __init mem_init(void)
        else
                swiotlb_force = SWIOTLB_NO_FORCE;
 #endif
-       high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
        memblock_free_all();
 
        print_vm_layout();
@@ -195,6 +194,7 @@ static void __init setup_bootmem(void)
 
        min_low_pfn = PFN_UP(phys_ram_base);
        max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
+       high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
 
        dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
        set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
index f61f7ca..cd1a145 100644 (file)
@@ -113,8 +113,11 @@ static void __init kasan_populate_pud(pgd_t *pgd,
                base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
        } else {
                base_pud = (pud_t *)pgd_page_vaddr(*pgd);
-               if (base_pud == lm_alias(kasan_early_shadow_pud))
+               if (base_pud == lm_alias(kasan_early_shadow_pud)) {
                        base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
+                       memcpy(base_pud, (void *)kasan_early_shadow_pud,
+                              sizeof(pud_t) * PTRS_PER_PUD);
+               }
        }
 
        pudp = base_pud + pud_index(vaddr);
@@ -202,8 +205,7 @@ asmlinkage void __init kasan_early_init(void)
 
        for (i = 0; i < PTRS_PER_PTE; ++i)
                set_pte(kasan_early_shadow_pte + i,
-                       mk_pte(virt_to_page(kasan_early_shadow_page),
-                              PAGE_KERNEL));
+                       pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));
 
        for (i = 0; i < PTRS_PER_PMD; ++i)
                set_pmd(kasan_early_shadow_pmd + i,
index e7fd0c2..19cf25a 100644 (file)
@@ -8,12 +8,10 @@
 
 phys_addr_t __virt_to_phys(unsigned long x)
 {
-       phys_addr_t y = x - PAGE_OFFSET;
-
        /*
         * Boundary checking against the kernel linear mapping space.
         */
-       WARN(y >= KERN_VIRT_SIZE,
+       WARN(!is_linear_mapping(x) && !is_kernel_mapping(x),
             "virt_to_phys used for non-linear address: %pK (%pS)\n",
             (void *)x, (void *)x);
 
index 16dc57d..8511f0e 100644 (file)
@@ -69,8 +69,13 @@ static inline void swap_ex_entry_fixup(struct exception_table_entry *a,
 {
        a->fixup = b->fixup + delta;
        b->fixup = tmp.fixup - delta;
-       a->handler = b->handler + delta;
-       b->handler = tmp.handler - delta;
+       a->handler = b->handler;
+       if (a->handler)
+               a->handler += delta;
+       b->handler = tmp.handler;
+       if (b->handler)
+               b->handler -= delta;
 }
+#define swap_ex_entry_fixup swap_ex_entry_fixup
 
 #endif
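
Background for the added checks, hedged from the code itself: these extable fields are self-relative offsets, and a stored 0 in handler encodes "no handler". When the extable sort swaps two entries, each relative field must be rebased by how far the entry moved, except that the absent encoding has to survive unchanged:

/* sketch of the invariant the fix preserves */
a->handler = b->handler ? b->handler + delta : 0;

Unconditionally adding delta, as before, turned "no handler" into a bogus non-zero offset.
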
index 267f70f..6f80ec9 100644 (file)
@@ -47,15 +47,17 @@ struct ftrace_regs {
 
 static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
 {
-       return &fregs->regs;
+       struct pt_regs *regs = &fregs->regs;
+
+       if (test_pt_regs_flag(regs, PIF_FTRACE_FULL_REGS))
+               return regs;
+       return NULL;
 }
 
 static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *fregs,
                                                           unsigned long ip)
 {
-       struct pt_regs *regs = arch_ftrace_get_regs(fregs);
-
-       regs->psw.addr = ip;
+       fregs->regs.psw.addr = ip;
 }
 
 /*
index 4ffa8e7..ddb70fb 100644 (file)
 #define PIF_EXECVE_PGSTE_RESTART       1       /* restart execve for PGSTE binaries */
 #define PIF_SYSCALL_RET_SET            2       /* return value was set via ptrace */
 #define PIF_GUEST_FAULT                        3       /* indicates program check in sie64a */
+#define PIF_FTRACE_FULL_REGS           4       /* all register contents valid (ftrace) */
 
 #define _PIF_SYSCALL                   BIT(PIF_SYSCALL)
 #define _PIF_EXECVE_PGSTE_RESTART      BIT(PIF_EXECVE_PGSTE_RESTART)
 #define _PIF_SYSCALL_RET_SET           BIT(PIF_SYSCALL_RET_SET)
 #define _PIF_GUEST_FAULT               BIT(PIF_GUEST_FAULT)
+#define _PIF_FTRACE_FULL_REGS          BIT(PIF_FTRACE_FULL_REGS)
 
 #ifndef __ASSEMBLY__
 
index 21d62d8..89c0870 100644 (file)
@@ -159,9 +159,38 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
        return 0;
 }
 
+static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrace *rec)
+{
+       struct ftrace_hotpatch_trampoline *trampoline;
+       struct ftrace_insn insn;
+       s64 disp;
+       u16 opc;
+
+       if (copy_from_kernel_nofault(&insn, (void *)rec->ip, sizeof(insn)))
+               return ERR_PTR(-EFAULT);
+       disp = (s64)insn.disp * 2;
+       trampoline = (void *)(rec->ip + disp);
+       if (get_kernel_nofault(opc, &trampoline->brasl_opc))
+               return ERR_PTR(-EFAULT);
+       if (opc != 0xc015)
+               return ERR_PTR(-EINVAL);
+       return trampoline;
+}
+
 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                       unsigned long addr)
 {
+       struct ftrace_hotpatch_trampoline *trampoline;
+       u64 old;
+
+       trampoline = ftrace_get_trampoline(rec);
+       if (IS_ERR(trampoline))
+               return PTR_ERR(trampoline);
+       if (get_kernel_nofault(old, &trampoline->interceptor))
+               return -EFAULT;
+       if (old != old_addr)
+               return -EINVAL;
+       s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
        return 0;
 }
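
A hedged reading of the call-site layout ftrace_get_trampoline() decodes (s390 relative branch displacements count halfwords, hence disp * 2; 0xc015 is brasl %r1 in RIL format):

/*
 *   rec->ip:     brcl 0|15,<trampoline>  6-byte insn whose mask is
 *                toggled by brcl_enable()/brcl_disable()
 *   trampoline:  begins with a brasl %r1 (opcode checked == 0xc015)
 *                and carries an interceptor slot holding the tracer
 *                address; ftrace_make_call()/ftrace_modify_call()
 *                rewrite only that 8-byte slot via s390_kernel_write()
 */
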
 
@@ -188,6 +217,12 @@ static void brcl_enable(void *brcl)
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
+       struct ftrace_hotpatch_trampoline *trampoline;
+
+       trampoline = ftrace_get_trampoline(rec);
+       if (IS_ERR(trampoline))
+               return PTR_ERR(trampoline);
+       s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
        brcl_enable((void *)rec->ip);
        return 0;
 }
@@ -291,7 +326,7 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 
        regs = ftrace_get_regs(fregs);
        p = get_kprobe((kprobe_opcode_t *)ip);
-       if (unlikely(!p) || kprobe_disabled(p))
+       if (!regs || unlikely(!p) || kprobe_disabled(p))
                goto out;
 
        if (kprobe_running()) {
index 39bcc0e..a24177d 100644 (file)
@@ -27,6 +27,7 @@ ENDPROC(ftrace_stub)
 #define STACK_PTREGS_GPRS      (STACK_PTREGS + __PT_GPRS)
 #define STACK_PTREGS_PSW       (STACK_PTREGS + __PT_PSW)
 #define STACK_PTREGS_ORIG_GPR2 (STACK_PTREGS + __PT_ORIG_GPR2)
+#define STACK_PTREGS_FLAGS     (STACK_PTREGS + __PT_FLAGS)
 #ifdef __PACK_STACK
 /* allocate just enough for r14, r15 and backchain */
 #define TRACED_FUNC_FRAME_SIZE 24
@@ -57,6 +58,14 @@ ENDPROC(ftrace_stub)
        .if \allregs == 1
        stg     %r14,(STACK_PTREGS_PSW)(%r15)
        stosm   (STACK_PTREGS_PSW)(%r15),0
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+       mvghi   STACK_PTREGS_FLAGS(%r15),_PIF_FTRACE_FULL_REGS
+#else
+       lghi    %r14,_PIF_FTRACE_FULL_REGS
+       stg     %r14,STACK_PTREGS_FLAGS(%r15)
+#endif
+       .else
+       xc      STACK_PTREGS_FLAGS(8,%r15),STACK_PTREGS_FLAGS(%r15)
        .endif
 
        lg      %r14,(__SF_GPRS+8*8)(%r1)       # restore original return address
index f2c25d1..05327be 100644 (file)
@@ -800,6 +800,8 @@ static void __init check_initrd(void)
 static void __init reserve_kernel(void)
 {
        memblock_reserve(0, STARTUP_NORMAL_OFFSET);
+       memblock_reserve(OLDMEM_BASE, sizeof(unsigned long));
+       memblock_reserve(OLDMEM_SIZE, sizeof(unsigned long));
        memblock_reserve(__amode31_base, __eamode31 - __samode31);
        memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP);
        memblock_reserve(__pa(_stext), _end - _stext);
index 6db4e29..65d1479 100644 (file)
 /* FREE!                                ( 7*32+10) */
 #define X86_FEATURE_PTI                        ( 7*32+11) /* Kernel Page Table Isolation enabled */
 #define X86_FEATURE_RETPOLINE          ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_AMD      ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_LFENCE   ( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */
 #define X86_FEATURE_INTEL_PPIN         ( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2             ( 7*32+15) /* Code and Data Prioritization L2 */
 #define X86_FEATURE_MSR_SPEC_CTRL      ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
index cc74dc5..acbaeaf 100644 (file)
@@ -84,7 +84,7 @@
 #ifdef CONFIG_RETPOLINE
        ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
                      __stringify(jmp __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
-                     __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_AMD
+                     __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_LFENCE
 #else
        jmp     *%\reg
 #endif
@@ -94,7 +94,7 @@
 #ifdef CONFIG_RETPOLINE
        ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
                      __stringify(call __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
-                     __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_AMD
+                     __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_LFENCE
 #else
        call    *%\reg
 #endif
@@ -146,7 +146,7 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[];
        "lfence;\n"                                             \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
-       X86_FEATURE_RETPOLINE_AMD)
+       X86_FEATURE_RETPOLINE_LFENCE)
 
 # define THUNK_TARGET(addr) [thunk_target] "r" (addr)
 
@@ -176,7 +176,7 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[];
        "lfence;\n"                                             \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
-       X86_FEATURE_RETPOLINE_AMD)
+       X86_FEATURE_RETPOLINE_LFENCE)
 
 # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
 #endif
@@ -188,9 +188,11 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[];
 /* The Spectre V2 mitigation variants */
 enum spectre_v2_mitigation {
        SPECTRE_V2_NONE,
-       SPECTRE_V2_RETPOLINE_GENERIC,
-       SPECTRE_V2_RETPOLINE_AMD,
-       SPECTRE_V2_IBRS_ENHANCED,
+       SPECTRE_V2_RETPOLINE,
+       SPECTRE_V2_LFENCE,
+       SPECTRE_V2_EIBRS,
+       SPECTRE_V2_EIBRS_RETPOLINE,
+       SPECTRE_V2_EIBRS_LFENCE,
 };
 
 /* The indirect branch speculation control variants */
index 018b61f..b4e5766 100644 (file)
@@ -389,7 +389,7 @@ static int emit_indirect(int op, int reg, u8 *bytes)
  *
  *   CALL *%\reg
  *
- * It also tries to inline spectre_v2=retpoline,amd when size permits.
+ * It also tries to inline spectre_v2=retpoline,lfence when size permits.
  */
 static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
 {
@@ -407,7 +407,7 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
        BUG_ON(reg == 4);
 
        if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
-           !cpu_feature_enabled(X86_FEATURE_RETPOLINE_AMD))
+           !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE))
                return -1;
 
        op = insn->opcode.bytes[0];
@@ -438,9 +438,9 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
        }
 
        /*
-        * For RETPOLINE_AMD: prepend the indirect CALL/JMP with an LFENCE.
+        * For RETPOLINE_LFENCE: prepend the indirect CALL/JMP with an LFENCE.
         */
-       if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_AMD)) {
+       if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
                bytes[i++] = 0x0f;
                bytes[i++] = 0xae;
                bytes[i++] = 0xe8; /* LFENCE */
index 1c1f218..6296e1e 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/prctl.h>
 #include <linux/sched/smt.h>
 #include <linux/pgtable.h>
+#include <linux/bpf.h>
 
 #include <asm/spec-ctrl.h>
 #include <asm/cmdline.h>
@@ -650,6 +651,32 @@ static inline const char *spectre_v2_module_string(void)
 static inline const char *spectre_v2_module_string(void) { return ""; }
 #endif
 
+#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
+#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
+#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
+
+#ifdef CONFIG_BPF_SYSCALL
+void unpriv_ebpf_notify(int new_state)
+{
+       if (new_state)
+               return;
+
+       /* Unprivileged eBPF is enabled */
+
+       switch (spectre_v2_enabled) {
+       case SPECTRE_V2_EIBRS:
+               pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
+               break;
+       case SPECTRE_V2_EIBRS_LFENCE:
+               if (sched_smt_active())
+                       pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
+               break;
+       default:
+               break;
+       }
+}
+#endif
+
 static inline bool match_option(const char *arg, int arglen, const char *opt)
 {
        int len = strlen(opt);
@@ -664,7 +691,10 @@ enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
-       SPECTRE_V2_CMD_RETPOLINE_AMD,
+       SPECTRE_V2_CMD_RETPOLINE_LFENCE,
+       SPECTRE_V2_CMD_EIBRS,
+       SPECTRE_V2_CMD_EIBRS_RETPOLINE,
+       SPECTRE_V2_CMD_EIBRS_LFENCE,
 };
 
 enum spectre_v2_user_cmd {
@@ -737,6 +767,13 @@ spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
        return SPECTRE_V2_USER_CMD_AUTO;
 }
 
+static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
+{
+       return (mode == SPECTRE_V2_EIBRS ||
+               mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+               mode == SPECTRE_V2_EIBRS_LFENCE);
+}
+
 static void __init
 spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 {
@@ -804,7 +841,7 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
         */
        if (!boot_cpu_has(X86_FEATURE_STIBP) ||
            !smt_possible ||
-           spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+           spectre_v2_in_eibrs_mode(spectre_v2_enabled))
                return;
 
        /*
@@ -824,9 +861,11 @@ set_mode:
 
 static const char * const spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]                       = "Vulnerable",
-       [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
-       [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
-       [SPECTRE_V2_IBRS_ENHANCED]              = "Mitigation: Enhanced IBRS",
+       [SPECTRE_V2_RETPOLINE]                  = "Mitigation: Retpolines",
+       [SPECTRE_V2_LFENCE]                     = "Mitigation: LFENCE",
+       [SPECTRE_V2_EIBRS]                      = "Mitigation: Enhanced IBRS",
+       [SPECTRE_V2_EIBRS_LFENCE]               = "Mitigation: Enhanced IBRS + LFENCE",
+       [SPECTRE_V2_EIBRS_RETPOLINE]            = "Mitigation: Enhanced IBRS + Retpolines",
 };
 
 static const struct {
@@ -837,8 +876,12 @@ static const struct {
        { "off",                SPECTRE_V2_CMD_NONE,              false },
        { "on",                 SPECTRE_V2_CMD_FORCE,             true  },
        { "retpoline",          SPECTRE_V2_CMD_RETPOLINE,         false },
-       { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
+       { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
+       { "retpoline,lfence",   SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
        { "retpoline,generic",  SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
+       { "eibrs",              SPECTRE_V2_CMD_EIBRS,             false },
+       { "eibrs,lfence",       SPECTRE_V2_CMD_EIBRS_LFENCE,      false },
+       { "eibrs,retpoline",    SPECTRE_V2_CMD_EIBRS_RETPOLINE,   false },
        { "auto",               SPECTRE_V2_CMD_AUTO,              false },
 };
 
@@ -875,10 +918,30 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
        }
 
        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
-            cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
-            cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
+            cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
+            cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
+            cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
+            cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
-               pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
+               pr_err("%s selected but not compiled in. Switching to AUTO select\n",
+                      mitigation_options[i].option);
+               return SPECTRE_V2_CMD_AUTO;
+       }
+
+       if ((cmd == SPECTRE_V2_CMD_EIBRS ||
+            cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
+            cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
+           !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
+               pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
+                      mitigation_options[i].option);
+               return SPECTRE_V2_CMD_AUTO;
+       }
+
+       if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
+            cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
+           !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
+               pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
+                      mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }
 
@@ -887,6 +950,16 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
        return cmd;
 }
 
+static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
+{
+       if (!IS_ENABLED(CONFIG_RETPOLINE)) {
+               pr_err("Kernel not compiled with retpoline; no mitigation available!");
+               return SPECTRE_V2_NONE;
+       }
+
+       return SPECTRE_V2_RETPOLINE;
+}
+
 static void __init spectre_v2_select_mitigation(void)
 {
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -907,49 +980,64 @@ static void __init spectre_v2_select_mitigation(void)
        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
                if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
-                       mode = SPECTRE_V2_IBRS_ENHANCED;
-                       /* Force it so VMEXIT will restore correctly */
-                       x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
-                       wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
-                       goto specv2_set_mode;
+                       mode = SPECTRE_V2_EIBRS;
+                       break;
                }
-               if (IS_ENABLED(CONFIG_RETPOLINE))
-                       goto retpoline_auto;
+
+               mode = spectre_v2_select_retpoline();
                break;
-       case SPECTRE_V2_CMD_RETPOLINE_AMD:
-               if (IS_ENABLED(CONFIG_RETPOLINE))
-                       goto retpoline_amd;
+
+       case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
+               pr_err(SPECTRE_V2_LFENCE_MSG);
+               mode = SPECTRE_V2_LFENCE;
                break;
+
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
-               if (IS_ENABLED(CONFIG_RETPOLINE))
-                       goto retpoline_generic;
+               mode = SPECTRE_V2_RETPOLINE;
                break;
+
        case SPECTRE_V2_CMD_RETPOLINE:
-               if (IS_ENABLED(CONFIG_RETPOLINE))
-                       goto retpoline_auto;
+               mode = spectre_v2_select_retpoline();
+               break;
+
+       case SPECTRE_V2_CMD_EIBRS:
+               mode = SPECTRE_V2_EIBRS;
+               break;
+
+       case SPECTRE_V2_CMD_EIBRS_LFENCE:
+               mode = SPECTRE_V2_EIBRS_LFENCE;
+               break;
+
+       case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
+               mode = SPECTRE_V2_EIBRS_RETPOLINE;
                break;
        }
-       pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
-       return;
 
-retpoline_auto:
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
-           boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
-       retpoline_amd:
-               if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
-                       pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
-                       goto retpoline_generic;
-               }
-               mode = SPECTRE_V2_RETPOLINE_AMD;
-               setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
-               setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
-       } else {
-       retpoline_generic:
-               mode = SPECTRE_V2_RETPOLINE_GENERIC;
+       if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
+               pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
+
+       if (spectre_v2_in_eibrs_mode(mode)) {
+               /* Force it so VMEXIT will restore correctly */
+               x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+               wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+       }
+
+       switch (mode) {
+       case SPECTRE_V2_NONE:
+       case SPECTRE_V2_EIBRS:
+               break;
+
+       case SPECTRE_V2_LFENCE:
+       case SPECTRE_V2_EIBRS_LFENCE:
+               setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
+               fallthrough;
+
+       case SPECTRE_V2_RETPOLINE:
+       case SPECTRE_V2_EIBRS_RETPOLINE:
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+               break;
        }
 
-specv2_set_mode:
        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);
 
@@ -975,7 +1063,7 @@ specv2_set_mode:
         * the CPU supports Enhanced IBRS, kernel might un-intentionally not
         * enable IBRS around firmware calls.
         */
-       if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
+       if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }
@@ -1045,6 +1133,10 @@ void cpu_bugs_smt_update(void)
 {
        mutex_lock(&spec_ctrl_mutex);
 
+       if (sched_smt_active() && unprivileged_ebpf_enabled() &&
+           spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+               pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
+
        switch (spectre_v2_user_stibp) {
        case SPECTRE_V2_USER_NONE:
                break;
@@ -1684,7 +1776,7 @@ static ssize_t tsx_async_abort_show_state(char *buf)
 
 static char *stibp_state(void)
 {
-       if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+       if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
                return "";
 
        switch (spectre_v2_user_stibp) {
@@ -1714,6 +1806,27 @@ static char *ibpb_state(void)
        return "";
 }
 
+static ssize_t spectre_v2_show_state(char *buf)
+{
+       if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
+               return sprintf(buf, "Vulnerable: LFENCE\n");
+
+       if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
+               return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
+
+       if (sched_smt_active() && unprivileged_ebpf_enabled() &&
+           spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+               return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
+
+       return sprintf(buf, "%s%s%s%s%s%s\n",
+                      spectre_v2_strings[spectre_v2_enabled],
+                      ibpb_state(),
+                      boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+                      stibp_state(),
+                      boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+                      spectre_v2_module_string());
+}
+
 static ssize_t srbds_show_state(char *buf)
 {
        return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
@@ -1739,12 +1852,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
                return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
 
        case X86_BUG_SPECTRE_V2:
-               return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
-                              ibpb_state(),
-                              boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
-                              stibp_state(),
-                              boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
-                              spectre_v2_module_string());
+               return spectre_v2_show_state(buf);
 
        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
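
Pulling the pieces above together, the new spectre_v2= keyword-to-mode mapping reads as follows (a summary of these hunks, not new behaviour):

/*
 *   spectre_v2=eibrs            -> SPECTRE_V2_EIBRS
 *   spectre_v2=eibrs,lfence     -> SPECTRE_V2_EIBRS_LFENCE
 *   spectre_v2=eibrs,retpoline  -> SPECTRE_V2_EIBRS_RETPOLINE
 *   spectre_v2=retpoline,lfence -> SPECTRE_V2_LFENCE
 *     ("retpoline,amd" is kept as a legacy alias)
 *
 * eibrs* needs X86_FEATURE_IBRS_ENHANCED, and the *lfence variants
 * need a serializing LFENCE (X86_FEATURE_LFENCE_RDTSC); otherwise the
 * parser falls back to SPECTRE_V2_CMD_AUTO as shown above.
 */
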
index 48afe96..7c63a19 100644 (file)
 #include "encls.h"
 #include "sgx.h"
 
+/*
+ * Calculate the byte offset of a PCMD struct associated with an enclave page.
+ * PCMDs follow right after the EPC data in the backing storage. In addition
+ * to the visible enclave pages, there is one extra page slot for SECS, before
+ * the PCMD structs.
+ */
+static inline pgoff_t sgx_encl_get_backing_page_pcmd_offset(struct sgx_encl *encl,
+                                                           unsigned long page_index)
+{
+       pgoff_t epc_end_off = encl->size + sizeof(struct sgx_secs);
+
+       return epc_end_off + page_index * sizeof(struct sgx_pcmd);
+}
+
+/*
+ * Free a page from the backing storage at the given page index.
+ */
+static inline void sgx_encl_truncate_backing_page(struct sgx_encl *encl, unsigned long page_index)
+{
+       struct inode *inode = file_inode(encl->backing);
+
+       shmem_truncate_range(inode, PFN_PHYS(page_index), PFN_PHYS(page_index) + PAGE_SIZE - 1);
+}
+
 /*
  * ELDU: Load an EPC page as unblocked. For more info, see "OS Management of EPC
  * Pages" in the SDM.
@@ -22,9 +46,11 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
 {
        unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
        struct sgx_encl *encl = encl_page->encl;
+       pgoff_t page_index, page_pcmd_off;
        struct sgx_pageinfo pginfo;
        struct sgx_backing b;
-       pgoff_t page_index;
+       bool pcmd_page_empty;
+       u8 *pcmd_page;
        int ret;
 
        if (secs_page)
@@ -32,14 +58,16 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
        else
                page_index = PFN_DOWN(encl->size);
 
+       page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
+
        ret = sgx_encl_get_backing(encl, page_index, &b);
        if (ret)
                return ret;
 
        pginfo.addr = encl_page->desc & PAGE_MASK;
        pginfo.contents = (unsigned long)kmap_atomic(b.contents);
-       pginfo.metadata = (unsigned long)kmap_atomic(b.pcmd) +
-                         b.pcmd_offset;
+       pcmd_page = kmap_atomic(b.pcmd);
+       pginfo.metadata = (unsigned long)pcmd_page + b.pcmd_offset;
 
        if (secs_page)
                pginfo.secs = (u64)sgx_get_epc_virt_addr(secs_page);
@@ -55,11 +83,24 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
                ret = -EFAULT;
        }
 
-       kunmap_atomic((void *)(unsigned long)(pginfo.metadata - b.pcmd_offset));
+       memset(pcmd_page + b.pcmd_offset, 0, sizeof(struct sgx_pcmd));
+
+       /*
+        * The area for the PCMD in the page was zeroed above. Check if the
+        * whole page is now empty, meaning that all PCMDs have been zeroed:
+        */
+       pcmd_page_empty = !memchr_inv(pcmd_page, 0, PAGE_SIZE);
+
+       kunmap_atomic(pcmd_page);
        kunmap_atomic((void *)(unsigned long)pginfo.contents);
 
        sgx_encl_put_backing(&b, false);
 
+       sgx_encl_truncate_backing_page(encl, page_index);
+
+       if (pcmd_page_empty)
+               sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off));
+
        return ret;
 }
 
@@ -579,7 +620,7 @@ static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl,
 int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
                         struct sgx_backing *backing)
 {
-       pgoff_t pcmd_index = PFN_DOWN(encl->size) + 1 + (page_index >> 5);
+       pgoff_t page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
        struct page *contents;
        struct page *pcmd;
 
@@ -587,7 +628,7 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
        if (IS_ERR(contents))
                return PTR_ERR(contents);
 
-       pcmd = sgx_encl_get_backing_page(encl, pcmd_index);
+       pcmd = sgx_encl_get_backing_page(encl, PFN_DOWN(page_pcmd_off));
        if (IS_ERR(pcmd)) {
                put_page(contents);
                return PTR_ERR(pcmd);
@@ -596,9 +637,7 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
        backing->page_index = page_index;
        backing->contents = contents;
        backing->pcmd = pcmd;
-       backing->pcmd_offset =
-               (page_index & (PAGE_SIZE / sizeof(struct sgx_pcmd) - 1)) *
-               sizeof(struct sgx_pcmd);
+       backing->pcmd_offset = page_pcmd_off & (PAGE_SIZE - 1);
 
        return 0;
 }
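
The offset arithmetic above implies the following backing-store layout, hedged here with the SDM-defined sizes sizeof(struct sgx_secs) == 4096 and sizeof(struct sgx_pcmd) == 128:

/*
 *   [0, encl->size)                      enclave page contents
 *   [encl->size, encl->size + 4096)      SECS slot
 *   [epc_end_off + i * 128, + 128)       PCMD of enclave page i
 *
 * 4096 / 128 == 32 PCMDs share one backing page, which is why the old
 * code computed the PCMD page as PFN_DOWN(encl->size) + 1 +
 * (page_index >> 5), and why pcmd_offset keeps only the low
 * PAGE_SIZE - 1 bits of page_pcmd_off.
 */
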
index bc0657f..f267205 100644 (file)
@@ -995,8 +995,10 @@ early_param("memmap", parse_memmap_opt);
  */
 void __init e820__reserve_setup_data(void)
 {
+       struct setup_indirect *indirect;
        struct setup_data *data;
-       u64 pa_data;
+       u64 pa_data, pa_next;
+       u32 len;
 
        pa_data = boot_params.hdr.setup_data;
        if (!pa_data)
@@ -1004,6 +1006,14 @@ void __init e820__reserve_setup_data(void)
 
        while (pa_data) {
                data = early_memremap(pa_data, sizeof(*data));
+               if (!data) {
+                       pr_warn("e820: failed to memremap setup_data entry\n");
+                       return;
+               }
+
+               len = sizeof(*data);
+               pa_next = data->next;
+
                e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
 
                /*
@@ -1015,18 +1025,27 @@ void __init e820__reserve_setup_data(void)
                                                 sizeof(*data) + data->len,
                                                 E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
 
-               if (data->type == SETUP_INDIRECT &&
-                   ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
-                       e820__range_update(((struct setup_indirect *)data->data)->addr,
-                                          ((struct setup_indirect *)data->data)->len,
-                                          E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
-                       e820__range_update_kexec(((struct setup_indirect *)data->data)->addr,
-                                                ((struct setup_indirect *)data->data)->len,
-                                                E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+               if (data->type == SETUP_INDIRECT) {
+                       len += data->len;
+                       early_memunmap(data, sizeof(*data));
+                       data = early_memremap(pa_data, len);
+                       if (!data) {
+                               pr_warn("e820: failed to memremap indirect setup_data\n");
+                               return;
+                       }
+
+                       indirect = (struct setup_indirect *)data->data;
+
+                       if (indirect->type != SETUP_INDIRECT) {
+                               e820__range_update(indirect->addr, indirect->len,
+                                                  E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+                               e820__range_update_kexec(indirect->addr, indirect->len,
+                                                        E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+                       }
                }
 
-               pa_data = data->next;
-               early_memunmap(data, sizeof(*data));
+               pa_data = pa_next;
+               early_memunmap(data, len);
        }
 
        e820__update_table(e820_table);
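
The repeated map-then-remap dance above exists because the indirect descriptor lives in the trailing payload: only sizeof(*data) is mapped at first, so a SETUP_INDIRECT node must be remapped with sizeof(*data) + data->len before data->data may be dereferenced. For reference, the UAPI layouts involved (reproduced from arch/x86/include/uapi/asm/bootparam.h):

struct setup_data {
        __u64 next;     /* physical address of the next node, 0 ends the list */
        __u32 type;
        __u32 len;      /* payload length, excluding this header */
        __u8  data[];
};

struct setup_indirect {
        __u32 type;
        __u32 reserved;
        __u64 len;
        __u64 addr;     /* the real payload lives out-of-line here */
};

The same pattern is applied to the kdebugfs and ksysfs readers below.
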
index 64b6da9..e2e89be 100644 (file)
@@ -88,11 +88,13 @@ create_setup_data_node(struct dentry *parent, int no,
 
 static int __init create_setup_data_nodes(struct dentry *parent)
 {
+       struct setup_indirect *indirect;
        struct setup_data_node *node;
        struct setup_data *data;
-       int error;
+       u64 pa_data, pa_next;
        struct dentry *d;
-       u64 pa_data;
+       int error;
+       u32 len;
        int no = 0;
 
        d = debugfs_create_dir("setup_data", parent);
@@ -112,12 +114,29 @@ static int __init create_setup_data_nodes(struct dentry *parent)
                        error = -ENOMEM;
                        goto err_dir;
                }
-
-               if (data->type == SETUP_INDIRECT &&
-                   ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
-                       node->paddr = ((struct setup_indirect *)data->data)->addr;
-                       node->type  = ((struct setup_indirect *)data->data)->type;
-                       node->len   = ((struct setup_indirect *)data->data)->len;
+               pa_next = data->next;
+
+               if (data->type == SETUP_INDIRECT) {
+                       len = sizeof(*data) + data->len;
+                       memunmap(data);
+                       data = memremap(pa_data, len, MEMREMAP_WB);
+                       if (!data) {
+                               kfree(node);
+                               error = -ENOMEM;
+                               goto err_dir;
+                       }
+
+                       indirect = (struct setup_indirect *)data->data;
+
+                       if (indirect->type != SETUP_INDIRECT) {
+                               node->paddr = indirect->addr;
+                               node->type  = indirect->type;
+                               node->len   = indirect->len;
+                       } else {
+                               node->paddr = pa_data;
+                               node->type  = data->type;
+                               node->len   = data->len;
+                       }
                } else {
                        node->paddr = pa_data;
                        node->type  = data->type;
@@ -125,7 +144,7 @@ static int __init create_setup_data_nodes(struct dentry *parent)
                }
 
                create_setup_data_node(d, no, node);
-               pa_data = data->next;
+               pa_data = pa_next;
 
                memunmap(data);
                no++;
index d0a1912..257892f 100644 (file)
@@ -91,26 +91,41 @@ static int get_setup_data_paddr(int nr, u64 *paddr)
 
 static int __init get_setup_data_size(int nr, size_t *size)
 {
-       int i = 0;
+       u64 pa_data = boot_params.hdr.setup_data, pa_next;
+       struct setup_indirect *indirect;
        struct setup_data *data;
-       u64 pa_data = boot_params.hdr.setup_data;
+       int i = 0;
+       u32 len;
 
        while (pa_data) {
                data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
                if (!data)
                        return -ENOMEM;
+               pa_next = data->next;
+
                if (nr == i) {
-                       if (data->type == SETUP_INDIRECT &&
-                           ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT)
-                               *size = ((struct setup_indirect *)data->data)->len;
-                       else
+                       if (data->type == SETUP_INDIRECT) {
+                               len = sizeof(*data) + data->len;
+                               memunmap(data);
+                               data = memremap(pa_data, len, MEMREMAP_WB);
+                               if (!data)
+                                       return -ENOMEM;
+
+                               indirect = (struct setup_indirect *)data->data;
+
+                               if (indirect->type != SETUP_INDIRECT)
+                                       *size = indirect->len;
+                               else
+                                       *size = data->len;
+                       } else {
                                *size = data->len;
+                       }
 
                        memunmap(data);
                        return 0;
                }
 
-               pa_data = data->next;
+               pa_data = pa_next;
                memunmap(data);
                i++;
        }
@@ -120,9 +135,11 @@ static int __init get_setup_data_size(int nr, size_t *size)
 static ssize_t type_show(struct kobject *kobj,
                         struct kobj_attribute *attr, char *buf)
 {
+       struct setup_indirect *indirect;
+       struct setup_data *data;
        int nr, ret;
        u64 paddr;
-       struct setup_data *data;
+       u32 len;
 
        ret = kobj_to_setup_data_nr(kobj, &nr);
        if (ret)
@@ -135,10 +152,20 @@ static ssize_t type_show(struct kobject *kobj,
        if (!data)
                return -ENOMEM;
 
-       if (data->type == SETUP_INDIRECT)
-               ret = sprintf(buf, "0x%x\n", ((struct setup_indirect *)data->data)->type);
-       else
+       if (data->type == SETUP_INDIRECT) {
+               len = sizeof(*data) + data->len;
+               memunmap(data);
+               data = memremap(paddr, len, MEMREMAP_WB);
+               if (!data)
+                       return -ENOMEM;
+
+               indirect = (struct setup_indirect *)data->data;
+
+               ret = sprintf(buf, "0x%x\n", indirect->type);
+       } else {
                ret = sprintf(buf, "0x%x\n", data->type);
+       }
+
        memunmap(data);
        return ret;
 }
@@ -149,9 +176,10 @@ static ssize_t setup_data_data_read(struct file *fp,
                                    char *buf,
                                    loff_t off, size_t count)
 {
+       struct setup_indirect *indirect;
+       struct setup_data *data;
        int nr, ret = 0;
        u64 paddr, len;
-       struct setup_data *data;
        void *p;
 
        ret = kobj_to_setup_data_nr(kobj, &nr);
@@ -165,10 +193,27 @@ static ssize_t setup_data_data_read(struct file *fp,
        if (!data)
                return -ENOMEM;
 
-       if (data->type == SETUP_INDIRECT &&
-           ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
-               paddr = ((struct setup_indirect *)data->data)->addr;
-               len = ((struct setup_indirect *)data->data)->len;
+       if (data->type == SETUP_INDIRECT) {
+               len = sizeof(*data) + data->len;
+               memunmap(data);
+               data = memremap(paddr, len, MEMREMAP_WB);
+               if (!data)
+                       return -ENOMEM;
+
+               indirect = (struct setup_indirect *)data->data;
+
+               if (indirect->type != SETUP_INDIRECT) {
+                       paddr = indirect->addr;
+                       len = indirect->len;
+               } else {
+                       /*
+                        * Even though this is technically undefined, return
+                        * the data as though it is a normal setup_data struct.
+                        * This will at least allow it to be inspected.
+                        */
+                       paddr += sizeof(*data);
+                       len = data->len;
+               }
        } else {
                paddr += sizeof(*data);
                len = data->len;
index f734e3b..d77481e 100644
@@ -463,6 +463,7 @@ static bool pv_tlb_flush_supported(void)
        return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
                !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
                kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
+               !boot_cpu_has(X86_FEATURE_MWAIT) &&
                (num_possible_cpus() != 1));
 }
 
@@ -477,6 +478,7 @@ static bool pv_sched_yield_supported(void)
        return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
                !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
            kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
+           !boot_cpu_has(X86_FEATURE_MWAIT) &&
            (num_possible_cpus() != 1));
 }
 
@@ -622,7 +624,7 @@ static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
 
        /* Make sure other vCPUs get a chance to run if they need to. */
        for_each_cpu(cpu, mask) {
-               if (vcpu_is_preempted(cpu)) {
+               if (!idle_cpu(cpu) && vcpu_is_preempted(cpu)) {
                        kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
                        break;
                }
index a35cbf9..c5caa73 100644
@@ -239,6 +239,9 @@ static void __init kvmclock_init_mem(void)
 
 static int __init kvm_setup_vsyscall_timeinfo(void)
 {
+       if (!kvm_para_available() || !kvmclock)
+               return 0;
+
        kvmclock_init_mem();
 
 #ifdef CONFIG_X86_64
index 95fa745..96d7c27 100644
@@ -273,6 +273,14 @@ int module_finalize(const Elf_Ehdr *hdr,
                        retpolines = s;
        }
 
+       /*
+        * See alternative_instructions() for the ordering rules between the
+        * various patching types.
+        */
+       if (para) {
+               void *pseg = (void *)para->sh_addr;
+               apply_paravirt(pseg, pseg + para->sh_size);
+       }
        if (retpolines) {
                void *rseg = (void *)retpolines->sh_addr;
                apply_retpolines(rseg, rseg + retpolines->sh_size);
@@ -290,11 +298,6 @@ int module_finalize(const Elf_Ehdr *hdr,
                                            tseg, tseg + text->sh_size);
        }
 
-       if (para) {
-               void *pseg = (void *)para->sh_addr;
-               apply_paravirt(pseg, pseg + para->sh_size);
-       }
-
        /* make jump label nops */
        jump_label_apply_nops(me);
 
index f7a132e..90d7e17 100644
@@ -369,21 +369,41 @@ static void __init parse_setup_data(void)
 
 static void __init memblock_x86_reserve_range_setup_data(void)
 {
+       struct setup_indirect *indirect;
        struct setup_data *data;
-       u64 pa_data;
+       u64 pa_data, pa_next;
+       u32 len;
 
        pa_data = boot_params.hdr.setup_data;
        while (pa_data) {
                data = early_memremap(pa_data, sizeof(*data));
+               if (!data) {
+                       pr_warn("setup: failed to memremap setup_data entry\n");
+                       return;
+               }
+
+               len = sizeof(*data);
+               pa_next = data->next;
+
                memblock_reserve(pa_data, sizeof(*data) + data->len);
 
-               if (data->type == SETUP_INDIRECT &&
-                   ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT)
-                       memblock_reserve(((struct setup_indirect *)data->data)->addr,
-                                        ((struct setup_indirect *)data->data)->len);
+               if (data->type == SETUP_INDIRECT) {
+                       len += data->len;
+                       early_memunmap(data, sizeof(*data));
+                       data = early_memremap(pa_data, len);
+                       if (!data) {
+                               pr_warn("setup: failed to memremap indirect setup_data\n");
+                               return;
+                       }
 
-               pa_data = data->next;
-               early_memunmap(data, sizeof(*data));
+                       indirect = (struct setup_indirect *)data->data;
+
+                       if (indirect->type != SETUP_INDIRECT)
+                               memblock_reserve(indirect->addr, indirect->len);
+               }
+
+               pa_data = pa_next;
+               early_memunmap(data, len);
        }
 }
 
index c9d566d..8143693 100644
@@ -659,6 +659,7 @@ static bool do_int3(struct pt_regs *regs)
 
        return res == NOTIFY_STOP;
 }
+NOKPROBE_SYMBOL(do_int3);
 
 static void do_int3_user(struct pt_regs *regs)
 {
index 8e24f73..5628d0b 100644
@@ -3565,7 +3565,7 @@ set_root_pgd:
 out_unlock:
        write_unlock(&vcpu->kvm->mmu_lock);
 
-       return 0;
+       return r;
 }
 
 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
index 82a9dcd..eb40296 100644
@@ -9180,6 +9180,7 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
                likely(!pic_in_kernel(vcpu->kvm));
 }
 
+/* Called within kvm->srcu read side.  */
 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 {
        struct kvm_run *kvm_run = vcpu->run;
@@ -9188,16 +9189,9 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
        kvm_run->cr8 = kvm_get_cr8(vcpu);
        kvm_run->apic_base = kvm_get_apic_base(vcpu);
 
-       /*
-        * The call to kvm_ready_for_interrupt_injection() may end up in
-        * kvm_xen_has_interrupt() which may require the srcu lock to be
-        * held, to protect against changes in the vcpu_info address.
-        */
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
        kvm_run->ready_for_interrupt_injection =
                pic_in_kernel(vcpu->kvm) ||
                kvm_vcpu_ready_for_interrupt_injection(vcpu);
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
        if (is_smm(vcpu))
                kvm_run->flags |= KVM_RUN_X86_SMM;
@@ -9815,6 +9809,7 @@ void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
 
 /*
+ * Called within kvm->srcu read side.
  * Returns 1 to let vcpu_run() continue the guest execution loop without
  * exiting to the userspace.  Otherwise, the value will be returned to the
  * userspace.
@@ -10193,6 +10188,7 @@ out:
        return r;
 }
 
+/* Called within kvm->srcu read side.  */
 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 {
        bool hv_timer;
@@ -10252,12 +10248,12 @@ static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
                !vcpu->arch.apf.halted);
 }
 
+/* Called within kvm->srcu read side.  */
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
        int r;
        struct kvm *kvm = vcpu->kvm;
 
-       vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
        vcpu->arch.l1tf_flush_l1d = true;
 
        for (;;) {
@@ -10285,14 +10281,12 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
                if (__xfer_to_guest_mode_work_pending()) {
                        srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
                        r = xfer_to_guest_mode_handle_work(vcpu);
+                       vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
                        if (r)
                                return r;
-                       vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
                }
        }
 
-       srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-
        return r;
 }
 
@@ -10398,6 +10392,7 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
        struct kvm_run *kvm_run = vcpu->run;
+       struct kvm *kvm = vcpu->kvm;
        int r;
 
        vcpu_load(vcpu);
@@ -10405,6 +10400,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
        kvm_run->flags = 0;
        kvm_load_guest_fpu(vcpu);
 
+       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                if (kvm_run->immediate_exit) {
                        r = -EINTR;
@@ -10415,7 +10411,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 * use before KVM has ever run the vCPU.
                 */
                WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu));
+
+               srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
                kvm_vcpu_block(vcpu);
+               vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+
                if (kvm_apic_accept_events(vcpu) < 0) {
                        r = 0;
                        goto out;
@@ -10475,8 +10475,9 @@ out:
        if (kvm_run->kvm_valid_regs)
                store_regs(vcpu);
        post_kvm_run_save(vcpu);
-       kvm_sigset_deactivate(vcpu);
+       srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 
+       kvm_sigset_deactivate(vcpu);
        vcpu_put(vcpu);
        return r;
 }
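
Taken together, the x86.c hunks move ownership of the kvm->srcu read side up into kvm_arch_vcpu_ioctl_run(): the lock is taken once on entry, released once on exit, and every potentially sleeping call in between (kvm_vcpu_block(), xfer_to_guest_mode_handle_work()) brackets itself with unlock/lock. A hypothetical condensation of the resulting discipline (run_vcpu_srcu_sketch() does not exist in the tree, and vcpu_enter_guest() stands in for the loop body):

/* Illustrative only: the SRCU discipline established by the hunks above. */
static int run_vcpu_srcu_sketch(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int r;

	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);	/* once, on entry */

	for (;;) {
		r = vcpu_enter_guest(vcpu);	/* runs entirely read-side */
		if (r <= 0)
			break;

		if (__xfer_to_guest_mode_work_pending()) {
			/* Anything that may sleep drops the read side first. */
			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
			r = xfer_to_guest_mode_handle_work(vcpu);
			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
			if (r)
				break;
		}
	}

	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);	/* once, on exit */
	return r;
}
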
index 89b3fb2..afbdda5 100644
@@ -34,7 +34,7 @@ SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
 
        ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
                      __stringify(RETPOLINE \reg), X86_FEATURE_RETPOLINE, \
-                     __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_AMD
+                     __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE
 
 .endm
 
index 026031b..17a492c 100644
@@ -615,6 +615,7 @@ static bool memremap_is_efi_data(resource_size_t phys_addr,
 static bool memremap_is_setup_data(resource_size_t phys_addr,
                                   unsigned long size)
 {
+       struct setup_indirect *indirect;
        struct setup_data *data;
        u64 paddr, paddr_next;
 
@@ -627,6 +628,10 @@ static bool memremap_is_setup_data(resource_size_t phys_addr,
 
                data = memremap(paddr, sizeof(*data),
                                MEMREMAP_WB | MEMREMAP_DEC);
+               if (!data) {
+                       pr_warn("failed to memremap setup_data entry\n");
+                       return false;
+               }
 
                paddr_next = data->next;
                len = data->len;
@@ -636,10 +641,21 @@ static bool memremap_is_setup_data(resource_size_t phys_addr,
                        return true;
                }
 
-               if (data->type == SETUP_INDIRECT &&
-                   ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
-                       paddr = ((struct setup_indirect *)data->data)->addr;
-                       len = ((struct setup_indirect *)data->data)->len;
+               if (data->type == SETUP_INDIRECT) {
+                       memunmap(data);
+                       data = memremap(paddr, sizeof(*data) + len,
+                                       MEMREMAP_WB | MEMREMAP_DEC);
+                       if (!data) {
+                               pr_warn("failed to memremap indirect setup_data\n");
+                               return false;
+                       }
+
+                       indirect = (struct setup_indirect *)data->data;
+
+                       if (indirect->type != SETUP_INDIRECT) {
+                               paddr = indirect->addr;
+                               len = indirect->len;
+                       }
                }
 
                memunmap(data);
@@ -660,22 +676,51 @@ static bool memremap_is_setup_data(resource_size_t phys_addr,
 static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
                                                unsigned long size)
 {
+       struct setup_indirect *indirect;
        struct setup_data *data;
        u64 paddr, paddr_next;
 
        paddr = boot_params.hdr.setup_data;
        while (paddr) {
-               unsigned int len;
+               unsigned int len, size;
 
                if (phys_addr == paddr)
                        return true;
 
                data = early_memremap_decrypted(paddr, sizeof(*data));
+               if (!data) {
+                       pr_warn("failed to early memremap setup_data entry\n");
+                       return false;
+               }
+
+               size = sizeof(*data);
 
                paddr_next = data->next;
                len = data->len;
 
-               early_memunmap(data, sizeof(*data));
+               if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
+                       early_memunmap(data, sizeof(*data));
+                       return true;
+               }
+
+               if (data->type == SETUP_INDIRECT) {
+                       size += len;
+                       early_memunmap(data, sizeof(*data));
+                       data = early_memremap_decrypted(paddr, size);
+                       if (!data) {
+                               pr_warn("failed to early memremap indirect setup_data\n");
+                               return false;
+                       }
+
+                       indirect = (struct setup_indirect *)data->data;
+
+                       if (indirect->type != SETUP_INDIRECT) {
+                               paddr = indirect->addr;
+                               len = indirect->len;
+                       }
+               }
+
+               early_memunmap(data, size);
 
                if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
                        return true;
index ec3f00b..6efbb87 100644
@@ -390,7 +390,7 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
        u8 *prog = *pprog;
 
 #ifdef CONFIG_RETPOLINE
-       if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_AMD)) {
+       if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
                EMIT_LFENCE();
                EMIT2(0xFF, 0xE0 + reg);
        } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
index d69ca91..9a9185a 100644
@@ -2718,7 +2718,8 @@ static bool blk_mq_attempt_bio_merge(struct request_queue *q,
 
 static struct request *blk_mq_get_new_requests(struct request_queue *q,
                                               struct blk_plug *plug,
-                                              struct bio *bio)
+                                              struct bio *bio,
+                                              unsigned int nsegs)
 {
        struct blk_mq_alloc_data data = {
                .q              = q,
@@ -2730,6 +2731,11 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
        if (unlikely(bio_queue_enter(bio)))
                return NULL;
 
+       if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+               goto queue_exit;
+
+       rq_qos_throttle(q, bio);
+
        if (plug) {
                data.nr_tags = plug->nr_ios;
                plug->nr_ios = 1;
@@ -2742,12 +2748,13 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
        rq_qos_cleanup(q, bio);
        if (bio->bi_opf & REQ_NOWAIT)
                bio_wouldblock_error(bio);
+queue_exit:
        blk_queue_exit(q);
        return NULL;
 }
 
 static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
-               struct blk_plug *plug, struct bio *bio)
+               struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
 {
        struct request *rq;
 
@@ -2757,12 +2764,19 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
        if (!rq || rq->q != q)
                return NULL;
 
-       if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
+       if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
+               *bio = NULL;
+               return NULL;
+       }
+
+       rq_qos_throttle(q, *bio);
+
+       if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
                return NULL;
-       if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+       if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
                return NULL;
 
-       rq->cmd_flags = bio->bi_opf;
+       rq->cmd_flags = (*bio)->bi_opf;
        plug->cached_rq = rq_list_next(rq);
        INIT_LIST_HEAD(&rq->queuelist);
        return rq;
@@ -2800,14 +2814,11 @@ void blk_mq_submit_bio(struct bio *bio)
        if (!bio_integrity_prep(bio))
                return;
 
-       if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
-               return;
-
-       rq_qos_throttle(q, bio);
-
-       rq = blk_mq_get_cached_request(q, plug, bio);
+       rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
        if (!rq) {
-               rq = blk_mq_get_new_requests(q, plug, bio);
+               if (!bio)
+                       return;
+               rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
                if (unlikely(!rq))
                        return;
        }
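
The point of this reshuffle is ordering: blk_mq_attempt_bio_merge() and rq_qos_throttle() used to run in blk_mq_submit_bio() before any queue reference was taken, and they now run inside the two request-acquisition helpers, where either bio_queue_enter() or the cached request's existing reference pins the queue in every path. A condensed sketch of the resulting caller flow, with error handling trimmed (the function name is hypothetical):

/* Hypothetical condensation of blk_mq_submit_bio() after the change. */
static void submit_bio_sketch(struct request_queue *q, struct blk_plug *plug,
			      struct bio *bio, unsigned int nr_segs)
{
	struct request *rq;

	rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
	if (!rq) {
		if (!bio)	/* the bio was consumed by a merge */
			return;
		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
		if (unlikely(!rq))
			return;
	}
	/* ... plug or dispatch the request as before ... */
}
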
index 1331756..8b2e5ef 100644
@@ -1377,11 +1377,11 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
                if (info->valid & ACPI_VALID_HID) {
                        acpi_add_id(pnp, info->hardware_id.string);
                        pnp->type.platform_id = 1;
-                       if (info->valid & ACPI_VALID_CID) {
-                               cid_list = &info->compatible_id_list;
-                               for (i = 0; i < cid_list->count; i++)
-                                       acpi_add_id(pnp, cid_list->ids[i].string);
-                       }
+               }
+               if (info->valid & ACPI_VALID_CID) {
+                       cid_list = &info->compatible_id_list;
+                       for (i = 0; i < cid_list->count; i++)
+                               acpi_add_id(pnp, cid_list->ids[i].string);
                }
                if (info->valid & ACPI_VALID_ADR) {
                        pnp->bus_address = info->address;
index 422753d..a31ffe1 100644
@@ -1112,6 +1112,8 @@ DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags);
        skb_data3 = skb->data[3];
        paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len,
                               DMA_TO_DEVICE);
+       if (dma_mapping_error(&eni_dev->pci_dev->dev, paddr))
+               return enq_next;
        ENI_PRV_PADDR(skb) = paddr;
        /* prepare DMA queue entries */
        j = 0;
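
This one-line fix applies the standard DMA API rule: a handle returned by dma_map_single() must be validated with dma_mapping_error() before it is stored or handed to hardware. A generic sketch of the pairing (the helper and its error value are illustrative):

/* Hypothetical helper showing the required map/check pairing. */
static int map_skb_for_tx_sketch(struct device *dev, struct sk_buff *skb,
				 dma_addr_t *paddr)
{
	*paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *paddr))
		return -ENOMEM;	/* never use an unchecked DMA handle */
	return 0;
}
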
index 62f5bfa..fd91a39 100644
@@ -303,7 +303,7 @@ u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value)
 EXPORT_SYMBOL_GPL(bcma_chipco_gpio_outen);
 
 /*
- * If the bit is set to 0, chipcommon controlls this GPIO,
+ * If the bit is set to 0, chipcommon controls this GPIO,
  * if the bit is set to 1, it is used by some part of the chip and not our code.
  */
 u32 bcma_chipco_gpio_control(struct bcma_drv_cc *cc, u32 mask, u32 value)
index 3056f81..263ef6f 100644
@@ -206,7 +206,7 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
        usleep_range(2000, 2500);
 }
 
-/* Disable to allow reading SPROM. Don't know the adventages of enabling it. */
+/* Disable to allow reading SPROM. Don't know the advantages of enabling it. */
 void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable)
 {
        struct bcma_bus *bus = cc->core->bus;
@@ -234,7 +234,7 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
        switch (bus->chipinfo.id) {
        case BCMA_CHIP_ID_BCM4313:
                /*
-                * enable 12 mA drive strenth for 4313 and set chipControl
+                * enable 12 mA drive strength for 4313 and set chipControl
                 * register bit 1
                 */
                bcma_chipco_chipctl_maskset(cc, 0,
@@ -249,7 +249,7 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
        case BCMA_CHIP_ID_BCM43224:
        case BCMA_CHIP_ID_BCM43421:
                /*
-                * enable 12 mA drive strenth for 43224 and set chipControl
+                * enable 12 mA drive strength for 43224 and set chipControl
                 * register bit 15
                 */
                if (bus->chipinfo.rev == 0) {
index 8a1e470..1e74ec1 100644
@@ -181,7 +181,6 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
        chip->set               = bcma_gpio_set_value;
        chip->direction_input   = bcma_gpio_direction_input;
        chip->direction_output  = bcma_gpio_direction_output;
-       chip->owner             = THIS_MODULE;
        chip->parent            = bus->dev;
 #if IS_BUILTIN(CONFIG_OF)
        chip->of_node           = cc->core->dev.of_node;
index 6f8fc5f..aa0581c 100644
@@ -61,7 +61,7 @@ static u32 bcma_get_cfgspace_addr(struct bcma_drv_pci *pc, unsigned int dev,
 {
        u32 addr = 0;
 
-       /* Issue config commands only when the data link is up (atleast
+       /* Issue config commands only when the data link is up (at least
         * one external pcie device is present).
         */
        if (dev >= 2 || !(bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_LSREG)
@@ -295,7 +295,7 @@ static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
        if (cap_ptr == 0x00)
                return cap_ptr;
 
-       /* loop thr'u the capability list and see if the requested capabilty
+       /* loop through the capability list and see if the requested capability
         * exists */
        bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id, sizeof(u8));
        while (cap_id != req_cap_id) {
@@ -317,7 +317,7 @@ static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
 
                *buflen = 0;
 
-               /* copy the cpability data excluding cap ID and next ptr */
+               /* copy the capability data excluding cap ID and next ptr */
                cap_data = cap_ptr + 2;
                if ((bufsize + cap_data)  > PCI_CONFIG_SPACE_SIZE)
                        bufsize = PCI_CONFIG_SPACE_SIZE - cap_data;
index 8e7ca3e..44392b6 100644
@@ -293,7 +293,7 @@ static int bcma_register_devices(struct bcma_bus *bus)
        int err;
 
        list_for_each_entry(core, &bus->cores, list) {
-               /* We support that cores ourself */
+               /* We support that core ourselves */
                switch (core->id.id) {
                case BCMA_CORE_4706_CHIPCOMMON:
                case BCMA_CORE_CHIPCOMMON:
@@ -369,7 +369,7 @@ void bcma_unregister_cores(struct bcma_bus *bus)
        if (bus->hosttype == BCMA_HOSTTYPE_SOC)
                platform_device_unregister(bus->drv_cc.watchdog);
 
-       /* Now noone uses internally-handled cores, we can free them */
+       /* Now no one uses internally-handled cores, we can free them */
        list_for_each_entry_safe(core, tmp, &bus->cores, list) {
                list_del(&core->list);
                put_device(&core->dev);
index bd2c923..3da01f1 100644
@@ -28,7 +28,7 @@ static int(*get_fallback_sprom)(struct bcma_bus *dev, struct ssb_sprom *out);
  * callback handler which fills the SPROM data structure. The fallback is
  * used for PCI based BCMA devices, where no valid SPROM can be found
  * in the shadow registers and to provide the SPROM for SoCs where BCMA is
- * to controll the system bus.
+ * to control the system bus.
  *
  * This function is useful for weird architectures that have a half-assed
  * BCMA device hardwired to their PCI bus.
@@ -281,7 +281,7 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
        SPEX(alpha2[0], SSB_SPROM8_CCODE, 0xff00, 8);
        SPEX(alpha2[1], SSB_SPROM8_CCODE, 0x00ff, 0);
 
-       /* Extract cores power info info */
+       /* Extract core's power info */
        for (i = 0; i < ARRAY_SIZE(pwr_info_offset); i++) {
                o = pwr_info_offset[i];
                SPEX(core_pwr_info[i].itssi_2g, o + SSB_SROM8_2G_MAXP_ITSSI,
index c443cd6..8c415be 100644
@@ -76,9 +76,6 @@ struct virtio_blk {
         */
        refcount_t refs;
 
-       /* What host tells us, plus 2 for header & tailer. */
-       unsigned int sg_elems;
-
        /* Ida index - used to track minor number allocations. */
        int index;
 
@@ -322,8 +319,6 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
        blk_status_t status;
        int err;
 
-       BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
-
        status = virtblk_setup_cmd(vblk->vdev, req, vbr);
        if (unlikely(status))
                return status;
@@ -783,8 +778,6 @@ static int virtblk_probe(struct virtio_device *vdev)
        /* Prevent integer overflows and honor max vq size */
        sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);
 
-       /* We need extra sg elements at head and tail. */
-       sg_elems += 2;
        vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
        if (!vblk) {
                err = -ENOMEM;
@@ -796,7 +789,6 @@ static int virtblk_probe(struct virtio_device *vdev)
        mutex_init(&vblk->vdev_mutex);
 
        vblk->vdev = vdev;
-       vblk->sg_elems = sg_elems;
 
        INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
 
@@ -853,7 +845,7 @@ static int virtblk_probe(struct virtio_device *vdev)
                set_disk_ro(vblk->disk, 1);
 
        /* We can handle whatever the host told us to handle. */
-       blk_queue_max_segments(q, vblk->sg_elems-2);
+       blk_queue_max_segments(q, sg_elems);
 
        /* No real sector limit. */
        blk_queue_max_hw_sectors(q, -1U);
@@ -925,9 +917,15 @@ static int virtblk_probe(struct virtio_device *vdev)
 
                virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
                             &v);
+
+               /*
+                * max_discard_seg == 0 is out of spec, but we have
+                * always handled it.
+                */
+               if (!v)
+                       v = sg_elems;
                blk_queue_max_discard_segments(q,
-                                              min_not_zero(v,
-                                                           MAX_DISCARD_SEGMENTS));
+                                              min(v, MAX_DISCARD_SEGMENTS));
 
                blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
        }
index ca71a05..03b5fb3 100644
@@ -1288,7 +1288,8 @@ free_shadow:
                        rinfo->ring_ref[i] = GRANT_INVALID_REF;
                }
        }
-       free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
+       free_pages_exact(rinfo->ring.sring,
+                        info->nr_ring_pages * XEN_PAGE_SIZE);
        rinfo->ring.sring = NULL;
 
        if (rinfo->irq)
@@ -1372,9 +1373,15 @@ static int blkif_get_final_status(enum blk_req_status s1,
        return BLKIF_RSP_OKAY;
 }
 
-static bool blkif_completion(unsigned long *id,
-                            struct blkfront_ring_info *rinfo,
-                            struct blkif_response *bret)
+/*
+ * Return values:
+ *  1 response processed.
+ *  0 missing further responses.
+ * -1 error while processing.
+ */
+static int blkif_completion(unsigned long *id,
+                           struct blkfront_ring_info *rinfo,
+                           struct blkif_response *bret)
 {
        int i = 0;
        struct scatterlist *sg;
@@ -1397,7 +1404,7 @@ static bool blkif_completion(unsigned long *id,
 
                /* Wait the second response if not yet here. */
                if (s2->status < REQ_DONE)
-                       return false;
+                       return 0;
 
                bret->status = blkif_get_final_status(s->status,
                                                      s2->status);
@@ -1448,42 +1455,43 @@ static bool blkif_completion(unsigned long *id,
        }
        /* Add the persistent grant into the list of free grants */
        for (i = 0; i < num_grant; i++) {
-               if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
+               if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) {
                        /*
                         * If the grant is still mapped by the backend (the
                         * backend has chosen to make this grant persistent)
                         * we add it at the head of the list, so it will be
                         * reused first.
                         */
-                       if (!info->feature_persistent)
-                               pr_alert_ratelimited("backed has not unmapped grant: %u\n",
-                                                    s->grants_used[i]->gref);
+                       if (!info->feature_persistent) {
+                               pr_alert("backend has not unmapped grant: %u\n",
+                                        s->grants_used[i]->gref);
+                               return -1;
+                       }
                        list_add(&s->grants_used[i]->node, &rinfo->grants);
                        rinfo->persistent_gnts_c++;
                } else {
                        /*
-                        * If the grant is not mapped by the backend we end the
-                        * foreign access and add it to the tail of the list,
-                        * so it will not be picked again unless we run out of
-                        * persistent grants.
+                        * If the grant is not mapped by the backend we add it
+                        * to the tail of the list, so it will not be picked
+                        * again unless we run out of persistent grants.
                         */
-                       gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
                        s->grants_used[i]->gref = GRANT_INVALID_REF;
                        list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
                }
        }
        if (s->req.operation == BLKIF_OP_INDIRECT) {
                for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
-                       if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
-                               if (!info->feature_persistent)
-                                       pr_alert_ratelimited("backed has not unmapped grant: %u\n",
-                                                            s->indirect_grants[i]->gref);
+                       if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) {
+                               if (!info->feature_persistent) {
+                                       pr_alert("backend has not unmapped grant: %u\n",
+                                                s->indirect_grants[i]->gref);
+                                       return -1;
+                               }
                                list_add(&s->indirect_grants[i]->node, &rinfo->grants);
                                rinfo->persistent_gnts_c++;
                        } else {
                                struct page *indirect_page;
 
-                               gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
                                /*
                                 * Add the used indirect page back to the list of
                                 * available pages for indirect grefs.
@@ -1498,7 +1506,7 @@ static bool blkif_completion(unsigned long *id,
                }
        }
 
-       return true;
+       return 1;
 }
 
 static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@@ -1564,12 +1572,17 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                }
 
                if (bret.operation != BLKIF_OP_DISCARD) {
+                       int ret;
+
                        /*
                         * We may need to wait for an extra response if the
                         * I/O request is split in 2
                         */
-                       if (!blkif_completion(&id, rinfo, &bret))
+                       ret = blkif_completion(&id, rinfo, &bret);
+                       if (!ret)
                                continue;
+                       if (unlikely(ret < 0))
+                               goto err;
                }
 
                if (add_id_to_freelist(rinfo, id)) {
@@ -1676,8 +1689,7 @@ static int setup_blkring(struct xenbus_device *dev,
        for (i = 0; i < info->nr_ring_pages; i++)
                rinfo->ring_ref[i] = GRANT_INVALID_REF;
 
-       sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
-                                                      get_order(ring_size));
+       sring = alloc_pages_exact(ring_size, GFP_NOIO);
        if (!sring) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
                return -ENOMEM;
@@ -1687,7 +1699,7 @@ static int setup_blkring(struct xenbus_device *dev,
 
        err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
        if (err < 0) {
-               free_pages((unsigned long)sring, get_order(ring_size));
+               free_pages_exact(sring, ring_size);
                rinfo->ring.sring = NULL;
                goto fail;
        }
@@ -2532,11 +2544,10 @@ static void purge_persistent_grants(struct blkfront_info *info)
                list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
                                         node) {
                        if (gnt_list_entry->gref == GRANT_INVALID_REF ||
-                           gnttab_query_foreign_access(gnt_list_entry->gref))
+                           !gnttab_try_end_foreign_access(gnt_list_entry->gref))
                                continue;
 
                        list_del(&gnt_list_entry->node);
-                       gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
                        rinfo->persistent_gnts_c--;
                        gnt_list_entry->gref = GRANT_INVALID_REF;
                        list_add_tail(&gnt_list_entry->node, &rinfo->grants);
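
All three hunks in this driver replace the racy gnttab_query_foreign_access() plus gnttab_end_foreign_access() sequence with a single gnttab_try_end_foreign_access() call, which revokes the grant only if the backend no longer maps it; a grant still mapped without persistent grants in use is now treated as a fatal backend error instead of being recycled. An illustrative sketch of the recycle path (the helper is hypothetical, structure names as in the hunks):

/* Hypothetical helper: recycle one grant after the change. */
static int recycle_grant_sketch(struct blkfront_info *info,
				struct blkfront_ring_info *rinfo,
				struct grant *entry)
{
	if (gnttab_try_end_foreign_access(entry->gref)) {
		/* Backend no longer maps it: safe to reuse the reference. */
		entry->gref = GRANT_INVALID_REF;
		list_add_tail(&entry->node, &rinfo->grants);
		return 0;
	}

	/* Still mapped: legitimate only with persistent grants. */
	if (!info->feature_persistent)
		return -1;	/* backend misbehaving; abort completion */

	list_add(&entry->node, &rinfo->grants);
	rinfo->persistent_gnts_c++;
	return 0;
}
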
index 36380e6..e307074 100644
@@ -400,6 +400,7 @@ config BT_MTKSDIO
 config BT_MTKUART
        tristate "MediaTek HCI UART driver"
        depends on SERIAL_DEV_BUS
+       select BT_MTK
        help
          MediaTek Bluetooth HCI UART driver.
          This driver is required if you want to use MediaTek Bluetooth
index 759d782..88262d3 100644
@@ -10,7 +10,6 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/device.h>
 #include <linux/firmware.h>
 #include <linux/usb.h>
 #include <asm/unaligned.h>
index e667933..c738ad0 100644
@@ -9,7 +9,6 @@
 
 #include <linux/module.h>
 
-#include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/slab.h>
index 526dfdf..809762d 100644
@@ -285,6 +285,7 @@ MODULE_AUTHOR("Mark Chen <mark-yw.chen@mediatek.com>");
 MODULE_DESCRIPTION("Bluetooth support for MediaTek devices ver " VERSION);
 MODULE_VERSION(VERSION);
 MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE_MT7622);
 MODULE_FIRMWARE(FIRMWARE_MT7663);
 MODULE_FIRMWARE(FIRMWARE_MT7668);
 MODULE_FIRMWARE(FIRMWARE_MT7961);
index 013850f..2a88ea8 100644
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: ISC */
 /* Copyright (C) 2021 MediaTek Inc. */
 
+#define FIRMWARE_MT7622                "mediatek/mt7622pr2h.bin"
 #define FIRMWARE_MT7663                "mediatek/mt7663pr2h.bin"
 #define FIRMWARE_MT7668                "mediatek/mt7668pr2h.bin"
 #define FIRMWARE_MT7961                "mediatek/BT_RAM_CODE_MT7961_1_2_hdr.bin"
index df3f9d0..f3dc588 100644
@@ -936,6 +936,62 @@ static int btmtksdio_mtk_reg_write(struct hci_dev *hdev, u32 reg, u32 val, u32 m
        return err;
 }
 
+static int btmtksdio_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id)
+{
+       /* uses 1 as the data path id for all use cases */
+       *data_path_id = 1;
+       return 0;
+}
+
+static int btmtksdio_get_codec_config_data(struct hci_dev *hdev,
+                                          __u8 link, struct bt_codec *codec,
+                                          __u8 *ven_len, __u8 **ven_data)
+{
+       int err = 0;
+
+       if (!ven_data || !ven_len)
+               return -EINVAL;
+
+       *ven_len = 0;
+       *ven_data = NULL;
+
+       if (link != ESCO_LINK) {
+               bt_dev_err(hdev, "Invalid link type(%u)", link);
+               return -EINVAL;
+       }
+
+       *ven_data = kmalloc(sizeof(__u8), GFP_KERNEL);
+       if (!*ven_data) {
+               err = -ENOMEM;
+               goto error;
+       }
+
+       /* supports only CVSD and mSBC offload codecs */
+       switch (codec->id) {
+       case 0x02:
+               **ven_data = 0x00;
+               break;
+       case 0x05:
+               **ven_data = 0x01;
+               break;
+       default:
+               err = -EINVAL;
+               bt_dev_err(hdev, "Invalid codec id(%u)", codec->id);
+               goto error;
+       }
+       /* The codec and its capabilities map to pre-defined preset ids:
+        * preset id 0x00 is the CVSD codec with an 8K sampling rate,
+        * preset id 0x01 is the mSBC codec with a 16K sampling rate.
+        */
+       *ven_len = sizeof(__u8);
+       return err;
+
+error:
+       kfree(*ven_data);
+       *ven_data = NULL;
+       return err;
+}
+
 static int btmtksdio_sco_setting(struct hci_dev *hdev)
 {
        const struct btmtk_sco sco_setting = {
@@ -968,7 +1024,14 @@ static int btmtksdio_sco_setting(struct hci_dev *hdev)
                return err;
 
        val |= 0x00000101;
-       return btmtksdio_mtk_reg_write(hdev, MT7921_PINMUX_1, val, ~0);
+       err = btmtksdio_mtk_reg_write(hdev, MT7921_PINMUX_1, val, ~0);
+       if (err < 0)
+               return err;
+
+       hdev->get_data_path_id = btmtksdio_get_data_path_id;
+       hdev->get_codec_config_data = btmtksdio_get_codec_config_data;
+
+       return err;
 }
 
 static int btmtksdio_reset_setting(struct hci_dev *hdev)
@@ -1060,6 +1123,9 @@ static int btmtksdio_setup(struct hci_dev *hdev)
                        return err;
                }
 
+               /* Enable WBS with mSBC codec */
+               set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+
                /* Enable GPIO reset mechanism */
                if (bdev->reset) {
                        err = btmtksdio_reset_setting(hdev);
@@ -1070,6 +1136,9 @@ static int btmtksdio_setup(struct hci_dev *hdev)
                        }
                }
 
+               /* Valid LE States quirk for MediaTek 7921 */
+               set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+
                break;
        case 0x7663:
        case 0x7668:
@@ -1281,6 +1350,8 @@ static int btmtksdio_probe(struct sdio_func *func,
        hdev->manufacturer = 70;
        set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
 
+       sdio_set_drvdata(func, bdev);
+
        err = hci_register_dev(hdev);
        if (err < 0) {
                dev_err(&func->dev, "Can't register HCI device\n");
@@ -1288,8 +1359,6 @@ static int btmtksdio_probe(struct sdio_func *func,
                return err;
        }
 
-       sdio_set_drvdata(func, bdev);
-
        /* pm_runtime_enable would be done after the firmware is being
         * downloaded because the core layer probably already enables
         * runtime PM for this func such as the case host->caps &
index 9ba22b1..c98691c 100644
 #include <net/bluetooth/hci_core.h>
 
 #include "h4_recv.h"
+#include "btmtk.h"
 
 #define VERSION "0.2"
 
-#define FIRMWARE_MT7622                "mediatek/mt7622pr2h.bin"
-#define FIRMWARE_MT7663                "mediatek/mt7663pr2h.bin"
-#define FIRMWARE_MT7668                "mediatek/mt7668pr2h.bin"
-
 #define MTK_STP_TLR_SIZE       2
 
 #define BTMTKUART_TX_STATE_ACTIVE      1
 
 #define BTMTKUART_FLAG_STANDALONE_HW    BIT(0)
 
-enum {
-       MTK_WMT_PATCH_DWNLD = 0x1,
-       MTK_WMT_TEST = 0x2,
-       MTK_WMT_WAKEUP = 0x3,
-       MTK_WMT_HIF = 0x4,
-       MTK_WMT_FUNC_CTRL = 0x6,
-       MTK_WMT_RST = 0x7,
-       MTK_WMT_SEMAPHORE = 0x17,
-};
-
-enum {
-       BTMTK_WMT_INVALID,
-       BTMTK_WMT_PATCH_UNDONE,
-       BTMTK_WMT_PATCH_DONE,
-       BTMTK_WMT_ON_UNDONE,
-       BTMTK_WMT_ON_DONE,
-       BTMTK_WMT_ON_PROGRESS,
-};
-
 struct mtk_stp_hdr {
        u8      prefix;
        __be16  dlen;
@@ -74,44 +52,6 @@ struct btmtkuart_data {
        const char *fwname;
 };
 
-struct mtk_wmt_hdr {
-       u8      dir;
-       u8      op;
-       __le16  dlen;
-       u8      flag;
-} __packed;
-
-struct mtk_hci_wmt_cmd {
-       struct mtk_wmt_hdr hdr;
-       u8 data[256];
-} __packed;
-
-struct btmtk_hci_wmt_evt {
-       struct hci_event_hdr hhdr;
-       struct mtk_wmt_hdr whdr;
-} __packed;
-
-struct btmtk_hci_wmt_evt_funcc {
-       struct btmtk_hci_wmt_evt hwhdr;
-       __be16 status;
-} __packed;
-
-struct btmtk_tci_sleep {
-       u8 mode;
-       __le16 duration;
-       __le16 host_duration;
-       u8 host_wakeup_pin;
-       u8 time_compensation;
-} __packed;
-
-struct btmtk_hci_wmt_params {
-       u8 op;
-       u8 flag;
-       u16 dlen;
-       const void *data;
-       u32 *status;
-};
-
 struct btmtkuart_dev {
        struct hci_dev *hdev;
        struct serdev_device *serdev;
@@ -153,29 +93,36 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
        struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
        u32 hlen, status = BTMTK_WMT_INVALID;
        struct btmtk_hci_wmt_evt *wmt_evt;
-       struct mtk_hci_wmt_cmd wc;
-       struct mtk_wmt_hdr *hdr;
+       struct btmtk_hci_wmt_cmd *wc;
+       struct btmtk_wmt_hdr *hdr;
        int err;
 
+       /* Send the WMT command and wait until the WMT event returns */
        hlen = sizeof(*hdr) + wmt_params->dlen;
        if (hlen > 255) {
                err = -EINVAL;
                goto err_free_skb;
        }
 
-       hdr = (struct mtk_wmt_hdr *)&wc;
+       wc = kzalloc(hlen, GFP_KERNEL);
+       if (!wc) {
+               err = -ENOMEM;
+               goto err_free_skb;
+       }
+
+       hdr = &wc->hdr;
        hdr->dir = 1;
        hdr->op = wmt_params->op;
        hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
        hdr->flag = wmt_params->flag;
-       memcpy(wc.data, wmt_params->data, wmt_params->dlen);
+       memcpy(wc->data, wmt_params->data, wmt_params->dlen);
 
        set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
 
-       err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
+       err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
        if (err < 0) {
                clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
-               goto err_free_skb;
+               goto err_free_wc;
        }
 
        /* The vendor specific WMT commands are all answered by a vendor
@@ -192,14 +139,14 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
        if (err == -EINTR) {
                bt_dev_err(hdev, "Execution of wmt command interrupted");
                clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
-               goto err_free_skb;
+               goto err_free_wc;
        }
 
        if (err) {
                bt_dev_err(hdev, "Execution of wmt command timed out");
                clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
                err = -ETIMEDOUT;
-               goto err_free_skb;
+               goto err_free_wc;
        }
 
        /* Parse and handle the return WMT event */
@@ -208,17 +155,17 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
                bt_dev_err(hdev, "Wrong op received %d expected %d",
                           wmt_evt->whdr.op, hdr->op);
                err = -EIO;
-               goto err_free_skb;
+               goto err_free_wc;
        }
 
        switch (wmt_evt->whdr.op) {
-       case MTK_WMT_SEMAPHORE:
+       case BTMTK_WMT_SEMAPHORE:
                if (wmt_evt->whdr.flag == 2)
                        status = BTMTK_WMT_PATCH_UNDONE;
                else
                        status = BTMTK_WMT_PATCH_DONE;
                break;
-       case MTK_WMT_FUNC_CTRL:
+       case BTMTK_WMT_FUNC_CTRL:
                wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
                if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
                        status = BTMTK_WMT_ON_DONE;
@@ -232,6 +179,8 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
        if (wmt_params->status)
                *wmt_params->status = status;
 
+err_free_wc:
+       kfree(wc);
 err_free_skb:
        kfree_skb(bdev->evt_skb);
        bdev->evt_skb = NULL;
@@ -239,95 +188,12 @@ err_free_skb:
        return err;
 }
 
-static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
-{
-       struct btmtk_hci_wmt_params wmt_params;
-       const struct firmware *fw;
-       const u8 *fw_ptr;
-       size_t fw_size;
-       int err, dlen;
-       u8 flag;
-
-       err = request_firmware(&fw, fwname, &hdev->dev);
-       if (err < 0) {
-               bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
-               return err;
-       }
-
-       fw_ptr = fw->data;
-       fw_size = fw->size;
-
-       /* The size of patch header is 30 bytes, should be skip */
-       if (fw_size < 30) {
-               err = -EINVAL;
-               goto free_fw;
-       }
-
-       fw_size -= 30;
-       fw_ptr += 30;
-       flag = 1;
-
-       wmt_params.op = MTK_WMT_PATCH_DWNLD;
-       wmt_params.status = NULL;
-
-       while (fw_size > 0) {
-               dlen = min_t(int, 250, fw_size);
-
-               /* Tell device the position in sequence */
-               if (fw_size - dlen <= 0)
-                       flag = 3;
-               else if (fw_size < fw->size - 30)
-                       flag = 2;
-
-               wmt_params.flag = flag;
-               wmt_params.dlen = dlen;
-               wmt_params.data = fw_ptr;
-
-               err = mtk_hci_wmt_sync(hdev, &wmt_params);
-               if (err < 0) {
-                       bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
-                                  err);
-                       goto free_fw;
-               }
-
-               fw_size -= dlen;
-               fw_ptr += dlen;
-       }
-
-       wmt_params.op = MTK_WMT_RST;
-       wmt_params.flag = 4;
-       wmt_params.dlen = 0;
-       wmt_params.data = NULL;
-       wmt_params.status = NULL;
-
-       /* Activate funciton the firmware providing to */
-       err = mtk_hci_wmt_sync(hdev, &wmt_params);
-       if (err < 0) {
-               bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
-               goto free_fw;
-       }
-
-       /* Wait a few moments for firmware activation done */
-       usleep_range(10000, 12000);
-
-free_fw:
-       release_firmware(fw);
-       return err;
-}
-
 static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
        struct hci_event_hdr *hdr = (void *)skb->data;
        int err;
 
-       /* Fix up the vendor event id with 0xff for vendor specific instead
-        * of 0xe4 so that event send via monitoring socket can be parsed
-        * properly.
-        */
-       if (hdr->evt == 0xe4)
-               hdr->evt = HCI_EV_VENDOR;
-
        /* When someone waits for the WMT event, the skb is being cloned
         * and being processed the events from there then.
         */
@@ -343,7 +209,7 @@ static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
        if (err < 0)
                goto err_free_skb;
 
-       if (hdr->evt == HCI_EV_VENDOR) {
+       if (hdr->evt == HCI_EV_WMT) {
                if (test_and_clear_bit(BTMTKUART_TX_WAIT_VND_EVT,
                                       &bdev->tx_state)) {
                        /* Barrier to sync with other CPUs */
@@ -645,7 +511,7 @@ static int btmtkuart_func_query(struct hci_dev *hdev)
        u8 param = 0;
 
        /* Query whether the function is enabled */
-       wmt_params.op = MTK_WMT_FUNC_CTRL;
+       wmt_params.op = BTMTK_WMT_FUNC_CTRL;
        wmt_params.flag = 4;
        wmt_params.dlen = sizeof(param);
        wmt_params.data = &param;
@@ -672,7 +538,7 @@ static int btmtkuart_change_baudrate(struct hci_dev *hdev)
         * ready to change a new baudrate.
         */
        baudrate = cpu_to_le32(bdev->desired_speed);
-       wmt_params.op = MTK_WMT_HIF;
+       wmt_params.op = BTMTK_WMT_HIF;
        wmt_params.flag = 1;
        wmt_params.dlen = 4;
        wmt_params.data = &baudrate;
@@ -706,7 +572,7 @@ static int btmtkuart_change_baudrate(struct hci_dev *hdev)
        usleep_range(20000, 22000);
 
        /* Test the new baudrate */
-       wmt_params.op = MTK_WMT_TEST;
+       wmt_params.op = BTMTK_WMT_TEST;
        wmt_params.flag = 7;
        wmt_params.dlen = 0;
        wmt_params.data = NULL;
@@ -741,7 +607,7 @@ static int btmtkuart_setup(struct hci_dev *hdev)
         * do any setups.
         */
        if (test_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state)) {
-               wmt_params.op = MTK_WMT_WAKEUP;
+               wmt_params.op = BTMTK_WMT_WAKEUP;
                wmt_params.flag = 3;
                wmt_params.dlen = 0;
                wmt_params.data = NULL;
@@ -760,7 +626,7 @@ static int btmtkuart_setup(struct hci_dev *hdev)
                btmtkuart_change_baudrate(hdev);
 
        /* Query whether the firmware is already download */
-       wmt_params.op = MTK_WMT_SEMAPHORE;
+       wmt_params.op = BTMTK_WMT_SEMAPHORE;
        wmt_params.flag = 1;
        wmt_params.dlen = 0;
        wmt_params.data = NULL;
@@ -778,7 +644,7 @@ static int btmtkuart_setup(struct hci_dev *hdev)
        }
 
        /* Setup a firmware which the device definitely requires */
-       err = mtk_setup_firmware(hdev, bdev->data->fwname);
+       err = btmtk_setup_firmware(hdev, bdev->data->fwname, mtk_hci_wmt_sync);
        if (err < 0)
                return err;
 
@@ -801,7 +667,7 @@ ignore_setup_fw:
        }
 
        /* Enable Bluetooth protocol */
-       wmt_params.op = MTK_WMT_FUNC_CTRL;
+       wmt_params.op = BTMTK_WMT_FUNC_CTRL;
        wmt_params.flag = 0;
        wmt_params.dlen = sizeof(param);
        wmt_params.data = &param;
@@ -846,7 +712,7 @@ static int btmtkuart_shutdown(struct hci_dev *hdev)
        int err;
 
        /* Disable the device */
-       wmt_params.op = MTK_WMT_FUNC_CTRL;
+       wmt_params.op = BTMTK_WMT_FUNC_CTRL;
        wmt_params.flag = 0;
        wmt_params.dlen = sizeof(param);
        wmt_params.data = &param;
@@ -1007,6 +873,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)
        hdev->setup    = btmtkuart_setup;
        hdev->shutdown = btmtkuart_shutdown;
        hdev->send     = btmtkuart_send_frame;
+       hdev->set_bdaddr = btmtk_set_bdaddr;
        SET_HCIDEV_DEV(hdev, &serdev->dev);
 
        hdev->manufacturer = 70;
@@ -1131,6 +998,3 @@ MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
 MODULE_DESCRIPTION("MediaTek Bluetooth Serial driver ver " VERSION);
 MODULE_VERSION(VERSION);
 MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(FIRMWARE_MT7622);
-MODULE_FIRMWARE(FIRMWARE_MT7663);
-MODULE_FIRMWARE(FIRMWARE_MT7668);
index c2030f7..481d488 100644
@@ -49,6 +49,7 @@ enum btrtl_chip_id {
        CHIP_ID_8822C = 13,
        CHIP_ID_8761B,
        CHIP_ID_8852A = 18,
+       CHIP_ID_8852B = 20,
 };
 
 struct id_table {
@@ -187,6 +188,14 @@ static const struct id_table ic_id_table[] = {
          .has_msft_ext = true,
          .fw_name  = "rtl_bt/rtl8852au_fw.bin",
          .cfg_name = "rtl_bt/rtl8852au_config" },
+
+       /* 8852B */
+       { IC_INFO(RTL_ROM_LMP_8852A, 0xb, 0xb, HCI_USB),
+         .config_needed = false,
+         .has_rom_version = true,
+         .has_msft_ext = true,
+         .fw_name  = "rtl_bt/rtl8852bu_fw.bin",
+         .cfg_name = "rtl_bt/rtl8852bu_config" },
        };
 
 static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev,
@@ -295,6 +304,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
                { RTL_ROM_LMP_8822B, 13 },      /* 8822C */
                { RTL_ROM_LMP_8761A, 14 },      /* 8761B */
                { RTL_ROM_LMP_8852A, 18 },      /* 8852A */
+               { RTL_ROM_LMP_8852A, 20 },      /* 8852B */
        };
 
        min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
@@ -757,6 +767,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
        switch (btrtl_dev->project_id) {
        case CHIP_ID_8822C:
        case CHIP_ID_8852A:
+       case CHIP_ID_8852B:
                set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
                set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
                hci_set_aosp_capable(hdev);
@@ -934,3 +945,5 @@ MODULE_FIRMWARE("rtl_bt/rtl8822b_fw.bin");
 MODULE_FIRMWARE("rtl_bt/rtl8822b_config.bin");
 MODULE_FIRMWARE("rtl_bt/rtl8852au_fw.bin");
 MODULE_FIRMWARE("rtl_bt/rtl8852au_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8852bu_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8852bu_config.bin");
index 1bb00b7..50df417 100644
@@ -477,6 +477,7 @@ static const struct usb_device_id blacklist_table[] = {
        /* Additional Realtek 8723BE Bluetooth devices */
        { USB_DEVICE(0x0489, 0xe085), .driver_info = BTUSB_REALTEK },
        { USB_DEVICE(0x0489, 0xe08b), .driver_info = BTUSB_REALTEK },
+       { USB_DEVICE(0x04f2, 0xb49f), .driver_info = BTUSB_REALTEK },
        { USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK },
        { USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK },
        { USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
@@ -2057,6 +2058,8 @@ static int btusb_setup_csr(struct hci_dev *hdev)
                 */
                set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
                set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
+               set_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks);
+               set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks);
 
                /* Clear the reset quirk since this is not an actual
                 * early Bluetooth 1.1 device from CSR.
@@ -2067,7 +2070,7 @@ static int btusb_setup_csr(struct hci_dev *hdev)
                /*
                 * Special workaround for these BT 4.0 chip clones, and potentially more:
                 *
-                * - 0x0134: a Barrot 8041a02                 (HCI rev: 0x1012 sub: 0x0810)
+                * - 0x0134: a Barrot 8041a02                 (HCI rev: 0x0810 sub: 0x1012)
                 * - 0x7558: IC markings FR3191AHAL 749H15143 (HCI rev/sub-version: 0x0709)
                 *
                 * These controllers are really messed-up.
@@ -2096,7 +2099,7 @@ static int btusb_setup_csr(struct hci_dev *hdev)
                if (ret >= 0)
                        msleep(200);
                else
-                       bt_dev_err(hdev, "CSR: Failed to suspend the device for our Barrot 8041a02 receive-issue workaround");
+                       bt_dev_warn(hdev, "CSR: Couldn't suspend the device for our Barrot 8041a02 receive-issue workaround");
 
                pm_runtime_forbid(&data->udev->dev);
 
index bd090d9..785f445 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/clk.h>
 #include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
 #include <linux/tty.h>
 #include <linux/interrupt.h>
 #include <linux/dmi.h>
@@ -870,7 +871,23 @@ unlock:
 #endif
 
 /* Some firmware reports an IRQ which does not work (wrong pin in fw table?) */
+static struct gpiod_lookup_table asus_tf103c_irq_gpios = {
+       .dev_id = "serial0-0",
+       .table = {
+               GPIO_LOOKUP("INT33FC:02", 17, "host-wakeup-alt", GPIO_ACTIVE_HIGH),
+               { }
+       },
+};
+
 static const struct dmi_system_id bcm_broken_irq_dmi_table[] = {
+       {
+               .ident = "Asus TF103C",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
+               },
+               .driver_data = &asus_tf103c_irq_gpios,
+       },
        {
                .ident = "Meegopad T08",
                .matches = {
@@ -1027,7 +1044,8 @@ static struct clk *bcm_get_txco(struct device *dev)
 
 static int bcm_get_resources(struct bcm_device *dev)
 {
-       const struct dmi_system_id *dmi_id;
+       const struct dmi_system_id *broken_irq_dmi_id;
+       const char *irq_con_id = "host-wakeup";
        int err;
 
        dev->name = dev_name(dev->dev);
@@ -1083,23 +1101,33 @@ static int bcm_get_resources(struct bcm_device *dev)
        if (err)
                return err;
 
+       broken_irq_dmi_id = dmi_first_match(bcm_broken_irq_dmi_table);
+       if (broken_irq_dmi_id && broken_irq_dmi_id->driver_data) {
+               gpiod_add_lookup_table(broken_irq_dmi_id->driver_data);
+               irq_con_id = "host-wakeup-alt";
+               dev->irq_active_low = false;
+               dev->irq = 0;
+       }
+
        /* IRQ can be declared in ACPI table as Interrupt or GpioInt */
        if (dev->irq <= 0) {
                struct gpio_desc *gpio;
 
-               gpio = devm_gpiod_get_optional(dev->dev, "host-wakeup",
-                                              GPIOD_IN);
+               gpio = devm_gpiod_get_optional(dev->dev, irq_con_id, GPIOD_IN);
                if (IS_ERR(gpio))
                        return PTR_ERR(gpio);
 
                dev->irq = gpiod_to_irq(gpio);
        }
 
-       dmi_id = dmi_first_match(bcm_broken_irq_dmi_table);
-       if (dmi_id) {
-               dev_info(dev->dev, "%s: Has a broken IRQ config, disabling IRQ support / runtime-pm\n",
-                        dmi_id->ident);
-               dev->irq = 0;
+       if (broken_irq_dmi_id) {
+               if (broken_irq_dmi_id->driver_data) {
+                       gpiod_remove_lookup_table(broken_irq_dmi_id->driver_data);
+               } else {
+                       dev_info(dev->dev, "%s: Has a broken IRQ config, disabling IRQ support / runtime-pm\n",
+                                broken_irq_dmi_id->ident);
+                       dev->irq = 0;
+               }
        }
 
        dev_dbg(dev->dev, "BCM irq: %d\n", dev->irq);
index fdf504b..c5a0409 100644 (file)
@@ -629,9 +629,11 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
                break;
        }
 
-       pm_runtime_get_sync(&hu->serdev->dev);
-       pm_runtime_mark_last_busy(&hu->serdev->dev);
-       pm_runtime_put_autosuspend(&hu->serdev->dev);
+       if (hu->serdev) {
+               pm_runtime_get_sync(&hu->serdev->dev);
+               pm_runtime_mark_last_busy(&hu->serdev->dev);
+               pm_runtime_put_autosuspend(&hu->serdev->dev);
+       }
 
        return 0;
 }
index 2359889..e3c4305 100644 (file)
@@ -1957,6 +1957,13 @@ static void virtcons_remove(struct virtio_device *vdev)
        list_del(&portdev->list);
        spin_unlock_irq(&pdrvdata_lock);
 
+       /* Device is going away, exit any polling for buffers */
+       virtio_break_device(vdev);
+       if (use_multiport(portdev))
+               flush_work(&portdev->control_work);
+       else
+               flush_work(&portdev->config_work);
+
        /* Disable interrupts for vqs */
        virtio_reset_device(vdev);
        /* Finish up work that's lined up */
index ad4256d..d4d67fb 100644 (file)
@@ -231,6 +231,8 @@ config COMMON_CLK_GEMINI
 
 config COMMON_CLK_LAN966X
        bool "Generic Clock Controller driver for LAN966X SoC"
+       depends on HAS_IOMEM
+       depends on OF
        help
          This driver provides support for Generic Clock Controller (GCK) on
          LAN966X SoC. GCK generates and supplies clock to various peripherals
index 538e496..5d2ae29 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019, 2022, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/clk-provider.h>
@@ -625,6 +625,9 @@ static struct clk_branch disp_cc_mdss_vsync_clk = {
 
 static struct gdsc mdss_gdsc = {
        .gdscr = 0x3000,
+       .en_rest_wait_val = 0x2,
+       .en_few_wait_val = 0x2,
+       .clk_dis_wait_val = 0xf,
        .pd = {
                .name = "mdss_gdsc",
        },
index 4ef4ae2..ad596d5 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/clk-provider.h>
@@ -787,6 +787,9 @@ static struct clk_branch disp_cc_sleep_clk = {
 
 static struct gdsc disp_cc_mdss_core_gdsc = {
        .gdscr = 0x1004,
+       .en_rest_wait_val = 0x2,
+       .en_few_wait_val = 0x2,
+       .clk_dis_wait_val = 0xf,
        .pd = {
                .name = "disp_cc_mdss_core_gdsc",
        },
index 566fdfa..db93796 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020, 2022, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/clk-provider.h>
@@ -1126,6 +1126,9 @@ static struct clk_branch disp_cc_mdss_vsync_clk = {
 
 static struct gdsc mdss_gdsc = {
        .gdscr = 0x3000,
+       .en_rest_wait_val = 0x2,
+       .en_few_wait_val = 0x2,
+       .clk_dis_wait_val = 0xf,
        .pd = {
                .name = "mdss_gdsc",
        },
index 7e1dd8c..44520ef 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/bitops.h>
 #define CFG_GDSCR_OFFSET               0x4
 
 /* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
-#define EN_REST_WAIT_VAL       (0x2 << 20)
-#define EN_FEW_WAIT_VAL                (0x8 << 16)
-#define CLK_DIS_WAIT_VAL       (0x2 << 12)
+#define EN_REST_WAIT_VAL       0x2
+#define EN_FEW_WAIT_VAL                0x8
+#define CLK_DIS_WAIT_VAL       0x2
+
+/* Transition delay shifts */
+#define EN_REST_WAIT_SHIFT     20
+#define EN_FEW_WAIT_SHIFT      16
+#define CLK_DIS_WAIT_SHIFT     12
 
 #define RETAIN_MEM             BIT(14)
 #define RETAIN_PERIPH          BIT(13)
@@ -380,7 +385,18 @@ static int gdsc_init(struct gdsc *sc)
         */
        mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK |
               EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK;
-       val = EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
+
+       if (!sc->en_rest_wait_val)
+               sc->en_rest_wait_val = EN_REST_WAIT_VAL;
+       if (!sc->en_few_wait_val)
+               sc->en_few_wait_val = EN_FEW_WAIT_VAL;
+       if (!sc->clk_dis_wait_val)
+               sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL;
+
+       val = sc->en_rest_wait_val << EN_REST_WAIT_SHIFT |
+               sc->en_few_wait_val << EN_FEW_WAIT_SHIFT |
+               sc->clk_dis_wait_val << CLK_DIS_WAIT_SHIFT;
+
        ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val);
        if (ret)
                return ret;
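
For illustration, the register value gdsc_init() assembles from the defaults above packs as follows (a sketch using the shift macros from this hunk; a gdsc that sets none of the per-instance fields gets the defaults):

    /* defaults: en_rest_wait_val = 0x2, en_few_wait_val = 0x8,
     * clk_dis_wait_val = 0x2 */
    val = (0x2 << 20) | (0x8 << 16) | (0x2 << 12);    /* == 0x00282000 */

    /* mdss_gdsc in the dispcc hunks earlier (0x2 / 0x2 / 0xf) instead
     * packs to (0x2 << 20) | (0x2 << 16) | (0xf << 12) == 0x0022f000 */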
index d7cc4c2..ad313d7 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __QCOM_GDSC_H__
@@ -22,6 +22,9 @@ struct reset_controller_dev;
  * @cxcs: offsets of branch registers to toggle mem/periph bits in
  * @cxc_count: number of @cxcs
  * @pwrsts: Possible powerdomain power states
+ * @en_rest_wait_val: transition delay value for receiving enr ack signal
+ * @en_few_wait_val: transition delay value for receiving enf ack signal
+ * @clk_dis_wait_val: transition delay value for halting clock
  * @resets: ids of resets associated with this gdsc
  * @reset_count: number of @resets
  * @rcdev: reset controller
@@ -36,6 +39,9 @@ struct gdsc {
        unsigned int                    clamp_io_ctrl;
        unsigned int                    *cxcs;
        unsigned int                    cxc_count;
+       unsigned int                    en_rest_wait_val;
+       unsigned int                    en_few_wait_val;
+       unsigned int                    clk_dis_wait_val;
        const u8                        pwrsts;
 /* Powerdomain allowable state bitfields */
 #define PWRSTS_OFF             BIT(0)
index 99ba8d5..11f30fd 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/clk.h>
 #include <linux/crypto.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
@@ -43,16 +44,19 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
 {
        unsigned int currsize = 0;
        u32 val;
+       int ret;
 
        /* read random data from hardware */
        do {
-               val = readl_relaxed(rng->base + PRNG_STATUS);
-               if (!(val & PRNG_STATUS_DATA_AVAIL))
-                       break;
+               ret = readl_poll_timeout(rng->base + PRNG_STATUS, val,
+                                        val & PRNG_STATUS_DATA_AVAIL,
+                                        200, 10000);
+               if (ret)
+                       return ret;
 
                val = readl_relaxed(rng->base + PRNG_DATA_OUT);
                if (!val)
-                       break;
+                       return -EINVAL;
 
                if ((max - currsize) >= WORD_SZ) {
                        memcpy(data, &val, WORD_SZ);
@@ -61,11 +65,10 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
                } else {
                        /* copy only remaining bytes */
                        memcpy(data, &val, max - currsize);
-                       break;
                }
        } while (currsize < max);
 
-       return currsize;
+       return 0;
 }
 
 static int qcom_rng_generate(struct crypto_rng *tfm,
@@ -87,7 +90,7 @@ static int qcom_rng_generate(struct crypto_rng *tfm,
        mutex_unlock(&rng->lock);
        clk_disable_unprepare(rng->clk);
 
-       return 0;
+       return ret;
 }
 
 static int qcom_rng_seed(struct crypto_rng *tfm, const u8 *seed,
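
For context, readl_poll_timeout() from <linux/iopoll.h> returns 0 as soon as its condition holds and -ETIMEDOUT once the budget expires, which is what lets qcom_rng_read() and qcom_rng_generate() propagate a real error instead of silently reporting a short read as success. A simplified model of the call above (a sketch, not the actual macro expansion):

    /* approximates readl_poll_timeout(rng->base + PRNG_STATUS, val,
     *                                 val & PRNG_STATUS_DATA_AVAIL, 200, 10000) */
    static int wait_for_prng_data(void __iomem *status_reg)
    {
            unsigned long deadline = jiffies + usecs_to_jiffies(10000);
            u32 val;

            for (;;) {
                    val = readl_relaxed(status_reg);
                    if (val & PRNG_STATUS_DATA_AVAIL)
                            return 0;
                    if (time_after(jiffies, deadline))
                            return -ETIMEDOUT;
                    usleep_range(200, 400);    /* ~200 us polling period */
            }
    }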
index 4c3201e..ea84108 100644 (file)
@@ -24,7 +24,7 @@ static bool dump_properties __initdata;
 static int __init dump_properties_enable(char *arg)
 {
        dump_properties = true;
-       return 0;
+       return 1;
 }
 
 __setup("dump_apple_properties", dump_properties_enable);
index 7de3f5b..5502e17 100644 (file)
@@ -212,7 +212,7 @@ static int __init efivar_ssdt_setup(char *str)
                memcpy(efivar_ssdt, str, strlen(str));
        else
                pr_warn("efivar_ssdt: name too long: %s\n", str);
-       return 0;
+       return 1;
 }
 __setup("efivar_ssdt=", efivar_ssdt_setup);
 
index 153fe79..8e5d879 100644 (file)
@@ -547,7 +547,7 @@ struct gpio_sim_bank {
         *
         * So we need to store the pointer to the parent struct here. We can
         * dereference it anywhere we need with no checks and no locking as
-        * it's guaranteed to survive the childred and protected by configfs
+        * it's guaranteed to survive the children and protected by configfs
         * locks.
         *
         * Same for other structures.
@@ -1322,7 +1322,7 @@ static void gpio_sim_hog_config_item_release(struct config_item *item)
        kfree(hog);
 }
 
-struct configfs_item_operations gpio_sim_hog_config_item_ops = {
+static struct configfs_item_operations gpio_sim_hog_config_item_ops = {
        .release        = gpio_sim_hog_config_item_release,
 };
 
index 8d298be..031fe10 100644 (file)
@@ -1075,6 +1075,7 @@ static const struct tegra_gpio_soc tegra241_main_soc = {
        .ports = tegra241_main_ports,
        .name = "tegra241-gpio",
        .instance = 0,
+       .num_irqs_per_bank = 8,
 };
 
 #define TEGRA241_AON_GPIO_PORT(_name, _bank, _port, _pins)     \
@@ -1095,6 +1096,7 @@ static const struct tegra_gpio_soc tegra241_aon_soc = {
        .ports = tegra241_aon_ports,
        .name = "tegra241-gpio-aon",
        .instance = 1,
+       .num_irqs_per_bank = 8,
 };
 
 static const struct of_device_id tegra186_gpio_of_match[] = {
index d885032..d918d2d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Digital I/O driver for Technologic Systems I2C FPGA Core
  *
- * Copyright (C) 2015 Technologic Systems
+ * Copyright (C) 2015, 2018 Technologic Systems
  * Copyright (C) 2016 Savoir-Faire Linux
  *
  * This program is free software; you can redistribute it and/or
@@ -55,19 +55,33 @@ static int ts4900_gpio_direction_input(struct gpio_chip *chip,
 {
        struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
 
-       /*
-        * This will clear the output enable bit, the other bits are
-        * dontcare when this is cleared
+       /* Only clear the OE bit here; this requires a RMW. That prevents OE
+        * and data from reaching the physical pin at different times.
         */
-       return regmap_write(priv->regmap, offset, 0);
+       return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OE, 0);
 }
 
 static int ts4900_gpio_direction_output(struct gpio_chip *chip,
                                        unsigned int offset, int value)
 {
        struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
+       unsigned int reg;
        int ret;
 
+       /* If changing from an input to an output, we need to first set the
+        * data bit to the requested value and only then set the OE bit. This
+        * prevents a glitch that can occur on the IO line.
+        */
+       regmap_read(priv->regmap, offset, &reg);
+       if (!(reg & TS4900_GPIO_OE)) {
+               if (value)
+                       reg = TS4900_GPIO_OUT;
+               else
+                       reg &= ~TS4900_GPIO_OUT;
+
+               regmap_write(priv->regmap, offset, reg);
+       }
+
        if (value)
                ret = regmap_write(priv->regmap, offset, TS4900_GPIO_OE |
                                                         TS4900_GPIO_OUT);
index c0f6a25..a5495ad 100644 (file)
@@ -307,7 +307,8 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
        if (IS_ERR(desc))
                return desc;
 
-       ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
+       /* ACPI uses hundredths of milliseconds units */
+       ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10);
        if (ret)
                dev_warn(chip->parent,
                         "Failed to set debounce-timeout for pin 0x%04X, err %d\n",
@@ -1035,7 +1036,8 @@ int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int ind
                        if (ret < 0)
                                return ret;
 
-                       ret = gpio_set_debounce_timeout(desc, info.debounce);
+                       /* ACPI uses hundredths of milliseconds units */
+                       ret = gpio_set_debounce_timeout(desc, info.debounce * 10);
                        if (ret)
                                return ret;
 
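
The factor of 10 in both hunks is a units conversion: ACPI expresses the GpioInt/GpioIo DebounceTimeout in hundredths of a millisecond (10 us units), while gpio_set_debounce_timeout() takes microseconds. A worked example with a hypothetical value:

    unsigned int acpi_debounce = 100;                /* from _CRS, 10 us units */
    unsigned int debounce_us = acpi_debounce * 10;   /* 1000 us = 1 ms */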
index a3d1427..6630d92 100644 (file)
@@ -2227,6 +2227,16 @@ static int gpio_set_bias(struct gpio_desc *desc)
        return gpio_set_config_with_argument_optional(desc, bias, arg);
 }
 
+/**
+ * gpio_set_debounce_timeout() - Set debounce timeout
+ * @desc:      GPIO descriptor to set the debounce timeout
+ * @debounce:  Debounce timeout in microseconds
+ *
+ * The function asks the corresponding GPIO driver to set the debounce
+ * timeout in the hardware.
+ *
+ * Returns 0 on success, or negative error code otherwise.
+ */
 int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce)
 {
        return gpio_set_config_with_argument_optional(desc,
index d62190b..418341a 100644 (file)
@@ -777,7 +777,8 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
        amdgpu_vm_eviction_lock(vm);
        ret = !vm->evicting;
        amdgpu_vm_eviction_unlock(vm);
-       return ret;
+
+       return ret && list_empty(&vm->evicted);
 }
 
 /**
index 58a2428..6e3f1d6 100644 (file)
@@ -6,6 +6,7 @@ config DRM_HDLCD
        depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
        depends on COMMON_CLK
        select DRM_KMS_HELPER
+       select DRM_GEM_CMA_HELPER
        help
          Choose this option if you have an ARM High Definition Colour LCD
          controller.
index dab8f76..68d8415 100644 (file)
@@ -1802,6 +1802,7 @@ static inline void ti_sn_gpio_unregister(void) {}
 
 static void ti_sn65dsi86_runtime_disable(void *data)
 {
+       pm_runtime_dont_use_autosuspend(data);
        pm_runtime_disable(data);
 }
 
@@ -1861,11 +1862,11 @@ static int ti_sn65dsi86_probe(struct i2c_client *client,
                                     "failed to get reference clock\n");
 
        pm_runtime_enable(dev);
+       pm_runtime_set_autosuspend_delay(pdata->dev, 500);
+       pm_runtime_use_autosuspend(pdata->dev);
        ret = devm_add_action_or_reset(dev, ti_sn65dsi86_runtime_disable, dev);
        if (ret)
                return ret;
-       pm_runtime_set_autosuspend_delay(pdata->dev, 500);
-       pm_runtime_use_autosuspend(pdata->dev);
 
        ti_sn65dsi86_debugfs_init(pdata);
 
index a50c82b..76a8c70 100644 (file)
@@ -2330,6 +2330,9 @@ EXPORT_SYMBOL(drm_connector_atomic_hdr_metadata_equal);
 void drm_connector_set_vrr_capable_property(
                struct drm_connector *connector, bool capable)
 {
+       if (!connector->vrr_capable_property)
+               return;
+
        drm_object_property_set_value(&connector->base,
                                      connector->vrr_capable_property,
                                      capable);
index 12571ac..c04264f 100644 (file)
@@ -678,7 +678,6 @@ static int decon_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct decon_context *ctx;
        struct device_node *i80_if_timings;
-       struct resource *res;
        int ret;
 
        if (!dev->of_node)
@@ -728,16 +727,11 @@ static int decon_probe(struct platform_device *pdev)
                goto err_iounmap;
        }
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
-                                          ctx->i80_if ? "lcd_sys" : "vsync");
-       if (!res) {
-               dev_err(dev, "irq request failed.\n");
-               ret = -ENXIO;
+       ret = platform_get_irq_byname(pdev, ctx->i80_if ? "lcd_sys" : "vsync");
+       if (ret < 0)
                goto err_iounmap;
-       }
 
-       ret = devm_request_irq(dev, res->start, decon_irq_handler,
-                                                       0, "drm_decon", ctx);
+       ret = devm_request_irq(dev, ret, decon_irq_handler, 0, "drm_decon", ctx);
        if (ret) {
                dev_err(dev, "irq request failed.\n");
                goto err_iounmap;
index 32a3657..d13f5e3 100644 (file)
@@ -1334,8 +1334,10 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi,
        int ret;
        int te_gpio_irq;
 
-       dsi->te_gpio = devm_gpiod_get_optional(dsi->dev, "te", GPIOD_IN);
-       if (IS_ERR(dsi->te_gpio)) {
+       dsi->te_gpio = gpiod_get_optional(panel, "te", GPIOD_IN);
+       if (!dsi->te_gpio) {
+               return 0;
+       } else if (IS_ERR(dsi->te_gpio)) {
                dev_err(dsi->dev, "gpio request failed with %ld\n",
                                PTR_ERR(dsi->te_gpio));
                return PTR_ERR(dsi->te_gpio);
index 023f54e..0ee32e4 100644 (file)
@@ -1267,7 +1267,6 @@ static int fimc_probe(struct platform_device *pdev)
        struct exynos_drm_ipp_formats *formats;
        struct device *dev = &pdev->dev;
        struct fimc_context *ctx;
-       struct resource *res;
        int ret;
        int i, j, num_limits, num_formats;
 
@@ -1330,14 +1329,12 @@ static int fimc_probe(struct platform_device *pdev)
                return PTR_ERR(ctx->regs);
 
        /* resource irq */
-       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!res) {
-               dev_err(dev, "failed to request irq resource.\n");
-               return -ENOENT;
-       }
+       ret = platform_get_irq(pdev, 0);
+       if (ret < 0)
+               return ret;
 
-       ret = devm_request_irq(dev, res->start, fimc_irq_handler,
-               0, dev_name(dev), ctx);
+       ret = devm_request_irq(dev, ret, fimc_irq_handler,
+                              0, dev_name(dev), ctx);
        if (ret < 0) {
                dev_err(dev, "failed to request irq.\n");
                return ret;
index c735e53..7d5a483 100644 (file)
@@ -1133,7 +1133,6 @@ static int fimd_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct fimd_context *ctx;
        struct device_node *i80_if_timings;
-       struct resource *res;
        int ret;
 
        if (!dev->of_node)
@@ -1206,15 +1205,11 @@ static int fimd_probe(struct platform_device *pdev)
        if (IS_ERR(ctx->regs))
                return PTR_ERR(ctx->regs);
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
-                                          ctx->i80_if ? "lcd_sys" : "vsync");
-       if (!res) {
-               dev_err(dev, "irq request failed.\n");
-               return -ENXIO;
-       }
+       ret = platform_get_irq_byname(pdev, ctx->i80_if ? "lcd_sys" : "vsync");
+       if (ret < 0)
+               return ret;
 
-       ret = devm_request_irq(dev, res->start, fimd_irq_handler,
-                                                       0, "drm_fimd", ctx);
+       ret = devm_request_irq(dev, ret, fimd_irq_handler, 0, "drm_fimd", ctx);
        if (ret) {
                dev_err(dev, "irq request failed.\n");
                return ret;
index 166a802..964dceb 100644 (file)
@@ -1220,7 +1220,6 @@ static int gsc_probe(struct platform_device *pdev)
        struct gsc_driverdata *driver_data;
        struct exynos_drm_ipp_formats *formats;
        struct gsc_context *ctx;
-       struct resource *res;
        int num_formats, ret, i, j;
 
        ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
@@ -1275,13 +1274,10 @@ static int gsc_probe(struct platform_device *pdev)
                return PTR_ERR(ctx->regs);
 
        /* resource irq */
-       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!res) {
-               dev_err(dev, "failed to request irq resource.\n");
-               return -ENOENT;
-       }
+       ctx->irq = platform_get_irq(pdev, 0);
+       if (ctx->irq < 0)
+               return ctx->irq;
 
-       ctx->irq = res->start;
        ret = devm_request_irq(dev, ctx->irq, gsc_irq_handler, 0,
                               dev_name(dev), ctx);
        if (ret < 0) {
index 41c54f1..e5204be 100644 (file)
@@ -809,19 +809,17 @@ static int mixer_resources_init(struct mixer_context *mixer_ctx)
                return -ENXIO;
        }
 
-       res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_IRQ, 0);
-       if (res == NULL) {
-               dev_err(dev, "get interrupt resource failed.\n");
-               return -ENXIO;
-       }
+       ret = platform_get_irq(mixer_ctx->pdev, 0);
+       if (ret < 0)
+               return ret;
+       mixer_ctx->irq = ret;
 
-       ret = devm_request_irq(dev, res->start, mixer_irq_handler,
-                                               0, "drm_mixer", mixer_ctx);
+       ret = devm_request_irq(dev, mixer_ctx->irq, mixer_irq_handler,
+                              0, "drm_mixer", mixer_ctx);
        if (ret) {
                dev_err(dev, "request interrupt failed.\n");
                return ret;
        }
-       mixer_ctx->irq = res->start;
 
        return 0;
 }
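
The decon, fimc, fimd, gsc and mixer hunks above all make the same conversion: the IRQ number now comes from platform_get_irq() or platform_get_irq_byname() instead of a platform_get_resource(IORESOURCE_IRQ, ...) lookup, which cannot be relied on (it does not work when IRQs are mapped lazily, e.g. with hierarchical interrupt domains). The resulting shape, sketched with hypothetical handler/ctx names:

    int irq;

    irq = platform_get_irq(pdev, 0);    /* or platform_get_irq_byname() */
    if (irq < 0)
            return irq;                 /* the helper already logs the failure */

    ret = devm_request_irq(dev, irq, handler, 0, dev_name(dev), ctx);
    if (ret)
            return ret;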
index a1a663f..00279e8 100644 (file)
@@ -1406,6 +1406,13 @@ static inline u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private
               PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
 }
 
+static inline u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
+{
+       return IS_ALDERLAKE_P(dev_priv) ?
+              ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
+              PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
+}
+
 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -1510,7 +1517,13 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       u32 val = PSR2_MAN_TRK_CTL_ENABLE;
+       u32 val = 0;
+
+       if (!IS_ALDERLAKE_P(dev_priv))
+               val = PSR2_MAN_TRK_CTL_ENABLE;
+
+       /* SF partial frame enable has to be set even on full update */
+       val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
 
        if (full_update) {
                /*
@@ -1530,7 +1543,6 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
        } else {
                drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
 
-               val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
                val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
                val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
        }
index 13b27b8..ba21ace 100644 (file)
@@ -110,7 +110,7 @@ static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
 {
        u32 request[] = {
                GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
-               SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 2),
+               SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
                id,
        };
 
index c2bb33f..902e4c8 100644 (file)
@@ -4829,6 +4829,7 @@ enum {
 #define  ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val)       REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val)
 #define  ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK         REG_GENMASK(12, 0)
 #define  ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(val)         REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK, val)
+#define  ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE         REG_BIT(31)
 #define  ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME            REG_BIT(14)
 #define  ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME         REG_BIT(13)
 
index da8f82c..fc8a68f 100644 (file)
@@ -108,6 +108,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
                /* Comet Lake V PCH is based on KBP, which is SPT compatible */
                return PCH_SPT;
        case INTEL_PCH_ICP_DEVICE_ID_TYPE:
+       case INTEL_PCH_ICP2_DEVICE_ID_TYPE:
                drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n");
                drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
                return PCH_ICP;
@@ -123,7 +124,6 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
                            !IS_GEN9_BC(dev_priv));
                return PCH_TGP;
        case INTEL_PCH_JSP_DEVICE_ID_TYPE:
-       case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
                drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
                drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
                return PCH_JSP;
index 6bff775..4ba0f19 100644 (file)
@@ -50,11 +50,11 @@ enum intel_pch {
 #define INTEL_PCH_CMP2_DEVICE_ID_TYPE          0x0680
 #define INTEL_PCH_CMP_V_DEVICE_ID_TYPE         0xA380
 #define INTEL_PCH_ICP_DEVICE_ID_TYPE           0x3480
+#define INTEL_PCH_ICP2_DEVICE_ID_TYPE          0x3880
 #define INTEL_PCH_MCC_DEVICE_ID_TYPE           0x4B00
 #define INTEL_PCH_TGP_DEVICE_ID_TYPE           0xA080
 #define INTEL_PCH_TGP2_DEVICE_ID_TYPE          0x4380
 #define INTEL_PCH_JSP_DEVICE_ID_TYPE           0x4D80
-#define INTEL_PCH_JSP2_DEVICE_ID_TYPE          0x3880
 #define INTEL_PCH_ADP_DEVICE_ID_TYPE           0x7A80
 #define INTEL_PCH_ADP2_DEVICE_ID_TYPE          0x5180
 #define INTEL_PCH_ADP3_DEVICE_ID_TYPE          0x7A00
index 434c286..0aec5a1 100644 (file)
@@ -106,6 +106,7 @@ config DRM_PANEL_EDP
        depends on PM
        select VIDEOMODE_HELPERS
        select DRM_DP_AUX_BUS
+       select DRM_DP_HELPER
        help
          DRM panel driver for dumb eDP panels that need at most a regulator and
          a GPIO to be powered up. Optionally a backlight can be attached so
index 145833a..5b3fbee 100644 (file)
 /* format 13 is semi-planar YUV411 VUVU */
 #define SUN8I_MIXER_FBFMT_YUV411       14
 /* format 15 doesn't exist */
-/* format 16 is P010 YVU */
-#define SUN8I_MIXER_FBFMT_P010_YUV     17
-/* format 18 is P210 YVU */
-#define SUN8I_MIXER_FBFMT_P210_YUV     19
+#define SUN8I_MIXER_FBFMT_P010_YUV     16
+/* format 17 is P010 YVU */
+#define SUN8I_MIXER_FBFMT_P210_YUV     18
+/* format 19 is P210 YVU */
 /* format 20 is packed YVU444 10-bit */
 /* format 21 is packed YUV444 10-bit */
 
index 26c31d7..81e7e40 100644 (file)
@@ -860,7 +860,9 @@ static const char *keys[KEY_MAX + 1] = {
        [KEY_F22] = "F22",                      [KEY_F23] = "F23",
        [KEY_F24] = "F24",                      [KEY_PLAYCD] = "PlayCD",
        [KEY_PAUSECD] = "PauseCD",              [KEY_PROG3] = "Prog3",
-       [KEY_PROG4] = "Prog4",                  [KEY_SUSPEND] = "Suspend",
+       [KEY_PROG4] = "Prog4",
+       [KEY_ALL_APPLICATIONS] = "AllApplications",
+       [KEY_SUSPEND] = "Suspend",
        [KEY_CLOSE] = "Close",                  [KEY_PLAY] = "Play",
        [KEY_FASTFORWARD] = "FastForward",      [KEY_BASSBOOST] = "BassBoost",
        [KEY_PRINT] = "Print",                  [KEY_HP] = "HP",
@@ -969,6 +971,7 @@ static const char *keys[KEY_MAX + 1] = {
        [KEY_ASSISTANT] = "Assistant",
        [KEY_KBD_LAYOUT_NEXT] = "KbdLayoutNext",
        [KEY_EMOJI_PICKER] = "EmojiPicker",
+       [KEY_DICTATE] = "Dictate",
        [KEY_BRIGHTNESS_MIN] = "BrightnessMin",
        [KEY_BRIGHTNESS_MAX] = "BrightnessMax",
        [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
index 9b42b0c..2876cb6 100644 (file)
@@ -228,7 +228,6 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
        struct elo_priv *priv;
        int ret;
-       struct usb_device *udev;
 
        if (!hid_is_usb(hdev))
                return -EINVAL;
@@ -238,8 +237,7 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
                return -ENOMEM;
 
        INIT_DELAYED_WORK(&priv->work, elo_work);
-       udev = interface_to_usbdev(to_usb_interface(hdev->dev.parent));
-       priv->usbdev = usb_get_dev(udev);
+       priv->usbdev = interface_to_usbdev(to_usb_interface(hdev->dev.parent));
 
        hid_set_drvdata(hdev, priv);
 
@@ -262,7 +260,6 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
 
        return 0;
 err_free:
-       usb_put_dev(udev);
        kfree(priv);
        return ret;
 }
@@ -271,8 +268,6 @@ static void elo_remove(struct hid_device *hdev)
 {
        struct elo_priv *priv = hid_get_drvdata(hdev);
 
-       usb_put_dev(priv->usbdev);
-
        hid_hw_stop(hdev);
        cancel_delayed_work_sync(&priv->work);
        kfree(priv);
index 112901d..56ec273 100644 (file)
@@ -992,6 +992,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x0cd: map_key_clear(KEY_PLAYPAUSE);       break;
                case 0x0cf: map_key_clear(KEY_VOICECOMMAND);    break;
 
+               case 0x0d8: map_key_clear(KEY_DICTATE);         break;
                case 0x0d9: map_key_clear(KEY_EMOJI_PICKER);    break;
 
                case 0x0e0: map_abs_clear(ABS_VOLUME);          break;
@@ -1083,6 +1084,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
 
                case 0x29d: map_key_clear(KEY_KBD_LAYOUT_NEXT); break;
 
+               case 0x2a2: map_key_clear(KEY_ALL_APPLICATIONS);        break;
+
                case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV);             break;
                case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT);             break;
                case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP);                break;
index 7106b92..c358778 100644 (file)
@@ -1068,6 +1068,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
                workitem.reports_supported |= STD_KEYBOARD;
                break;
        case 0x0f:
+       case 0x11:
                device_type = "eQUAD Lightspeed 1.2";
                logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
                workitem.reports_supported |= STD_KEYBOARD;
index b6a9a0f..2204de8 100644 (file)
@@ -2128,6 +2128,10 @@ static int nintendo_hid_probe(struct hid_device *hdev,
        spin_lock_init(&ctlr->lock);
        ctlr->rumble_queue = alloc_workqueue("hid-nintendo-rumble_wq",
                                             WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
+       if (!ctlr->rumble_queue) {
+               ret = -ENOMEM;
+               goto err;
+       }
        INIT_WORK(&ctlr->rumble_worker, joycon_rumble_worker);
 
        ret = hid_parse(hdev);
index 03b935f..c3e6d69 100644 (file)
@@ -64,7 +64,9 @@ struct tm_wheel_info {
  */
 static const struct tm_wheel_info tm_wheels_infos[] = {
        {0x0306, 0x0006, "Thrustmaster T150RS"},
+       {0x0200, 0x0005, "Thrustmaster T300RS (Missing Attachment)"},
        {0x0206, 0x0005, "Thrustmaster T300RS"},
+       {0x0209, 0x0005, "Thrustmaster T300RS (Open Wheel Attachment)"},
        {0x0204, 0x0005, "Thrustmaster T300 Ferrari Alcantara Edition"},
        {0x0002, 0x0002, "Thrustmaster T500RS"}
        //{0x0407, 0x0001, "Thrustmaster TMX"}
@@ -158,6 +160,12 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
                return;
        }
 
+       if (usbif->cur_altsetting->desc.bNumEndpoints < 2) {
+               kfree(send_buf);
+               hid_err(hdev, "Wrong number of endpoints?\n");
+               return;
+       }
+
        ep = &usbif->cur_altsetting->endpoint[1];
        b_ep = ep->desc.bEndpointAddress;
 
index efa6140..42ceb20 100644 (file)
@@ -144,7 +144,7 @@ out:
 static int vivaldi_input_configured(struct hid_device *hdev,
                                    struct hid_input *hidinput)
 {
-       return sysfs_create_group(&hdev->dev.kobj, &input_attribute_group);
+       return devm_device_add_group(&hdev->dev, &input_attribute_group);
 }
 
 static const struct hid_device_id vivaldi_table[] = {
index 0b61df5..290ea8a 100644 (file)
@@ -433,8 +433,7 @@ void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num)
 
        dev->port[port_num].dbg_cc_params = dbg_cc_params;
 
-       dbg_cc_params->root = debugfs_create_dir("cc_params",
-                                                mdev->priv.dbg_root);
+       dbg_cc_params->root = debugfs_create_dir("cc_params", mlx5_debugfs_get_dev_root(mdev));
 
        for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) {
                dbg_cc_params->params[i].offset = i;
index 85f526c..32a0ea8 100644 (file)
@@ -4178,7 +4178,7 @@ static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
        if (!mlx5_debugfs_root)
                return 0;
 
-       root = debugfs_create_dir("delay_drop", dev->mdev->priv.dbg_root);
+       root = debugfs_create_dir("delay_drop", mlx5_debugfs_get_dev_root(dev->mdev));
        dev->delay_drop.dir_debugfs = root;
 
        debugfs_create_atomic_t("num_timeout_events", 0400, root,
index 06e4b8c..32cb706 100644 (file)
@@ -696,7 +696,7 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
        if (!mlx5_debugfs_root || dev->is_rep)
                return;
 
-       cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
+       cache->root = debugfs_create_dir("mr_cache", mlx5_debugfs_get_dev_root(dev->mdev));
 
        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                ent = &cache->ent[i];
index 0c607da..9417ee0 100644 (file)
@@ -556,7 +556,7 @@ config KEYBOARD_PMIC8XXX
 
 config KEYBOARD_SAMSUNG
        tristate "Samsung keypad support"
-       depends on HAVE_CLK
+       depends on HAS_IOMEM && HAVE_CLK
        select INPUT_MATRIXKMAP
        help
          Say Y here if you want to use the keypad on your Samsung mobile
index 47af62c..e1758d5 100644 (file)
@@ -186,55 +186,21 @@ static int elan_get_fwinfo(u16 ic_type, u8 iap_version, u16 *validpage_count,
        return 0;
 }
 
-static int elan_enable_power(struct elan_tp_data *data)
+static int elan_set_power(struct elan_tp_data *data, bool on)
 {
        int repeat = ETP_RETRY_COUNT;
        int error;
 
-       error = regulator_enable(data->vcc);
-       if (error) {
-               dev_err(&data->client->dev,
-                       "failed to enable regulator: %d\n", error);
-               return error;
-       }
-
        do {
-               error = data->ops->power_control(data->client, true);
+               error = data->ops->power_control(data->client, on);
                if (error >= 0)
                        return 0;
 
                msleep(30);
        } while (--repeat > 0);
 
-       dev_err(&data->client->dev, "failed to enable power: %d\n", error);
-       return error;
-}
-
-static int elan_disable_power(struct elan_tp_data *data)
-{
-       int repeat = ETP_RETRY_COUNT;
-       int error;
-
-       do {
-               error = data->ops->power_control(data->client, false);
-               if (!error) {
-                       error = regulator_disable(data->vcc);
-                       if (error) {
-                               dev_err(&data->client->dev,
-                                       "failed to disable regulator: %d\n",
-                                       error);
-                               /* Attempt to power the chip back up */
-                               data->ops->power_control(data->client, true);
-                               break;
-                       }
-
-                       return 0;
-               }
-
-               msleep(30);
-       } while (--repeat > 0);
-
-       dev_err(&data->client->dev, "failed to disable power: %d\n", error);
+       dev_err(&data->client->dev, "failed to set power %s: %d\n",
+               on ? "on" : "off", error);
        return error;
 }
 
@@ -1399,9 +1365,19 @@ static int __maybe_unused elan_suspend(struct device *dev)
                /* Enable wake from IRQ */
                data->irq_wake = (enable_irq_wake(client->irq) == 0);
        } else {
-               ret = elan_disable_power(data);
+               ret = elan_set_power(data, false);
+               if (ret)
+                       goto err;
+
+               ret = regulator_disable(data->vcc);
+               if (ret) {
+                       dev_err(dev, "error %d disabling regulator\n", ret);
+                       /* Attempt to power the chip back up */
+                       elan_set_power(data, true);
+               }
        }
 
+err:
        mutex_unlock(&data->sysfs_mutex);
        return ret;
 }
@@ -1412,12 +1388,18 @@ static int __maybe_unused elan_resume(struct device *dev)
        struct elan_tp_data *data = i2c_get_clientdata(client);
        int error;
 
-       if (device_may_wakeup(dev) && data->irq_wake) {
+       if (!device_may_wakeup(dev)) {
+               error = regulator_enable(data->vcc);
+               if (error) {
+                       dev_err(dev, "error %d enabling regulator\n", error);
+                       goto err;
+               }
+       } else if (data->irq_wake) {
                disable_irq_wake(client->irq);
                data->irq_wake = false;
        }
 
-       error = elan_enable_power(data);
+       error = elan_set_power(data, true);
        if (error) {
                dev_err(dev, "power up when resuming failed: %d\n", error);
                goto err;
index a3bfc7a..752e8ba 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/delay.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/platform_data/x86/soc.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
 #include <linux/of.h>
@@ -805,21 +806,6 @@ static int goodix_reset(struct goodix_ts_data *ts)
 }
 
 #ifdef ACPI_GPIO_SUPPORT
-#include <asm/cpu_device_id.h>
-#include <asm/intel-family.h>
-
-static const struct x86_cpu_id baytrail_cpu_ids[] = {
-       { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, X86_FEATURE_ANY, },
-       {}
-};
-
-static inline bool is_byt(void)
-{
-       const struct x86_cpu_id *id = x86_match_cpu(baytrail_cpu_ids);
-
-       return !!id;
-}
-
 static const struct acpi_gpio_params first_gpio = { 0, 0, false };
 static const struct acpi_gpio_params second_gpio = { 1, 0, false };
 
@@ -878,7 +864,7 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
        const struct acpi_gpio_mapping *gpio_mapping = NULL;
        struct device *dev = &ts->client->dev;
        LIST_HEAD(resources);
-       int ret;
+       int irq, ret;
 
        ts->gpio_count = 0;
        ts->gpio_int_idx = -1;
@@ -891,6 +877,20 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
 
        acpi_dev_free_resource_list(&resources);
 
+       /*
+        * CHT devices should have a GpioInt + a regular GPIO ACPI resource.
+        * Some CHT devices have a bug where there also is a bogus Interrupt
+        * resource, copied from a previous BYT-based generation. i2c-core-acpi
+        * will use the non-working Interrupt resource; fix this up.
+        */
+       if (soc_intel_is_cht() && ts->gpio_count == 2 && ts->gpio_int_idx != -1) {
+               irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 0);
+               if (irq > 0 && irq != ts->client->irq) {
+                       dev_warn(dev, "Overriding IRQ %d -> %d\n", ts->client->irq, irq);
+                       ts->client->irq = irq;
+               }
+       }
+
        if (ts->gpio_count == 2 && ts->gpio_int_idx == 0) {
                ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
                gpio_mapping = acpi_goodix_int_first_gpios;
@@ -903,7 +903,7 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
                dev_info(dev, "Using ACPI INTI and INTO methods for IRQ pin access\n");
                ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_METHOD;
                gpio_mapping = acpi_goodix_reset_only_gpios;
-       } else if (is_byt() && ts->gpio_count == 2 && ts->gpio_int_idx == -1) {
+       } else if (soc_intel_is_byt() && ts->gpio_count == 2 && ts->gpio_int_idx == -1) {
                dev_info(dev, "No ACPI GpioInt resource, assuming that the GPIO order is reset, int\n");
                ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
                gpio_mapping = acpi_goodix_int_last_gpios;
index 416815a..bb95edf 100644 (file)
@@ -14,6 +14,7 @@
 extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
 extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
 extern void amd_iommu_apply_erratum_63(u16 devid);
+extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
 extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
 extern int amd_iommu_init_devices(void);
 extern void amd_iommu_uninit_devices(void);
index ffc89c4..47108ed 100644 (file)
 #define PASID_MASK             0x0000ffff
 
 /* MMIO status bits */
+#define MMIO_STATUS_EVT_OVERFLOW_INT_MASK      (1 << 0)
 #define MMIO_STATUS_EVT_INT_MASK       (1 << 1)
 #define MMIO_STATUS_COM_WAIT_INT_MASK  (1 << 2)
 #define MMIO_STATUS_PPR_INT_MASK       (1 << 6)
index b10fb52..7bfe37e 100644 (file)
@@ -657,6 +657,16 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu)
        return iommu->cmd_buf ? 0 : -ENOMEM;
 }
 
+/*
+ * This function restarts event logging in case the IOMMU experienced
+ * an event log buffer overflow.
+ */
+void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
+{
+       iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
+       iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+}
+
 /*
  * This function resets the command buffer if the IOMMU stopped fetching
  * commands from it.
index b1bf412..6608d17 100644 (file)
@@ -492,18 +492,18 @@ static void v1_free_pgtable(struct io_pgtable *iop)
 
        dom = container_of(pgtable, struct protection_domain, iop);
 
-       /* Update data structure */
-       amd_iommu_domain_clr_pt_root(dom);
-
-       /* Make changes visible to IOMMUs */
-       amd_iommu_domain_update(dom);
-
        /* Page-table is not visible to IOMMU anymore, so free it */
        BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
               pgtable->mode > PAGE_MODE_6_LEVEL);
 
        free_sub_pt(pgtable->root, pgtable->mode, &freelist);
 
+       /* Update data structure */
+       amd_iommu_domain_clr_pt_root(dom);
+
+       /* Make changes visible to IOMMUs */
+       amd_iommu_domain_update(dom);
+
        put_pages_list(&freelist);
 }
 
index 461f184..a18b549 100644 (file)
@@ -764,7 +764,8 @@ amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
 #endif /* !CONFIG_IRQ_REMAP */
 
 #define AMD_IOMMU_INT_MASK     \
-       (MMIO_STATUS_EVT_INT_MASK | \
+       (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \
+        MMIO_STATUS_EVT_INT_MASK | \
         MMIO_STATUS_PPR_INT_MASK | \
         MMIO_STATUS_GALOG_INT_MASK)
 
@@ -774,7 +775,7 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
        u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 
        while (status & AMD_IOMMU_INT_MASK) {
-               /* Enable EVT and PPR and GA interrupts again */
+               /* Enable interrupt sources again */
                writel(AMD_IOMMU_INT_MASK,
                        iommu->mmio_base + MMIO_STATUS_OFFSET);
 
@@ -795,6 +796,11 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
                }
 #endif
 
+               if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) {
+                       pr_info_ratelimited("IOMMU event log overflow\n");
+                       amd_iommu_restart_event_logging(iommu);
+               }
+
                /*
                 * Hardware bug: ERBT1312
                 * When re-enabling interrupt (by writing 1
index 92fea3f..5b196cf 100644 (file)
@@ -2738,7 +2738,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
        spin_unlock_irqrestore(&device_domain_lock, flags);
 
        /* PASID table is mandatory for a PCI device in scalable mode. */
-       if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
+       if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
                ret = intel_pasid_alloc_table(dev);
                if (ret) {
                        dev_err(dev, "PASID table allocation failed\n");
index e900e3c..2561ce8 100644 (file)
@@ -808,8 +808,10 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
                return NULL;
 
        mc = platform_get_drvdata(pdev);
-       if (!mc)
+       if (!mc) {
+               put_device(&pdev->dev);
                return NULL;
+       }
 
        return mc->smmu;
 }
index bd087cc..af17459 100644 (file)
@@ -2005,7 +2005,11 @@ setup_hw(struct hfc_pci *hc)
        }
        /* Allocate memory for FIFOS */
        /* the memory needs to be on a 32k boundary within the first 4G */
-       dma_set_mask(&hc->pdev->dev, 0xFFFF8000);
+       if (dma_set_mask(&hc->pdev->dev, 0xFFFF8000)) {
+               printk(KERN_WARNING
+                      "HFC-PCI: No usable DMA configuration!\n");
+               return -EIO;
+       }
        buffer = dma_alloc_coherent(&hc->pdev->dev, 0x8000, &hc->hw.dmahandle,
                                    GFP_KERNEL);
        /* We silently assume the address is okay if nonzero */
index 39f841b..4f8d85b 100644 (file)
@@ -1062,7 +1062,7 @@ ipac_rme(struct hscx_hw *hx)
        if (!hx->bch.rx_skb)
                return;
        if (hx->bch.rx_skb->len < 2) {
-               pr_debug("%s: B%1d frame to short %d\n",
+               pr_debug("%s: B%1d frame too short %d\n",
                         hx->ip->name, hx->bch.nr, hx->bch.rx_skb->len);
                skb_trim(hx->bch.rx_skb, 0);
        } else {
index 5694340..48b3d43 100644 (file)
@@ -466,7 +466,7 @@ isar_rcv_frame(struct isar_ch *ch)
                rcv_mbox(ch->is, ptr);
                if (ch->is->cmsb & HDLC_FED) {
                        if (ch->bch.rx_skb->len < 3) { /* last 2 are the FCS */
-                               pr_debug("%s: ISAR frame to short %d\n",
+                               pr_debug("%s: ISAR frame too short %d\n",
                                         ch->is->name, ch->bch.rx_skb->len);
                                skb_trim(ch->bch.rx_skb, 0);
                                break;
@@ -542,7 +542,7 @@ isar_rcv_frame(struct isar_ch *ch)
                rcv_mbox(ch->is, ptr);
                if (ch->is->cmsb & HDLC_FED) {
                        if (ch->bch.rx_skb->len < 3) { /* last 2 are the FCS */
-                               pr_info("%s: ISAR frame to short %d\n",
+                               pr_info("%s: ISAR frame too short %d\n",
                                        ch->is->name, ch->bch.rx_skb->len);
                                skb_trim(ch->bch.rx_skb, 0);
                                break;
index e11ca6b..c3b2c99 100644 (file)
@@ -192,7 +192,7 @@ void dsp_pipeline_destroy(struct dsp_pipeline *pipeline)
 int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
 {
        int found = 0;
-       char *dup, *tok, *name, *args;
+       char *dup, *next, *tok, *name, *args;
        struct dsp_element_entry *entry, *n;
        struct dsp_pipeline_entry *pipeline_entry;
        struct mISDN_dsp_element *elem;
@@ -203,10 +203,10 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
        if (!list_empty(&pipeline->list))
                _dsp_pipeline_destroy(pipeline);
 
-       dup = kstrdup(cfg, GFP_ATOMIC);
+       dup = next = kstrdup(cfg, GFP_ATOMIC);
        if (!dup)
                return 0;
-       while ((tok = strsep(&dup, "|"))) {
+       while ((tok = strsep(&next, "|"))) {
                if (!strlen(tok))
                        continue;
                name = strsep(&tok, "(");
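
The extra pointer matters because strsep() advances the pointer it is handed: previously `dup` itself was the cursor, so it was NULL by the end of the loop and the trailing kfree(dup) (outside this hunk) freed nothing, leaking the kstrdup() buffer. The fixed pattern in miniature:

    char *dup, *next, *tok;

    dup = next = kstrdup(cfg, GFP_ATOMIC);
    if (!dup)
            return 0;
    while ((tok = strsep(&next, "|"))) {
            /* parse tok; strsep() has advanced next past it */
    }
    kfree(dup);    /* still the original allocation; next is NULL here */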
index 8d718aa..4e67c14 100644 (file)
@@ -1908,7 +1908,7 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
 
        cb_data.card = card;
        cb_data.status = 0;
-       err = __mmc_poll_for_busy(card->host, MMC_BLK_TIMEOUT_MS,
+       err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS,
                                  &mmc_blk_busy_cb, &cb_data);
 
        /*
index bbbbcaf..8421519 100644 (file)
@@ -1962,7 +1962,7 @@ static int mmc_sleep(struct mmc_host *host)
                goto out_release;
        }
 
-       err = __mmc_poll_for_busy(host, timeout_ms, &mmc_sleep_busy_cb, host);
+       err = __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_sleep_busy_cb, host);
 
 out_release:
        mmc_retune_release(host);
index d63d1c7..180d7e9 100644 (file)
@@ -21,6 +21,8 @@
 
 #define MMC_BKOPS_TIMEOUT_MS           (120 * 1000) /* 120s */
 #define MMC_SANITIZE_TIMEOUT_MS                (240 * 1000) /* 240s */
+#define MMC_OP_COND_PERIOD_US          (1 * 1000) /* 1ms */
+#define MMC_OP_COND_TIMEOUT_MS         1000 /* 1s */
 
 static const u8 tuning_blk_pattern_4bit[] = {
        0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
@@ -232,7 +234,9 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
        cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
 
-       err = __mmc_poll_for_busy(host, 1000, &__mmc_send_op_cond_cb, &cb_data);
+       err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US,
+                                 MMC_OP_COND_TIMEOUT_MS,
+                                 &__mmc_send_op_cond_cb, &cb_data);
        if (err)
                return err;
 
@@ -495,13 +499,14 @@ static int mmc_busy_cb(void *cb_data, bool *busy)
        return 0;
 }
 
-int __mmc_poll_for_busy(struct mmc_host *host, unsigned int timeout_ms,
+int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
+                       unsigned int timeout_ms,
                        int (*busy_cb)(void *cb_data, bool *busy),
                        void *cb_data)
 {
        int err;
        unsigned long timeout;
-       unsigned int udelay = 32, udelay_max = 32768;
+       unsigned int udelay = period_us ? period_us : 32, udelay_max = 32768;
        bool expired = false;
        bool busy = false;
 
@@ -546,7 +551,7 @@ int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
        cb_data.retry_crc_err = retry_crc_err;
        cb_data.busy_cmd = busy_cmd;
 
-       return __mmc_poll_for_busy(host, timeout_ms, &mmc_busy_cb, &cb_data);
+       return __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_busy_cb, &cb_data);
 }
 EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
 
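
The call-site hunks across block.c, mmc.c, sd.c and mmc_ops.c all follow from the new first argument: the poll loop starts at period_us microseconds when the caller supplies one and falls back to the historical 32 us (capped at 32768 us per the hunk above). Existing callers pass 0 to keep the old cadence; only CMD1 polling opts into the gentler 1 ms start. Sketched, with &cb/&data standing in for the callback and its cookie:

    /* unchanged behaviour: first poll interval ~32 us */
    err = __mmc_poll_for_busy(host, 0, MMC_BLK_TIMEOUT_MS, &cb, &data);

    /* mmc_send_op_cond(): 1 ms period, 1 s total budget */
    err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US,
                              MMC_OP_COND_TIMEOUT_MS, &cb, &data);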
index 9c813b8..09ffbc0 100644 (file)
@@ -41,7 +41,8 @@ int mmc_can_ext_csd(struct mmc_card *card);
 int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
 bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
                          unsigned int timeout_ms);
-int __mmc_poll_for_busy(struct mmc_host *host, unsigned int timeout_ms,
+int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
+                       unsigned int timeout_ms,
                        int (*busy_cb)(void *cb_data, bool *busy),
                        void *cb_data);
 int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
index bd87012..bfbfed3 100644 (file)
@@ -1672,7 +1672,7 @@ static int sd_poweroff_notify(struct mmc_card *card)
 
        cb_data.card = card;
        cb_data.reg_buf = reg_buf;
-       err = __mmc_poll_for_busy(card->host, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
+       err = __mmc_poll_for_busy(card->host, 0, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
                                  &sd_busy_poweroff_notify_cb, &cb_data);
 
 out:
index 8f36536..58ab9d9 100644 (file)
@@ -173,6 +173,8 @@ struct meson_host {
        int irq;
 
        bool vqmmc_enabled;
+       bool needs_pre_post_req;
+
 };
 
 #define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
@@ -663,6 +665,8 @@ static void meson_mmc_request_done(struct mmc_host *mmc,
        struct meson_host *host = mmc_priv(mmc);
 
        host->cmd = NULL;
+       if (host->needs_pre_post_req)
+               meson_mmc_post_req(mmc, mrq, 0);
        mmc_request_done(host->mmc, mrq);
 }
 
@@ -880,7 +884,7 @@ static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data
 static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
        struct meson_host *host = mmc_priv(mmc);
-       bool needs_pre_post_req = mrq->data &&
+       host->needs_pre_post_req = mrq->data &&
                        !(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);
 
        /*
@@ -896,22 +900,19 @@ static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
                }
        }
 
-       if (needs_pre_post_req) {
+       if (host->needs_pre_post_req) {
                meson_mmc_get_transfer_mode(mmc, mrq);
                if (!meson_mmc_desc_chain_mode(mrq->data))
-                       needs_pre_post_req = false;
+                       host->needs_pre_post_req = false;
        }
 
-       if (needs_pre_post_req)
+       if (host->needs_pre_post_req)
                meson_mmc_pre_req(mmc, mrq);
 
        /* Stop execution */
        writel(0, host->regs + SD_EMMC_START);
 
        meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd);
-
-       if (needs_pre_post_req)
-               meson_mmc_post_req(mmc, mrq, 0);
 }
 
 static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
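
The meson hunks above fix a use-after-unmap: meson_mmc_request() used to
call meson_mmc_post_req() right after starting the command, tearing down
the DMA mapping while the transfer could still be in flight. The flag moves
into struct meson_host so the unmap can happen in meson_mmc_request_done()
instead. A toy before/after of the control flow, with placeholder helpers:

#include <stdbool.h>
#include <stdio.h>

struct host { bool needs_unmap; };

static void dma_map(void)    { puts("map buffers for DMA"); }
static void dma_unmap(void)  { puts("unmap buffers"); }
static void start_xfer(void) { puts("start transfer"); }

/* Old shape (the bug): unmap right after start, while the controller
 * may still be walking the descriptors/buffers. */
static void submit_old(struct host *h)
{
	h->needs_unmap = true;
	dma_map();
	start_xfer();
	dma_unmap();		/* too early */
}

/* New shape: remember the decision, unmap on completion. */
static void submit_new(struct host *h)
{
	h->needs_unmap = true;
	dma_map();
	start_xfer();
}

static void complete_new(struct host *h)
{
	if (h->needs_unmap)
		dma_unmap();	/* hardware is done with the buffers */
}
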
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index d986ab4..820e5dc 100644
@@ -42,8 +42,7 @@ config MTD_NAND_OMAP2
        tristate "OMAP2, OMAP3, OMAP4 and Keystone NAND controller"
        depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
        depends on HAS_IOMEM
-       select MEMORY
-       select OMAP_GPMC
+       depends on OMAP_GPMC
        help
          Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
          and Keystone platforms.
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index ba587e5..683203f 100644
@@ -148,14 +148,14 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
        skb_reset_network_header(skb);
        skb_reset_mac_header(skb);
 
-       if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
+       if (!ipv6_mod_enabled() || family == AF_INET)
                err = IP_ECN_decapsulate(oiph, skb);
        else
                err = IP6_ECN_decapsulate(oiph, skb);
 
        if (unlikely(err)) {
                if (log_ecn_error) {
-                       if  (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
+                       if  (!ipv6_mod_enabled() || family == AF_INET)
                                net_info_ratelimited("non-ECT from %pI4 "
                                                     "with TOS=%#x\n",
                                                     &((struct iphdr *)oiph)->saddr,
@@ -221,11 +221,12 @@ static struct socket *bareudp_create_sock(struct net *net, __be16 port)
        int err;
 
        memset(&udp_conf, 0, sizeof(udp_conf));
-#if IS_ENABLED(CONFIG_IPV6)
-       udp_conf.family = AF_INET6;
-#else
-       udp_conf.family = AF_INET;
-#endif
+
+       if (ipv6_mod_enabled())
+               udp_conf.family = AF_INET6;
+       else
+               udp_conf.family = AF_INET;
+
        udp_conf.local_udp_port = port;
        /* Open UDP socket */
        err = udp_sock_create(net, &udp_conf, &sock);
@@ -448,7 +449,7 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        rcu_read_lock();
-       if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6)
+       if (ipv6_mod_enabled() && info->mode & IP_TUNNEL_INFO_IPV6)
                err = bareudp6_xmit_skb(skb, dev, bareudp, info);
        else
                err = bareudp_xmit_skb(skb, dev, bareudp, info);
@@ -478,7 +479,7 @@ static int bareudp_fill_metadata_dst(struct net_device *dev,
 
        use_cache = ip_tunnel_dst_cache_usable(skb, info);
 
-       if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) {
+       if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) {
                struct rtable *rt;
                __be32 saddr;
 
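The bareudp conversion swaps compile-time IS_ENABLED(CONFIG_IPV6) checks
for runtime ipv6_mod_enabled(), which is also false when ipv6 is built as
a module that never loaded or was disabled on the kernel command line, a
case the compile-time test cannot see. The dispatch pattern, as a
kernel-context sketch (not compilable stand-alone):

/* One runtime branch replaces the old #if/#else on CONFIG_IPV6. */
static int decap_ecn(void *oiph, struct sk_buff *skb, unsigned short family)
{
	if (!ipv6_mod_enabled() || family == AF_INET)
		return IP_ECN_decapsulate(oiph, skb);

	return IP6_ECN_decapsulate(oiph, skb);
}
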
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 55e0ba2..15eddca 100644
@@ -5120,7 +5120,7 @@ static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
        if (xmit_suc)
                return NETDEV_TX_OK;
 
-       atomic_long_inc(&bond_dev->tx_dropped);
+       dev_core_stats_tx_dropped_inc(bond_dev);
        return NET_XMIT_DROP;
 }
 
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 6a6cdd0..69b0a37 100644
@@ -15,14 +15,8 @@ struct slave_attribute {
        ssize_t (*show)(struct slave *, char *);
 };
 
-#define SLAVE_ATTR(_name, _mode, _show)                                \
-const struct slave_attribute slave_attr_##_name = {            \
-       .attr = {.name = __stringify(_name),                    \
-                .mode = _mode },                               \
-       .show   = _show,                                        \
-};
 #define SLAVE_ATTR_RO(_name)                                   \
-       SLAVE_ATTR(_name, 0444, _name##_show)
+const struct slave_attribute slave_attr_##_name = __ATTR_RO(_name)
 
 static ssize_t state_show(struct slave *slave, char *buf)
 {
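
The sysfs cleanup above replaces the hand-rolled SLAVE_ATTR() initializer
with the generic __ATTR_RO() macro from <linux/sysfs.h>, which produces the
same name/0444/show triple. Roughly, SLAVE_ATTR_RO(state) now expands to:

const struct slave_attribute slave_attr_state = {
	.attr = { .name = "state", .mode = 0444 },
	.show = state_show,
};
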
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index c192f25..e7ab45f 100644
@@ -154,7 +154,7 @@ static void can_restart(struct net_device *dev)
 
        cf->can_id |= CAN_ERR_RESTARTED;
 
-       netif_rx_ni(skb);
+       netif_rx(skb);
 
 restart:
        netdev_dbg(dev, "restarted\n");
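
This conversion, like the SPI CAN ones further down, relies on netif_rx()
having been made callable from any context earlier in this cycle (it now
disables bottom halves itself when invoked from process context), which
leaves the process-context wrapper netif_rx_ni() with nothing to add. Call
sites collapse to a single form:

/* Kernel-context sketch: no more choosing between netif_rx()
 * (irq/bh context) and netif_rx_ni() (process context). */
static void deliver(struct sk_buff *skb)
{
	netif_rx(skb);	/* safe from process, softirq or hardirq context */
}
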
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index acd7472..1e121e0 100644
@@ -44,6 +44,7 @@
 enum rcanfd_chip_id {
        RENESAS_RCAR_GEN3 = 0,
        RENESAS_RZG2L,
+       RENESAS_R8A779A0,
 };
 
 /* Global register bits */
@@ -79,6 +80,7 @@ enum rcanfd_chip_id {
 #define RCANFD_GSTS_GNOPM              (BIT(0) | BIT(1) | BIT(2) | BIT(3))
 
 /* RSCFDnCFDGERFL / RSCFDnGERFL */
+#define RCANFD_GERFL_EEF0_7            GENMASK(23, 16)
 #define RCANFD_GERFL_EEF1              BIT(17)
 #define RCANFD_GERFL_EEF0              BIT(16)
 #define RCANFD_GERFL_CMPOF             BIT(3)  /* CAN FD only */
@@ -86,20 +88,26 @@ enum rcanfd_chip_id {
 #define RCANFD_GERFL_MES               BIT(1)
 #define RCANFD_GERFL_DEF               BIT(0)
 
-#define RCANFD_GERFL_ERR(gpriv, x)     ((x) & (RCANFD_GERFL_EEF1 |\
-                                       RCANFD_GERFL_EEF0 | RCANFD_GERFL_MES |\
-                                       (gpriv->fdmode ?\
-                                        RCANFD_GERFL_CMPOF : 0)))
+#define RCANFD_GERFL_ERR(gpriv, x) \
+       ((x) & (reg_v3u(gpriv, RCANFD_GERFL_EEF0_7, \
+                       RCANFD_GERFL_EEF0 | RCANFD_GERFL_EEF1) | \
+               RCANFD_GERFL_MES | \
+               ((gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0)))
 
 /* AFL Rx rules registers */
 
 /* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
-#define RCANFD_GAFLCFG_SETRNC(n, x)    (((x) & 0xff) << (24 - n * 8))
-#define RCANFD_GAFLCFG_GETRNC(n, x)    (((x) >> (24 - n * 8)) & 0xff)
+#define RCANFD_GAFLCFG_SETRNC(gpriv, n, x) \
+       (((x) & reg_v3u(gpriv, 0x1ff, 0xff)) << \
+        (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8)))
+
+#define RCANFD_GAFLCFG_GETRNC(gpriv, n, x) \
+       (((x) >> (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8))) & \
+        reg_v3u(gpriv, 0x1ff, 0xff))
 
 /* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
 #define RCANFD_GAFLECTR_AFLDAE         BIT(8)
-#define RCANFD_GAFLECTR_AFLPN(x)       ((x) & 0x1f)
+#define RCANFD_GAFLECTR_AFLPN(gpriv, x)        ((x) & reg_v3u(gpriv, 0x7f, 0x1f))
 
 /* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */
 #define RCANFD_GAFLID_GAFLLB           BIT(29)
@@ -116,9 +124,15 @@ enum rcanfd_chip_id {
 #define RCANFD_CFG_BRP(x)              (((x) & 0x3ff) << 0)
 
 /* RSCFDnCFDCmNCFG - CAN FD only */
-#define RCANFD_NCFG_NTSEG2(x)          (((x) & 0x1f) << 24)
-#define RCANFD_NCFG_NTSEG1(x)          (((x) & 0x7f) << 16)
-#define RCANFD_NCFG_NSJW(x)            (((x) & 0x1f) << 11)
+#define RCANFD_NCFG_NTSEG2(gpriv, x) \
+       (((x) & reg_v3u(gpriv, 0x7f, 0x1f)) << reg_v3u(gpriv, 25, 24))
+
+#define RCANFD_NCFG_NTSEG1(gpriv, x) \
+       (((x) & reg_v3u(gpriv, 0xff, 0x7f)) << reg_v3u(gpriv, 17, 16))
+
+#define RCANFD_NCFG_NSJW(gpriv, x) \
+       (((x) & reg_v3u(gpriv, 0x7f, 0x1f)) << reg_v3u(gpriv, 10, 11))
+
 #define RCANFD_NCFG_NBRP(x)            (((x) & 0x3ff) << 0)
 
 /* RSCFDnCFDCmCTR / RSCFDnCmCTR */
@@ -180,11 +194,18 @@ enum rcanfd_chip_id {
 
 /* RSCFDnCFDCmDCFG */
 #define RCANFD_DCFG_DSJW(x)            (((x) & 0x7) << 24)
-#define RCANFD_DCFG_DTSEG2(x)          (((x) & 0x7) << 20)
-#define RCANFD_DCFG_DTSEG1(x)          (((x) & 0xf) << 16)
+
+#define RCANFD_DCFG_DTSEG2(gpriv, x) \
+       (((x) & reg_v3u(gpriv, 0x0f, 0x7)) << reg_v3u(gpriv, 16, 20))
+
+#define RCANFD_DCFG_DTSEG1(gpriv, x) \
+       (((x) & reg_v3u(gpriv, 0x1f, 0xf)) << reg_v3u(gpriv, 8, 16))
+
 #define RCANFD_DCFG_DBRP(x)            (((x) & 0xff) << 0)
 
 /* RSCFDnCFDCmFDCFG */
+#define RCANFD_FDCFG_CLOE              BIT(30)
+#define RCANFD_FDCFG_FDOE              BIT(28)
 #define RCANFD_FDCFG_TDCE              BIT(9)
 #define RCANFD_FDCFG_TDCOC             BIT(8)
 #define RCANFD_FDCFG_TDCO(x)           (((x) & 0x7f) >> 16)
@@ -219,10 +240,10 @@ enum rcanfd_chip_id {
 /* Common FIFO bits */
 
 /* RSCFDnCFDCFCCk */
-#define RCANFD_CFCC_CFTML(x)           (((x) & 0xf) << 20)
-#define RCANFD_CFCC_CFM(x)             (((x) & 0x3) << 16)
+#define RCANFD_CFCC_CFTML(gpriv, x)    (((x) & 0xf) << reg_v3u(gpriv, 16, 20))
+#define RCANFD_CFCC_CFM(gpriv, x)      (((x) & 0x3) << reg_v3u(gpriv,  8, 16))
 #define RCANFD_CFCC_CFIM               BIT(12)
-#define RCANFD_CFCC_CFDC(x)            (((x) & 0x7) << 8)
+#define RCANFD_CFCC_CFDC(gpriv, x)     (((x) & 0x7) << reg_v3u(gpriv, 21,  8))
 #define RCANFD_CFCC_CFPLS(x)           (((x) & 0x7) << 4)
 #define RCANFD_CFCC_CFTXIE             BIT(2)
 #define RCANFD_CFCC_CFE                        BIT(0)
@@ -282,33 +303,31 @@ enum rcanfd_chip_id {
 #define RCANFD_GTSC                    (0x0094)
 /* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
 #define RCANFD_GAFLECTR                        (0x0098)
-/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
-#define RCANFD_GAFLCFG0                        (0x009c)
-/* RSCFDnCFDGAFLCFG1 / RSCFDnGAFLCFG1 */
-#define RCANFD_GAFLCFG1                        (0x00a0)
+/* RSCFDnCFDGAFLCFG / RSCFDnGAFLCFG */
+#define RCANFD_GAFLCFG(ch)             (0x009c + (0x04 * ((ch) / 2)))
 /* RSCFDnCFDRMNB / RSCFDnRMNB */
 #define RCANFD_RMNB                    (0x00a4)
 /* RSCFDnCFDRMND / RSCFDnRMND */
 #define RCANFD_RMND(y)                 (0x00a8 + (0x04 * (y)))
 
 /* RSCFDnCFDRFCCx / RSCFDnRFCCx */
-#define RCANFD_RFCC(x)                 (0x00b8 + (0x04 * (x)))
+#define RCANFD_RFCC(gpriv, x)          (reg_v3u(gpriv, 0x00c0, 0x00b8) + (0x04 * (x)))
 /* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */
-#define RCANFD_RFSTS(x)                        (0x00d8 + (0x04 * (x)))
+#define RCANFD_RFSTS(gpriv, x)         (RCANFD_RFCC(gpriv, x) + 0x20)
 /* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */
-#define RCANFD_RFPCTR(x)               (0x00f8 + (0x04 * (x)))
+#define RCANFD_RFPCTR(gpriv, x)                (RCANFD_RFCC(gpriv, x) + 0x40)
 
 /* Common FIFO Control registers */
 
 /* RSCFDnCFDCFCCx / RSCFDnCFCCx */
-#define RCANFD_CFCC(ch, idx)           (0x0118 + (0x0c * (ch)) + \
-                                        (0x04 * (idx)))
+#define RCANFD_CFCC(gpriv, ch, idx) \
+       (reg_v3u(gpriv, 0x0120, 0x0118) + (0x0c * (ch)) + (0x04 * (idx)))
 /* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */
-#define RCANFD_CFSTS(ch, idx)          (0x0178 + (0x0c * (ch)) + \
-                                        (0x04 * (idx)))
+#define RCANFD_CFSTS(gpriv, ch, idx) \
+       (reg_v3u(gpriv, 0x01e0, 0x0178) + (0x0c * (ch)) + (0x04 * (idx)))
 /* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */
-#define RCANFD_CFPCTR(ch, idx)         (0x01d8 + (0x0c * (ch)) + \
-                                        (0x04 * (idx)))
+#define RCANFD_CFPCTR(gpriv, ch, idx) \
+       (reg_v3u(gpriv, 0x0240, 0x01d8) + (0x0c * (ch)) + (0x04 * (idx)))
 
 /* RSCFDnCFDFESTS / RSCFDnFESTS */
 #define RCANFD_FESTS                   (0x0238)
@@ -387,22 +406,23 @@ enum rcanfd_chip_id {
 #define RCANFD_C_RMDF1(q)              (0x060c + (0x10 * (q)))
 
 /* RSCFDnRFXXx -> RCANFD_C_RFXX(x) */
-#define RCANFD_C_RFOFFSET              (0x0e00)
-#define RCANFD_C_RFID(x)               (RCANFD_C_RFOFFSET + (0x10 * (x)))
-#define RCANFD_C_RFPTR(x)              (RCANFD_C_RFOFFSET + 0x04 + \
-                                        (0x10 * (x)))
-#define RCANFD_C_RFDF(x, df)           (RCANFD_C_RFOFFSET + 0x08 + \
-                                        (0x10 * (x)) + (0x04 * (df)))
+#define RCANFD_C_RFOFFSET      (0x0e00)
+#define RCANFD_C_RFID(x)       (RCANFD_C_RFOFFSET + (0x10 * (x)))
+#define RCANFD_C_RFPTR(x)      (RCANFD_C_RFOFFSET + 0x04 + (0x10 * (x)))
+#define RCANFD_C_RFDF(x, df) \
+               (RCANFD_C_RFOFFSET + 0x08 + (0x10 * (x)) + (0x04 * (df)))
 
 /* RSCFDnCFXXk -> RCANFD_C_CFXX(ch, k) */
 #define RCANFD_C_CFOFFSET              (0x0e80)
-#define RCANFD_C_CFID(ch, idx)         (RCANFD_C_CFOFFSET + (0x30 * (ch)) + \
-                                        (0x10 * (idx)))
-#define RCANFD_C_CFPTR(ch, idx)                (RCANFD_C_CFOFFSET + 0x04 + \
-                                        (0x30 * (ch)) + (0x10 * (idx)))
-#define RCANFD_C_CFDF(ch, idx, df)     (RCANFD_C_CFOFFSET + 0x08 + \
-                                        (0x30 * (ch)) + (0x10 * (idx)) + \
-                                        (0x04 * (df)))
+
+#define RCANFD_C_CFID(ch, idx) \
+       (RCANFD_C_CFOFFSET + (0x30 * (ch)) + (0x10 * (idx)))
+
+#define RCANFD_C_CFPTR(ch, idx)        \
+       (RCANFD_C_CFOFFSET + 0x04 + (0x30 * (ch)) + (0x10 * (idx)))
+
+#define RCANFD_C_CFDF(ch, idx, df) \
+       (RCANFD_C_CFOFFSET + 0x08 + (0x30 * (ch)) + (0x10 * (idx)) + (0x04 * (df)))
 
 /* RSCFDnTMXXp -> RCANFD_C_TMXX(p) */
 #define RCANFD_C_TMID(p)               (0x1000 + (0x10 * (p)))
@@ -415,6 +435,12 @@ enum rcanfd_chip_id {
 /* RSCFDnRPGACCr */
 #define RCANFD_C_RPGACC(r)             (0x1900 + (0x04 * (r)))
 
+/* R-Car V3U Classical and CAN FD mode specific register map */
+#define RCANFD_V3U_CFDCFG              (0x1314)
+#define RCANFD_V3U_DCFG(m)             (0x1400 + (0x20 * (m)))
+
+#define RCANFD_V3U_GAFL_OFFSET         (0x1800)
+
 /* CAN FD mode specific register map */
 
 /* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
@@ -434,26 +460,28 @@ enum rcanfd_chip_id {
 #define RCANFD_F_RMDF(q, b)            (0x200c + (0x04 * (b)) + (0x20 * (q)))
 
 /* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */
-#define RCANFD_F_RFOFFSET              (0x3000)
-#define RCANFD_F_RFID(x)               (RCANFD_F_RFOFFSET + (0x80 * (x)))
-#define RCANFD_F_RFPTR(x)              (RCANFD_F_RFOFFSET + 0x04 + \
-                                        (0x80 * (x)))
-#define RCANFD_F_RFFDSTS(x)            (RCANFD_F_RFOFFSET + 0x08 + \
-                                        (0x80 * (x)))
-#define RCANFD_F_RFDF(x, df)           (RCANFD_F_RFOFFSET + 0x0c + \
-                                        (0x80 * (x)) + (0x04 * (df)))
+#define RCANFD_F_RFOFFSET(gpriv)       reg_v3u(gpriv, 0x6000, 0x3000)
+#define RCANFD_F_RFID(gpriv, x)                (RCANFD_F_RFOFFSET(gpriv) + (0x80 * (x)))
+#define RCANFD_F_RFPTR(gpriv, x)       (RCANFD_F_RFOFFSET(gpriv) + 0x04 + (0x80 * (x)))
+#define RCANFD_F_RFFDSTS(gpriv, x)     (RCANFD_F_RFOFFSET(gpriv) + 0x08 + (0x80 * (x)))
+#define RCANFD_F_RFDF(gpriv, x, df) \
+       (RCANFD_F_RFOFFSET(gpriv) + 0x0c + (0x80 * (x)) + (0x04 * (df)))
 
 /* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */
-#define RCANFD_F_CFOFFSET              (0x3400)
-#define RCANFD_F_CFID(ch, idx)         (RCANFD_F_CFOFFSET + (0x180 * (ch)) + \
-                                        (0x80 * (idx)))
-#define RCANFD_F_CFPTR(ch, idx)                (RCANFD_F_CFOFFSET + 0x04 + \
-                                        (0x180 * (ch)) + (0x80 * (idx)))
-#define RCANFD_F_CFFDCSTS(ch, idx)     (RCANFD_F_CFOFFSET + 0x08 + \
-                                        (0x180 * (ch)) + (0x80 * (idx)))
-#define RCANFD_F_CFDF(ch, idx, df)     (RCANFD_F_CFOFFSET + 0x0c + \
-                                        (0x180 * (ch)) + (0x80 * (idx)) + \
-                                        (0x04 * (df)))
+#define RCANFD_F_CFOFFSET(gpriv)       reg_v3u(gpriv, 0x6400, 0x3400)
+
+#define RCANFD_F_CFID(gpriv, ch, idx) \
+       (RCANFD_F_CFOFFSET(gpriv) + (0x180 * (ch)) + (0x80 * (idx)))
+
+#define RCANFD_F_CFPTR(gpriv, ch, idx) \
+       (RCANFD_F_CFOFFSET(gpriv) + 0x04 + (0x180 * (ch)) + (0x80 * (idx)))
+
+#define RCANFD_F_CFFDCSTS(gpriv, ch, idx) \
+       (RCANFD_F_CFOFFSET(gpriv) + 0x08 + (0x180 * (ch)) + (0x80 * (idx)))
+
+#define RCANFD_F_CFDF(gpriv, ch, idx, df) \
+       (RCANFD_F_CFOFFSET(gpriv) + 0x0c + (0x180 * (ch)) + (0x80 * (idx)) + \
+        (0x04 * (df)))
 
 /* RSCFDnCFDTMXXp -> RCANFD_F_TMXX(p) */
 #define RCANFD_F_TMID(p)               (0x4000 + (0x20 * (p)))
@@ -470,7 +498,7 @@ enum rcanfd_chip_id {
 #define RCANFD_FIFO_DEPTH              8       /* Tx FIFO depth */
 #define RCANFD_NAPI_WEIGHT             8       /* Rx poll quota */
 
-#define RCANFD_NUM_CHANNELS            2       /* Two channels max */
+#define RCANFD_NUM_CHANNELS            8       /* Eight channels max */
 #define RCANFD_CHANNELS_MASK           BIT((RCANFD_NUM_CHANNELS) - 1)
 
 #define RCANFD_GAFL_PAGENUM(entry)     ((entry) / 16)
@@ -521,6 +549,7 @@ struct rcar_canfd_global {
        struct reset_control *rstc1;
        struct reset_control *rstc2;
        enum rcanfd_chip_id chip_id;
+       u32 max_channels;
 };
 
 /* CAN FD mode nominal rate constants */
@@ -563,6 +592,17 @@ static const struct can_bittiming_const rcar_canfd_bittiming_const = {
 };
 
 /* Helper functions */
+static inline bool is_v3u(struct rcar_canfd_global *gpriv)
+{
+       return gpriv->chip_id == RENESAS_R8A779A0;
+}
+
+static inline u32 reg_v3u(struct rcar_canfd_global *gpriv,
+                         u32 v3u, u32 not_v3u)
+{
+       return is_v3u(gpriv) ? v3u : not_v3u;
+}
+
 static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
 {
        u32 data = readl(reg);
@@ -628,6 +668,25 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
                can_free_echo_skb(ndev, i, NULL);
 }
 
+static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
+{
+       if (is_v3u(gpriv)) {
+               if (gpriv->fdmode)
+                       rcar_canfd_set_bit(gpriv->base, RCANFD_V3U_CFDCFG,
+                                          RCANFD_FDCFG_FDOE);
+               else
+                       rcar_canfd_set_bit(gpriv->base, RCANFD_V3U_CFDCFG,
+                                          RCANFD_FDCFG_CLOE);
+       } else {
+               if (gpriv->fdmode)
+                       rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
+                                          RCANFD_GRMCFG_RCMC);
+               else
+                       rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
+                                            RCANFD_GRMCFG_RCMC);
+       }
+}
+
 static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
 {
        u32 sts, ch;
@@ -660,15 +719,10 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
        rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0);
 
        /* Set the controller into appropriate mode */
-       if (gpriv->fdmode)
-               rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
-                                  RCANFD_GRMCFG_RCMC);
-       else
-               rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
-                                    RCANFD_GRMCFG_RCMC);
+       rcar_canfd_set_mode(gpriv);
 
        /* Transition all Channels to reset mode */
-       for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+       for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
                rcar_canfd_clear_bit(gpriv->base,
                                     RCANFD_CCTR(ch), RCANFD_CCTR_CSLPR);
 
@@ -709,7 +763,7 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
        rcar_canfd_set_bit(gpriv->base, RCANFD_GCFG, cfg);
 
        /* Channel configuration settings */
-       for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+       for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
                rcar_canfd_set_bit(gpriv->base, RCANFD_CCTR(ch),
                                   RCANFD_CCTR_ERRD);
                rcar_canfd_update_bit(gpriv->base, RCANFD_CCTR(ch),
@@ -729,20 +783,22 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
                start = 0; /* Channel 0 always starts from 0th rule */
        } else {
                /* Get number of Channel 0 rules and adjust */
-               cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG0);
-               start = RCANFD_GAFLCFG_GETRNC(0, cfg);
+               cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG(ch));
+               start = RCANFD_GAFLCFG_GETRNC(gpriv, 0, cfg);
        }
 
        /* Enable write access to entry */
        page = RCANFD_GAFL_PAGENUM(start);
        rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR,
-                          (RCANFD_GAFLECTR_AFLPN(page) |
+                          (RCANFD_GAFLECTR_AFLPN(gpriv, page) |
                            RCANFD_GAFLECTR_AFLDAE));
 
        /* Write number of rules for channel */
-       rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG0,
-                          RCANFD_GAFLCFG_SETRNC(ch, num_rules));
-       if (gpriv->fdmode)
+       rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(ch),
+                          RCANFD_GAFLCFG_SETRNC(gpriv, ch, num_rules));
+       if (is_v3u(gpriv))
+               offset = RCANFD_V3U_GAFL_OFFSET;
+       else if (gpriv->fdmode)
                offset = RCANFD_F_GAFL_OFFSET;
        else
                offset = RCANFD_C_GAFL_OFFSET;
@@ -754,8 +810,8 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
        /* Any data length accepted */
        rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0);
        /* Place the msg in corresponding Rx FIFO entry */
-       rcar_canfd_write(gpriv->base, RCANFD_GAFLP1(offset, start),
-                        RCANFD_GAFLP1_GAFLFDP(ridx));
+       rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, start),
+                          RCANFD_GAFLP1_GAFLFDP(ridx));
 
        /* Disable write access to page */
        rcar_canfd_clear_bit(gpriv->base,
@@ -779,7 +835,7 @@ static void rcar_canfd_configure_rx(struct rcar_canfd_global *gpriv, u32 ch)
 
        cfg = (RCANFD_RFCC_RFIM | RCANFD_RFCC_RFDC(rfdc) |
                RCANFD_RFCC_RFPLS(rfpls) | RCANFD_RFCC_RFIE);
-       rcar_canfd_write(gpriv->base, RCANFD_RFCC(ridx), cfg);
+       rcar_canfd_write(gpriv->base, RCANFD_RFCC(gpriv, ridx), cfg);
 }
 
 static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch)
@@ -801,15 +857,15 @@ static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch)
        else
                cfpls = 0;      /* b000 - Max 8 bytes payload */
 
-       cfg = (RCANFD_CFCC_CFTML(cftml) | RCANFD_CFCC_CFM(cfm) |
-               RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(cfdc) |
+       cfg = (RCANFD_CFCC_CFTML(gpriv, cftml) | RCANFD_CFCC_CFM(gpriv, cfm) |
+               RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(gpriv, cfdc) |
                RCANFD_CFCC_CFPLS(cfpls) | RCANFD_CFCC_CFTXIE);
-       rcar_canfd_write(gpriv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX), cfg);
+       rcar_canfd_write(gpriv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX), cfg);
 
        if (gpriv->fdmode)
                /* Clear FD mode specific control/status register */
                rcar_canfd_write(gpriv->base,
-                                RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), 0);
+                                RCANFD_F_CFFDCSTS(gpriv, ch, RCANFD_CFFIFO_IDX), 0);
 }
 
 static void rcar_canfd_enable_global_interrupts(struct rcar_canfd_global *gpriv)
@@ -890,20 +946,20 @@ static void rcar_canfd_global_error(struct net_device *ndev)
        }
        if (gerfl & RCANFD_GERFL_MES) {
                sts = rcar_canfd_read(priv->base,
-                                     RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+                                     RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX));
                if (sts & RCANFD_CFSTS_CFMLT) {
                        netdev_dbg(ndev, "Tx Message Lost flag\n");
                        stats->tx_dropped++;
                        rcar_canfd_write(priv->base,
-                                        RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX),
+                                        RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX),
                                         sts & ~RCANFD_CFSTS_CFMLT);
                }
 
-               sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+               sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
                if (sts & RCANFD_RFSTS_RFMLT) {
                        netdev_dbg(ndev, "Rx Message Lost flag\n");
                        stats->rx_dropped++;
-                       rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx),
+                       rcar_canfd_write(priv->base, RCANFD_RFSTS(gpriv, ridx),
                                         sts & ~RCANFD_RFSTS_RFMLT);
                }
        }
@@ -1038,6 +1094,7 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
 static void rcar_canfd_tx_done(struct net_device *ndev)
 {
        struct rcar_canfd_channel *priv = netdev_priv(ndev);
+       struct rcar_canfd_global *gpriv = priv->gpriv;
        struct net_device_stats *stats = &ndev->stats;
        u32 sts;
        unsigned long flags;
@@ -1053,7 +1110,7 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
                spin_lock_irqsave(&priv->tx_lock, flags);
                priv->tx_tail++;
                sts = rcar_canfd_read(priv->base,
-                                     RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+                                     RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX));
                unsent = RCANFD_CFSTS_CFMC(sts);
 
                /* Wake producer only when there is room */
@@ -1069,7 +1126,7 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
        } while (1);
 
        /* Clear interrupt */
-       rcar_canfd_write(priv->base, RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX),
+       rcar_canfd_write(priv->base, RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX),
                         sts & ~RCANFD_CFSTS_CFTXIF);
        can_led_event(ndev, CAN_LED_EVENT_TX);
 }
@@ -1091,7 +1148,7 @@ static irqreturn_t rcar_canfd_global_err_interrupt(int irq, void *dev_id)
        struct rcar_canfd_global *gpriv = dev_id;
        u32 ch;
 
-       for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+       for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels)
                rcar_canfd_handle_global_err(gpriv, ch);
 
        return IRQ_HANDLED;
@@ -1104,12 +1161,12 @@ static void rcar_canfd_handle_global_receive(struct rcar_canfd_global *gpriv, u3
        u32 sts;
 
        /* Handle Rx interrupts */
-       sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+       sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
        if (likely(sts & RCANFD_RFSTS_RFIF)) {
                if (napi_schedule_prep(&priv->napi)) {
                        /* Disable Rx FIFO interrupts */
                        rcar_canfd_clear_bit(priv->base,
-                                            RCANFD_RFCC(ridx),
+                                            RCANFD_RFCC(gpriv, ridx),
                                             RCANFD_RFCC_RFIE);
                        __napi_schedule(&priv->napi);
                }
@@ -1121,7 +1178,7 @@ static irqreturn_t rcar_canfd_global_receive_fifo_interrupt(int irq, void *dev_i
        struct rcar_canfd_global *gpriv = dev_id;
        u32 ch;
 
-       for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+       for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels)
                rcar_canfd_handle_global_receive(gpriv, ch);
 
        return IRQ_HANDLED;
@@ -1135,7 +1192,7 @@ static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id)
        /* Global error interrupts still indicate a condition specific
         * to a channel. RxFIFO interrupt is a global interrupt.
         */
-       for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+       for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
                rcar_canfd_handle_global_err(gpriv, ch);
                rcar_canfd_handle_global_receive(gpriv, ch);
        }
@@ -1181,7 +1238,7 @@ static void rcar_canfd_handle_channel_tx(struct rcar_canfd_global *gpriv, u32 ch
 
        /* Handle Tx interrupts */
        sts = rcar_canfd_read(priv->base,
-                             RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+                             RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX));
        if (likely(sts & RCANFD_CFSTS_CFTXIF))
                rcar_canfd_tx_done(ndev);
 }
@@ -1191,7 +1248,7 @@ static irqreturn_t rcar_canfd_channel_tx_interrupt(int irq, void *dev_id)
        struct rcar_canfd_global *gpriv = dev_id;
        u32 ch;
 
-       for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+       for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels)
                rcar_canfd_handle_channel_tx(gpriv, ch);
 
        return IRQ_HANDLED;
@@ -1223,7 +1280,7 @@ static irqreturn_t rcar_canfd_channel_err_interrupt(int irq, void *dev_id)
        struct rcar_canfd_global *gpriv = dev_id;
        u32 ch;
 
-       for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+       for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels)
                rcar_canfd_handle_channel_err(gpriv, ch);
 
        return IRQ_HANDLED;
@@ -1235,7 +1292,7 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
        u32 ch;
 
        /* Common FIFO is a per channel resource */
-       for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+       for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
                rcar_canfd_handle_channel_err(gpriv, ch);
                rcar_canfd_handle_channel_tx(gpriv, ch);
        }
@@ -1246,6 +1303,7 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
 static void rcar_canfd_set_bittiming(struct net_device *dev)
 {
        struct rcar_canfd_channel *priv = netdev_priv(dev);
+       struct rcar_canfd_global *gpriv = priv->gpriv;
        const struct can_bittiming *bt = &priv->can.bittiming;
        const struct can_bittiming *dbt = &priv->can.data_bittiming;
        u16 brp, sjw, tseg1, tseg2;
@@ -1260,8 +1318,8 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
 
        if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
                /* CAN FD only mode */
-               cfg = (RCANFD_NCFG_NTSEG1(tseg1) | RCANFD_NCFG_NBRP(brp) |
-                      RCANFD_NCFG_NSJW(sjw) | RCANFD_NCFG_NTSEG2(tseg2));
+               cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) | RCANFD_NCFG_NBRP(brp) |
+                      RCANFD_NCFG_NSJW(gpriv, sjw) | RCANFD_NCFG_NTSEG2(gpriv, tseg2));
 
                rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
                netdev_dbg(priv->ndev, "nrate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
@@ -1273,16 +1331,25 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
                tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
                tseg2 = dbt->phase_seg2 - 1;
 
-               cfg = (RCANFD_DCFG_DTSEG1(tseg1) | RCANFD_DCFG_DBRP(brp) |
-                      RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(tseg2));
+               cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
+                      RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
 
                rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg);
                netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
                           brp, sjw, tseg1, tseg2);
        } else {
                /* Classical CAN only mode */
-               cfg = (RCANFD_CFG_TSEG1(tseg1) | RCANFD_CFG_BRP(brp) |
-                       RCANFD_CFG_SJW(sjw) | RCANFD_CFG_TSEG2(tseg2));
+               if (is_v3u(gpriv)) {
+                       cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) |
+                              RCANFD_NCFG_NBRP(brp) |
+                              RCANFD_NCFG_NSJW(gpriv, sjw) |
+                              RCANFD_NCFG_NTSEG2(gpriv, tseg2));
+               } else {
+                       cfg = (RCANFD_CFG_TSEG1(tseg1) |
+                              RCANFD_CFG_BRP(brp) |
+                              RCANFD_CFG_SJW(sjw) |
+                              RCANFD_CFG_TSEG2(tseg2));
+               }
 
                rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
                netdev_dbg(priv->ndev,
@@ -1294,6 +1361,7 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
 static int rcar_canfd_start(struct net_device *ndev)
 {
        struct rcar_canfd_channel *priv = netdev_priv(ndev);
+       struct rcar_canfd_global *gpriv = priv->gpriv;
        int err = -EOPNOTSUPP;
        u32 sts, ch = priv->channel;
        u32 ridx = ch + RCANFD_RFFIFO_IDX;
@@ -1315,9 +1383,9 @@ static int rcar_canfd_start(struct net_device *ndev)
        }
 
        /* Enable Common & Rx FIFO */
-       rcar_canfd_set_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX),
+       rcar_canfd_set_bit(priv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX),
                           RCANFD_CFCC_CFE);
-       rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE);
+       rcar_canfd_set_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFE);
 
        priv->can.state = CAN_STATE_ERROR_ACTIVE;
        return 0;
@@ -1365,6 +1433,7 @@ out_clock:
 static void rcar_canfd_stop(struct net_device *ndev)
 {
        struct rcar_canfd_channel *priv = netdev_priv(ndev);
+       struct rcar_canfd_global *gpriv = priv->gpriv;
        int err;
        u32 sts, ch = priv->channel;
        u32 ridx = ch + RCANFD_RFFIFO_IDX;
@@ -1382,9 +1451,9 @@ static void rcar_canfd_stop(struct net_device *ndev)
        rcar_canfd_disable_channel_interrupts(priv);
 
        /* Disable Common & Rx FIFO */
-       rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX),
+       rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX),
                             RCANFD_CFCC_CFE);
-       rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE);
+       rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFE);
 
        /* Set the state as STOPPED */
        priv->can.state = CAN_STATE_STOPPED;
@@ -1408,6 +1477,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
                                         struct net_device *ndev)
 {
        struct rcar_canfd_channel *priv = netdev_priv(ndev);
+       struct rcar_canfd_global *gpriv = priv->gpriv;
        struct canfd_frame *cf = (struct canfd_frame *)skb->data;
        u32 sts = 0, id, dlc;
        unsigned long flags;
@@ -1428,11 +1498,11 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
 
        dlc = RCANFD_CFPTR_CFDLC(can_fd_len2dlc(cf->len));
 
-       if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+       if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_v3u(gpriv)) {
                rcar_canfd_write(priv->base,
-                                RCANFD_F_CFID(ch, RCANFD_CFFIFO_IDX), id);
+                                RCANFD_F_CFID(gpriv, ch, RCANFD_CFFIFO_IDX), id);
                rcar_canfd_write(priv->base,
-                                RCANFD_F_CFPTR(ch, RCANFD_CFFIFO_IDX), dlc);
+                                RCANFD_F_CFPTR(gpriv, ch, RCANFD_CFFIFO_IDX), dlc);
 
                if (can_is_canfd_skb(skb)) {
                        /* CAN FD frame format */
@@ -1445,10 +1515,10 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
                }
 
                rcar_canfd_write(priv->base,
-                                RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), sts);
+                                RCANFD_F_CFFDCSTS(gpriv, ch, RCANFD_CFFIFO_IDX), sts);
 
                rcar_canfd_put_data(priv, cf,
-                                   RCANFD_F_CFDF(ch, RCANFD_CFFIFO_IDX, 0));
+                                   RCANFD_F_CFDF(gpriv, ch, RCANFD_CFFIFO_IDX, 0));
        } else {
                rcar_canfd_write(priv->base,
                                 RCANFD_C_CFID(ch, RCANFD_CFFIFO_IDX), id);
@@ -1471,7 +1541,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
         * pointer for the Common FIFO
         */
        rcar_canfd_write(priv->base,
-                        RCANFD_CFPCTR(ch, RCANFD_CFFIFO_IDX), 0xff);
+                        RCANFD_CFPCTR(gpriv, ch, RCANFD_CFFIFO_IDX), 0xff);
 
        spin_unlock_irqrestore(&priv->tx_lock, flags);
        return NETDEV_TX_OK;
@@ -1480,18 +1550,21 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
 static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
 {
        struct net_device_stats *stats = &priv->ndev->stats;
+       struct rcar_canfd_global *gpriv = priv->gpriv;
        struct canfd_frame *cf;
        struct sk_buff *skb;
        u32 sts = 0, id, dlc;
        u32 ch = priv->channel;
        u32 ridx = ch + RCANFD_RFFIFO_IDX;
 
-       if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
-               id = rcar_canfd_read(priv->base, RCANFD_F_RFID(ridx));
-               dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(ridx));
+       if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_v3u(gpriv)) {
+               id = rcar_canfd_read(priv->base, RCANFD_F_RFID(gpriv, ridx));
+               dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(gpriv, ridx));
 
-               sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(ridx));
-               if (sts & RCANFD_RFFDSTS_RFFDF)
+               sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(gpriv, ridx));
+
+               if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
+                   sts & RCANFD_RFFDSTS_RFFDF)
                        skb = alloc_canfd_skb(priv->ndev, &cf);
                else
                        skb = alloc_can_skb(priv->ndev,
@@ -1529,12 +1602,14 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
                        if (sts & RCANFD_RFFDSTS_RFBRS)
                                cf->flags |= CANFD_BRS;
 
-                       rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(ridx, 0));
+                       rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0));
                }
        } else {
                cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
                if (id & RCANFD_RFID_RFRTR)
                        cf->can_id |= CAN_RTR_FLAG;
+               else if (is_v3u(gpriv))
+                       rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0));
                else
                        rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0));
        }
@@ -1542,7 +1617,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
        /* Write 0xff to RFPC to increment the CPU-side
         * pointer of the Rx FIFO
         */
-       rcar_canfd_write(priv->base, RCANFD_RFPCTR(ridx), 0xff);
+       rcar_canfd_write(priv->base, RCANFD_RFPCTR(gpriv, ridx), 0xff);
 
        can_led_event(priv->ndev, CAN_LED_EVENT_RX);
 
@@ -1556,13 +1631,14 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
 {
        struct rcar_canfd_channel *priv =
                container_of(napi, struct rcar_canfd_channel, napi);
+       struct rcar_canfd_global *gpriv = priv->gpriv;
        int num_pkts;
        u32 sts;
        u32 ch = priv->channel;
        u32 ridx = ch + RCANFD_RFFIFO_IDX;
 
        for (num_pkts = 0; num_pkts < quota; num_pkts++) {
-               sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+               sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
                /* Check FIFO empty condition */
                if (sts & RCANFD_RFSTS_RFEMP)
                        break;
@@ -1571,7 +1647,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
 
                /* Clear interrupt bit */
                if (sts & RCANFD_RFSTS_RFIF)
-                       rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx),
+                       rcar_canfd_write(priv->base, RCANFD_RFSTS(gpriv, ridx),
                                         sts & ~RCANFD_RFSTS_RFIF);
        }
 
@@ -1579,7 +1655,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
        if (num_pkts < quota) {
                if (napi_complete_done(napi, num_pkts)) {
                        /* Enable Rx FIFO interrupts */
-                       rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+                       rcar_canfd_set_bit(priv->base, RCANFD_RFCC(gpriv, ridx),
                                           RCANFD_RFCC_RFIE);
                }
        }
@@ -1756,21 +1832,24 @@ static int rcar_canfd_probe(struct platform_device *pdev)
        int g_err_irq, g_recc_irq;
        bool fdmode = true;                     /* CAN FD only mode - default */
        enum rcanfd_chip_id chip_id;
+       int max_channels;
+       char name[9] = "channelX";
+       int i;
 
        chip_id = (uintptr_t)of_device_get_match_data(&pdev->dev);
+       max_channels = chip_id == RENESAS_R8A779A0 ? 8 : 2;
 
        if (of_property_read_bool(pdev->dev.of_node, "renesas,no-can-fd"))
                fdmode = false;                 /* Classical CAN only mode */
 
-       of_child = of_get_child_by_name(pdev->dev.of_node, "channel0");
-       if (of_child && of_device_is_available(of_child))
-               channels_mask |= BIT(0);        /* Channel 0 */
-
-       of_child = of_get_child_by_name(pdev->dev.of_node, "channel1");
-       if (of_child && of_device_is_available(of_child))
-               channels_mask |= BIT(1);        /* Channel 1 */
+       for (i = 0; i < max_channels; ++i) {
+               name[7] = '0' + i;
+               of_child = of_get_child_by_name(pdev->dev.of_node, name);
+               if (of_child && of_device_is_available(of_child))
+                       channels_mask |= BIT(i);
+       }
 
-       if (chip_id == RENESAS_RCAR_GEN3) {
+       if (chip_id != RENESAS_RZG2L) {
                ch_irq = platform_get_irq_byname_optional(pdev, "ch_int");
                if (ch_irq < 0) {
                        /* For backward compatibility get irq by index */
@@ -1806,6 +1885,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
        gpriv->channels_mask = channels_mask;
        gpriv->fdmode = fdmode;
        gpriv->chip_id = chip_id;
+       gpriv->max_channels = max_channels;
 
        if (gpriv->chip_id == RENESAS_RZG2L) {
                gpriv->rstc1 = devm_reset_control_get_exclusive(&pdev->dev, "rstp_n");
@@ -1847,7 +1927,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
        }
        fcan_freq = clk_get_rate(gpriv->can_clk);
 
-       if (gpriv->fcan == RCANFD_CANFDCLK && gpriv->chip_id == RENESAS_RCAR_GEN3)
+       if (gpriv->fcan == RCANFD_CANFDCLK && gpriv->chip_id != RENESAS_RZG2L)
                /* CANFD clock is further divided by (1/2) within the IP */
                fcan_freq /= 2;
 
@@ -1859,7 +1939,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
        gpriv->base = addr;
 
        /* Request IRQ that's common for both channels */
-       if (gpriv->chip_id == RENESAS_RCAR_GEN3) {
+       if (gpriv->chip_id != RENESAS_RZG2L) {
                err = devm_request_irq(&pdev->dev, ch_irq,
                                       rcar_canfd_channel_interrupt, 0,
                                       "canfd.ch_int", gpriv);
@@ -1925,7 +2005,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
        rcar_canfd_configure_controller(gpriv);
 
        /* Configure per channel attributes */
-       for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+       for_each_set_bit(ch, &gpriv->channels_mask, max_channels) {
                /* Configure Channel's Rx fifo */
                rcar_canfd_configure_rx(gpriv, ch);
 
@@ -1951,7 +2031,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
                goto fail_mode;
        }
 
-       for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+       for_each_set_bit(ch, &gpriv->channels_mask, max_channels) {
                err = rcar_canfd_channel_probe(gpriv, ch, fcan_freq);
                if (err)
                        goto fail_channel;
@@ -1963,7 +2043,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
        return 0;
 
 fail_channel:
-       for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+       for_each_set_bit(ch, &gpriv->channels_mask, max_channels)
                rcar_canfd_channel_remove(gpriv, ch);
 fail_mode:
        rcar_canfd_disable_global_interrupts(gpriv);
@@ -1984,7 +2064,7 @@ static int rcar_canfd_remove(struct platform_device *pdev)
        rcar_canfd_reset_controller(gpriv);
        rcar_canfd_disable_global_interrupts(gpriv);
 
-       for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+       for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
                rcar_canfd_disable_channel_interrupts(gpriv->ch[ch]);
                rcar_canfd_channel_remove(gpriv, ch);
        }
@@ -2014,6 +2094,7 @@ static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
 static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = {
        { .compatible = "renesas,rcar-gen3-canfd", .data = (void *)RENESAS_RCAR_GEN3 },
        { .compatible = "renesas,rzg2l-canfd", .data = (void *)RENESAS_RZG2L },
+       { .compatible = "renesas,r8a779a0-canfd", .data = (void *)RENESAS_R8A779A0 },
        { }
 };
 
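Most of the rcar_canfd churn above funnels every SoC-dependent offset and
field width through the new is_v3u()/reg_v3u() helpers, so each register
macro takes gpriv and resolves the R-Car V3U versus legacy value at the
call site instead of duplicating whole macro sets. The pattern in
isolation, reusing the constants from the RCANFD_RFCC change:

#include <stdbool.h>
#include <stdint.h>

struct dev_priv { bool is_v3u; };

/* One selector instead of #ifdefs or parallel macro sets. */
static inline uint32_t reg_sel(const struct dev_priv *p,
			       uint32_t v3u_val, uint32_t legacy_val)
{
	return p->is_v3u ? v3u_val : legacy_val;
}

/* Rx FIFO control reg x: base 0x00c0 on V3U, 0x00b8 otherwise. */
#define RX_FIFO_CC(p, x)  (reg_sel((p), 0x00c0, 0x00b8) + 0x04 * (x))
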
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 27783fb..ec294d0 100644
@@ -221,7 +221,7 @@ static void slc_bump(struct slcan *sl)
        if (!(cf.can_id & CAN_RTR_FLAG))
                sl->dev->stats.rx_bytes += cf.len;
 
-       netif_rx_ni(skb);
+       netif_rx(skb);
 }
 
 /* parse tty input stream */
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 664b8f1..a5b2952 100644
@@ -356,7 +356,7 @@ static void hi3110_hw_rx(struct spi_device *spi)
 
        can_led_event(priv->net, CAN_LED_EVENT_RX);
 
-       netif_rx_ni(skb);
+       netif_rx(skb);
 }
 
 static void hi3110_hw_sleep(struct spi_device *spi)
@@ -677,7 +677,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
                        tx_state = txerr >= rxerr ? new_state : 0;
                        rx_state = txerr <= rxerr ? new_state : 0;
                        can_change_state(net, cf, tx_state, rx_state);
-                       netif_rx_ni(skb);
+                       netif_rx(skb);
 
                        if (new_state == CAN_STATE_BUS_OFF) {
                                can_bus_off(net);
@@ -718,7 +718,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
                                cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
                                cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
                                netdev_dbg(priv->net, "Bus Error\n");
-                               netif_rx_ni(skb);
+                               netif_rx(skb);
                        }
                }
 
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index d23edaf..fc747bf 100644
@@ -740,7 +740,7 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
 
        can_led_event(priv->net, CAN_LED_EVENT_RX);
 
-       netif_rx_ni(skb);
+       netif_rx(skb);
 }
 
 static void mcp251x_hw_sleep(struct spi_device *spi)
@@ -987,7 +987,7 @@ static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
        if (skb) {
                frame->can_id |= can_id;
                frame->data[1] = data1;
-               netif_rx_ni(skb);
+               netif_rx(skb);
        } else {
                netdev_err(net, "cannot allocate error skb\n");
        }
diff --git a/drivers/net/can/spi/mcp251xfd/Makefile b/drivers/net/can/spi/mcp251xfd/Makefile
index a83d685..94d7de9 100644
@@ -6,6 +6,8 @@ mcp251xfd-objs :=
 mcp251xfd-objs += mcp251xfd-chip-fifo.o
 mcp251xfd-objs += mcp251xfd-core.o
 mcp251xfd-objs += mcp251xfd-crc16.o
+mcp251xfd-objs += mcp251xfd-ethtool.o
+mcp251xfd-objs += mcp251xfd-ram.o
 mcp251xfd-objs += mcp251xfd-regmap.o
 mcp251xfd-objs += mcp251xfd-ring.o
 mcp251xfd-objs += mcp251xfd-rx.o
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 3da17ca..325024b 100644
@@ -1598,6 +1598,7 @@ static int mcp251xfd_open(struct net_device *ndev)
                goto out_transceiver_disable;
 
        mcp251xfd_timestamp_init(priv);
+       clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
        can_rx_offload_enable(&priv->offload);
 
        err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
@@ -1618,6 +1619,7 @@ static int mcp251xfd_open(struct net_device *ndev)
        free_irq(spi->irq, priv);
  out_can_rx_offload_disable:
        can_rx_offload_disable(&priv->offload);
+       set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
        mcp251xfd_timestamp_stop(priv);
  out_transceiver_disable:
        mcp251xfd_transceiver_disable(priv);
@@ -1637,6 +1639,8 @@ static int mcp251xfd_stop(struct net_device *ndev)
        struct mcp251xfd_priv *priv = netdev_priv(ndev);
 
        netif_stop_queue(ndev);
+       set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
+       hrtimer_cancel(&priv->rx_irq_timer);
        mcp251xfd_chip_interrupts_disable(priv);
        free_irq(ndev->irq, priv);
        can_rx_offload_disable(&priv->offload);
@@ -1871,6 +1875,8 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
        if (err)
                goto out_chip_sleep;
 
+       mcp251xfd_ethtool_init(priv);
+
        err = register_candev(ndev);
        if (err)
                goto out_chip_sleep;
@@ -2034,6 +2040,7 @@ static int mcp251xfd_probe(struct spi_device *spi)
                CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
                CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
                CAN_CTRLMODE_CC_LEN8_DLC;
+       set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
        priv->ndev = ndev;
        priv->spi = spi;
        priv->rx_int = rx_int;
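
The core hunks also order teardown so the Rx coalescing hrtimer cannot
re-arm mid-stop: MCP251XFD_FLAGS_DOWN is set before hrtimer_cancel(). The
timer callback is not part of this excerpt, so the sketch below assumes its
shape; only the names come from the diff:

/* Kernel-context sketch, callback body assumed. */
static enum hrtimer_restart rx_irq_timer_fn(struct hrtimer *t)
{
	struct mcp251xfd_priv *priv =
		container_of(t, struct mcp251xfd_priv, rx_irq_timer);

	if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
		return HRTIMER_NORESTART;	/* going down, stop */

	/* ...kick Rx processing, possibly re-arm... */
	return HRTIMER_NORESTART;
}

static void stop_ordering(struct mcp251xfd_priv *priv)
{
	set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);	/* publish first */
	hrtimer_cancel(&priv->rx_irq_timer);		/* then cancel+wait */
}
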
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
new file mode 100644
index 0000000..6c7a57f
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2021, 2022 Pengutronix,
+//               Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <linux/ethtool.h>
+
+#include "mcp251xfd.h"
+#include "mcp251xfd-ram.h"
+
+static void
+mcp251xfd_ring_get_ringparam(struct net_device *ndev,
+                            struct ethtool_ringparam *ring,
+                            struct kernel_ethtool_ringparam *kernel_ring,
+                            struct netlink_ext_ack *extack)
+{
+       const struct mcp251xfd_priv *priv = netdev_priv(ndev);
+       const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+       struct can_ram_layout layout;
+
+       can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
+       ring->rx_max_pending = layout.max_rx;
+       ring->tx_max_pending = layout.max_tx;
+
+       ring->rx_pending = priv->rx_obj_num;
+       ring->tx_pending = priv->tx->obj_num;
+}
+
+static int
+mcp251xfd_ring_set_ringparam(struct net_device *ndev,
+                            struct ethtool_ringparam *ring,
+                            struct kernel_ethtool_ringparam *kernel_ring,
+                            struct netlink_ext_ack *extack)
+{
+       struct mcp251xfd_priv *priv = netdev_priv(ndev);
+       const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+       struct can_ram_layout layout;
+
+       can_ram_get_layout(&layout, &mcp251xfd_ram_config, ring, NULL, fd_mode);
+       if ((layout.cur_rx != priv->rx_obj_num ||
+            layout.cur_tx != priv->tx->obj_num) &&
+           netif_running(ndev))
+               return -EBUSY;
+
+       priv->rx_obj_num = layout.cur_rx;
+       priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
+       priv->tx->obj_num = layout.cur_tx;
+
+       return 0;
+}
+
+static int mcp251xfd_ring_get_coalesce(struct net_device *ndev,
+                                      struct ethtool_coalesce *ec,
+                                      struct kernel_ethtool_coalesce *kec,
+                                      struct netlink_ext_ack *ext_ack)
+{
+       struct mcp251xfd_priv *priv = netdev_priv(ndev);
+       u32 rx_max_frames, tx_max_frames;
+
+       /* The ethtool doc says:
+        * To disable coalescing, set usecs = 0 and max_frames = 1.
+        */
+       if (priv->rx_obj_num_coalesce_irq == 0)
+               rx_max_frames = 1;
+       else
+               rx_max_frames = priv->rx_obj_num_coalesce_irq;
+
+       ec->rx_max_coalesced_frames_irq = rx_max_frames;
+       ec->rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq;
+
+       if (priv->tx_obj_num_coalesce_irq == 0)
+               tx_max_frames = 1;
+       else
+               tx_max_frames = priv->tx_obj_num_coalesce_irq;
+
+       ec->tx_max_coalesced_frames_irq = tx_max_frames;
+       ec->tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq;
+
+       return 0;
+}
+
+static int mcp251xfd_ring_set_coalesce(struct net_device *ndev,
+                                      struct ethtool_coalesce *ec,
+                                      struct kernel_ethtool_coalesce *kec,
+                                      struct netlink_ext_ack *ext_ack)
+{
+       struct mcp251xfd_priv *priv = netdev_priv(ndev);
+       const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+       const struct ethtool_ringparam ring = {
+               .rx_pending = priv->rx_obj_num,
+               .tx_pending = priv->tx->obj_num,
+       };
+       struct can_ram_layout layout;
+
+       can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, ec, fd_mode);
+
+       if ((layout.rx_coalesce != priv->rx_obj_num_coalesce_irq ||
+            ec->rx_coalesce_usecs_irq != priv->rx_coalesce_usecs_irq ||
+            layout.tx_coalesce != priv->tx_obj_num_coalesce_irq ||
+            ec->tx_coalesce_usecs_irq != priv->tx_coalesce_usecs_irq) &&
+           netif_running(ndev))
+               return -EBUSY;
+
+       priv->rx_obj_num = layout.cur_rx;
+       priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
+       priv->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
+
+       priv->tx->obj_num = layout.cur_tx;
+       priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
+       priv->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
+
+       return 0;
+}
+
+static const struct ethtool_ops mcp251xfd_ethtool_ops = {
+       .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ |
+               ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ |
+               ETHTOOL_COALESCE_TX_USECS_IRQ |
+               ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
+       .get_ringparam = mcp251xfd_ring_get_ringparam,
+       .set_ringparam = mcp251xfd_ring_set_ringparam,
+       .get_coalesce = mcp251xfd_ring_get_coalesce,
+       .set_coalesce = mcp251xfd_ring_set_coalesce,
+};
+
+void mcp251xfd_ethtool_init(struct mcp251xfd_priv *priv)
+{
+       struct can_ram_layout layout;
+
+       priv->ndev->ethtool_ops = &mcp251xfd_ethtool_ops;
+
+       can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, false);
+       priv->rx_obj_num = layout.default_rx;
+       priv->tx->obj_num = layout.default_tx;
+
+       priv->rx_obj_num_coalesce_irq = 0;
+       priv->tx_obj_num_coalesce_irq = 0;
+       priv->rx_coalesce_usecs_irq = 0;
+       priv->tx_coalesce_usecs_irq = 0;
+}
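
With these ops registered, the FIFO layout becomes tunable through stock
ethtool, for example (interface name assumed; the driver rounds requests to
what the on-chip RAM layout allows, and changes are refused with -EBUSY
while the interface is running):

# ethtool -g can0                  show current/max ring (FIFO) sizes
# ethtool -G can0 rx 16 tx 8       resize Rx/Tx object counts
# ethtool -c can0                  show IRQ coalescing settings
# ethtool -C can0 rx-usecs-irq 500 rx-frames-irq 8
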
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
new file mode 100644
index 0000000..9e8e82c
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2021, 2022 Pengutronix,
+//               Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include "mcp251xfd-ram.h"
+
+static inline u8 can_ram_clamp(const struct can_ram_config *config,
+                              const struct can_ram_obj_config *obj,
+                              u8 val)
+{
+       u8 max;
+
+       max = min_t(u8, obj->max, obj->fifo_num * config->fifo_depth);
+       return clamp(val, obj->min, max);
+}
+
+static u8
+can_ram_rounddown_pow_of_two(const struct can_ram_config *config,
+                            const struct can_ram_obj_config *obj,
+                            const u8 coalesce, u8 val)
+{
+       u8 fifo_num = obj->fifo_num;
+       u8 ret = 0, i;
+
+       val = can_ram_clamp(config, obj, val);
+
+       if (coalesce) {
+               /* Use 1st FIFO for coalescing, if requested.
+                *
+                * Either use complete FIFO (and FIFO Full IRQ) for
+                * coalescing or only half of FIFO (FIFO Half Full
+                * IRQ) and use remaining half for normal objects.
+                */
+               ret = min_t(u8, coalesce * 2, config->fifo_depth);
+               val -= ret;
+               fifo_num--;
+       }
+
+       for (i = 0; i < fifo_num && val; i++) {
+               u8 n;
+
+               n = min_t(u8, rounddown_pow_of_two(val),
+                         config->fifo_depth);
+
+               /* skip small FIFOs */
+               if (n < obj->fifo_depth_min)
+                       return ret;
+
+               ret += n;
+               val -= n;
+       }
+
+       return ret;
+}
+
+void can_ram_get_layout(struct can_ram_layout *layout,
+                       const struct can_ram_config *config,
+                       const struct ethtool_ringparam *ring,
+                       const struct ethtool_coalesce *ec,
+                       const bool fd_mode)
+{
+       u8 num_rx, num_tx;
+       u16 ram_free;
+
+       /* default CAN */
+
+       num_tx = config->tx.def[fd_mode];
+       num_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx);
+
+       ram_free = config->size;
+       ram_free -= config->tx.size[fd_mode] * num_tx;
+
+       num_rx = ram_free / config->rx.size[fd_mode];
+
+       layout->default_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
+       layout->default_tx = num_tx;
+
+       /* MAX CAN */
+
+       ram_free = config->size;
+       ram_free -= config->tx.size[fd_mode] * config->tx.min;
+       num_rx = ram_free / config->rx.size[fd_mode];
+
+       ram_free = config->size;
+       ram_free -= config->rx.size[fd_mode] * config->rx.min;
+       num_tx = ram_free / config->tx.size[fd_mode];
+
+       layout->max_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
+       layout->max_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx);
+
+       /* cur CAN */
+
+       if (ring) {
+               u8 num_rx_coalesce = 0, num_tx_coalesce = 0;
+
+               num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, ring->rx_pending);
+
+               /* The ethtool doc says:
+                * To disable coalescing, set usecs = 0 and max_frames = 1.
+                */
+               if (ec && !(ec->rx_coalesce_usecs_irq == 0 &&
+                           ec->rx_max_coalesced_frames_irq == 1)) {
+                       u8 max;
+
+                       /* use at most half of the available objects for coalescing */
+                       max = min_t(u8, num_rx / 2, config->fifo_depth);
+                       num_rx_coalesce = clamp(ec->rx_max_coalesced_frames_irq,
+                                               (u32)config->rx.fifo_depth_coalesce_min,
+                                               (u32)max);
+                       num_rx_coalesce = rounddown_pow_of_two(num_rx_coalesce);
+
+                       num_rx = can_ram_rounddown_pow_of_two(config, &config->rx,
+                                                             num_rx_coalesce, num_rx);
+               }
+
+               ram_free = config->size - config->rx.size[fd_mode] * num_rx;
+               num_tx = ram_free / config->tx.size[fd_mode];
+               num_tx = min_t(u8, ring->tx_pending, num_tx);
+               num_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx);
+
+               /* The ethtool doc says:
+                * To disable coalescing, set usecs = 0 and max_frames = 1.
+                */
+               if (ec && !(ec->tx_coalesce_usecs_irq == 0 &&
+                           ec->tx_max_coalesced_frames_irq == 1)) {
+                       u8 max;
+
+                       /* use at most half of the available objects for coalescing */
+                       max = min_t(u8, num_tx / 2, config->fifo_depth);
+                       num_tx_coalesce = clamp(ec->tx_max_coalesced_frames_irq,
+                                               (u32)config->tx.fifo_depth_coalesce_min,
+                                               (u32)max);
+                       num_tx_coalesce = rounddown_pow_of_two(num_tx_coalesce);
+
+                       num_tx = can_ram_rounddown_pow_of_two(config, &config->tx,
+                                                             num_tx_coalesce, num_tx);
+               }
+
+               layout->cur_rx = num_rx;
+               layout->cur_tx = num_tx;
+               layout->rx_coalesce = num_rx_coalesce;
+               layout->tx_coalesce = num_tx_coalesce;
+       } else {
+               layout->cur_rx = layout->default_rx;
+               layout->cur_tx = layout->default_tx;
+               layout->rx_coalesce = 0;
+               layout->tx_coalesce = 0;
+       }
+}
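
can_ram_rounddown_pow_of_two() above packs the requested object count
into hardware FIFOs in descending power-of-two chunks, each capped at
the FIFO depth and dropped once it falls below the per-FIFO minimum. A
stand-alone sketch of the same loop with invented constants (depth 32,
three FIFOs, minimum depth 4), just to make the arithmetic concrete:

#include <stdio.h>

static unsigned int rounddown_pow_of_two(unsigned int v)
{
	unsigned int r = 1;

	while (r * 2 <= v)
		r *= 2;
	return r;
}

int main(void)
{
	const unsigned int fifo_depth = 32, fifo_num = 3, depth_min = 4;
	unsigned int val = 56, used = 0, i;

	for (i = 0; i < fifo_num && val; i++) {
		unsigned int n = rounddown_pow_of_two(val);

		if (n > fifo_depth)
			n = fifo_depth;
		if (n < depth_min)	/* skip small FIFOs */
			break;
		used += n;
		val -= n;
	}

	printf("packed %u objects\n", used);	/* 56 -> 32 + 16 + 8 */
	return 0;
}
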
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h
new file mode 100644
index 0000000..7558c15
--- /dev/null
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ *
+ * Copyright (c) 2021, 2022 Pengutronix,
+ *               Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _MCP251XFD_RAM_H
+#define _MCP251XFD_RAM_H
+
+#include <linux/ethtool.h>
+
+#define CAN_RAM_NUM_MAX (-1)
+
+enum can_ram_mode {
+       CAN_RAM_MODE_CAN,
+       CAN_RAM_MODE_CANFD,
+       __CAN_RAM_MODE_MAX
+};
+
+struct can_ram_obj_config {
+       u8 size[__CAN_RAM_MODE_MAX];
+
+       u8 def[__CAN_RAM_MODE_MAX];
+       u8 min;
+       u8 max;
+
+       u8 fifo_num;
+       u8 fifo_depth_min;
+       u8 fifo_depth_coalesce_min;
+};
+
+struct can_ram_config {
+       const struct can_ram_obj_config rx;
+       const struct can_ram_obj_config tx;
+
+       u16 size;
+       u8 fifo_depth;
+};
+
+struct can_ram_layout {
+       u8 default_rx;
+       u8 default_tx;
+
+       u8 max_rx;
+       u8 max_tx;
+
+       u8 cur_rx;
+       u8 cur_tx;
+
+       u8 rx_coalesce;
+       u8 tx_coalesce;
+};
+
+void can_ram_get_layout(struct can_ram_layout *layout,
+                       const struct can_ram_config *config,
+                       const struct ethtool_ringparam *ring,
+                       const struct ethtool_coalesce *ec,
+                       const bool fd_mode);
+
+#endif
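
A hypothetical usage sketch of the interface declared above: compute
the defaults first, then recompute for a user-requested ring size (the
ring values here are invented for illustration):

static void example_layout(void)
{
	const struct ethtool_ringparam ring = {
		.rx_pending = 16,
		.tx_pending = 8,
	};
	struct can_ram_layout layout;

	/* defaults: no user-supplied ring or coalescing parameters */
	can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, false);

	/* clamp a requested ring size to what fits in RAM, CAN-2.0 mode */
	can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, NULL, false);
}
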
index 848b8b2..bf3f0f1 100644
@@ -15,6 +15,7 @@
 #include <asm/unaligned.h>
 
 #include "mcp251xfd.h"
+#include "mcp251xfd-ram.h"
 
 static inline u8
 mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
@@ -70,6 +71,17 @@ mcp251xfd_ring_init_tef(struct mcp251xfd_priv *priv, u16 *base)
        /* TEF- and TX-FIFO have same number of objects */
        *base = mcp251xfd_get_tef_obj_addr(priv->tx->obj_num);
 
+       /* FIFO IRQ enable */
+       addr = MCP251XFD_REG_TEFCON;
+       val = MCP251XFD_REG_TEFCON_TEFOVIE | MCP251XFD_REG_TEFCON_TEFNEIE;
+
+       len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->irq_enable_buf,
+                                             addr, val, val);
+       tef_ring->irq_enable_xfer.tx_buf = &tef_ring->irq_enable_buf;
+       tef_ring->irq_enable_xfer.len = len;
+       spi_message_init_with_transfers(&tef_ring->irq_enable_msg,
+                                       &tef_ring->irq_enable_xfer, 1);
+
        /* FIFO increment TEF tail pointer */
        addr = MCP251XFD_REG_TEFCON;
        val = MCP251XFD_REG_TEFCON_UINC;
@@ -93,6 +105,18 @@ mcp251xfd_ring_init_tef(struct mcp251xfd_priv *priv, u16 *base)
         * message.
         */
        xfer->cs_change = 0;
+
+       if (priv->tx_coalesce_usecs_irq || priv->tx_obj_num_coalesce_irq) {
+               val = MCP251XFD_REG_TEFCON_UINC |
+                       MCP251XFD_REG_TEFCON_TEFOVIE |
+                       MCP251XFD_REG_TEFCON_TEFHIE;
+
+               len = mcp251xfd_cmd_prepare_write_reg(priv,
+                                                     &tef_ring->uinc_irq_disable_buf,
+                                                     addr, val, val);
+               xfer->tx_buf = &tef_ring->uinc_irq_disable_buf;
+               xfer->len = len;
+       }
 }
 
 static void
@@ -181,8 +205,18 @@ mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
                *base = mcp251xfd_get_rx_obj_addr(rx_ring, rx_ring->obj_num);
                *fifo_nr += 1;
 
-               /* FIFO increment RX tail pointer */
+               /* FIFO IRQ enable */
                addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
+               val = MCP251XFD_REG_FIFOCON_RXOVIE |
+                       MCP251XFD_REG_FIFOCON_TFNRFNIE;
+               len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->irq_enable_buf,
+                                                     addr, val, val);
+               rx_ring->irq_enable_xfer.tx_buf = &rx_ring->irq_enable_buf;
+               rx_ring->irq_enable_xfer.len = len;
+               spi_message_init_with_transfers(&rx_ring->irq_enable_msg,
+                                               &rx_ring->irq_enable_xfer, 1);
+
+               /* FIFO increment RX tail pointer */
                val = MCP251XFD_REG_FIFOCON_UINC;
                len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
                                                      addr, val, val);
@@ -204,6 +238,39 @@ mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
                 * the chip select at the end of the message.
                 */
                xfer->cs_change = 0;
+
+               /* Use 1st RX-FIFO for IRQ coalescing. If enabled
+                * (rx_coalesce_usecs_irq or rx_max_coalesced_frames_irq
+                * is activated), use the last transfer to disable:
+                *
+                * - TFNRFNIE (Receive FIFO Not Empty Interrupt)
+                *
+                * and enable:
+                *
+                * - TFHRFHIE (Receive FIFO Half Full Interrupt)
+                *   - or -
+                * - TFERFFIE (Receive FIFO Full Interrupt)
+                *
+                * depending on rx_max_coalesce_frames_irq.
+                *
+                * The RXOVIE (Overflow Interrupt) is always enabled.
+                */
+               if (rx_ring->nr == 0 && (priv->rx_coalesce_usecs_irq ||
+                                        priv->rx_obj_num_coalesce_irq)) {
+                       val = MCP251XFD_REG_FIFOCON_UINC |
+                               MCP251XFD_REG_FIFOCON_RXOVIE;
+
+                       if (priv->rx_obj_num_coalesce_irq == rx_ring->obj_num)
+                               val |= MCP251XFD_REG_FIFOCON_TFERFFIE;
+                       else if (priv->rx_obj_num_coalesce_irq)
+                               val |= MCP251XFD_REG_FIFOCON_TFHRFHIE;
+
+                       len = mcp251xfd_cmd_prepare_write_reg(priv,
+                                                             &rx_ring->uinc_irq_disable_buf,
+                                                             addr, val, val);
+                       xfer->tx_buf = &rx_ring->uinc_irq_disable_buf;
+                       xfer->len = len;
+               }
        }
 }
 
@@ -238,19 +305,58 @@ int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
         */
        priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr);
 
-       netdev_dbg(priv->ndev,
-                  "FIFO setup: TEF:         0x%03x: %2d*%zu bytes = %4zu bytes\n",
-                  mcp251xfd_get_tef_obj_addr(0),
-                  priv->tx->obj_num, sizeof(struct mcp251xfd_hw_tef_obj),
-                  priv->tx->obj_num * sizeof(struct mcp251xfd_hw_tef_obj));
+       if (priv->tx_obj_num_coalesce_irq) {
+               netdev_dbg(priv->ndev,
+                          "FIFO setup: TEF:         0x%03x: %2d*%zu bytes = %4zu bytes (coalesce)\n",
+                          mcp251xfd_get_tef_obj_addr(0),
+                          priv->tx_obj_num_coalesce_irq,
+                          sizeof(struct mcp251xfd_hw_tef_obj),
+                          priv->tx_obj_num_coalesce_irq *
+                          sizeof(struct mcp251xfd_hw_tef_obj));
 
-       mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
                netdev_dbg(priv->ndev,
-                          "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
-                          rx_ring->nr, rx_ring->fifo_nr,
-                          mcp251xfd_get_rx_obj_addr(rx_ring, 0),
-                          rx_ring->obj_num, rx_ring->obj_size,
-                          rx_ring->obj_num * rx_ring->obj_size);
+                          "                         0x%03x: %2d*%zu bytes = %4zu bytes\n",
+                          mcp251xfd_get_tef_obj_addr(priv->tx_obj_num_coalesce_irq),
+                          priv->tx->obj_num - priv->tx_obj_num_coalesce_irq,
+                          sizeof(struct mcp251xfd_hw_tef_obj),
+                          (priv->tx->obj_num - priv->tx_obj_num_coalesce_irq) *
+                          sizeof(struct mcp251xfd_hw_tef_obj));
+       } else {
+               netdev_dbg(priv->ndev,
+                          "FIFO setup: TEF:         0x%03x: %2d*%zu bytes = %4zu bytes\n",
+                          mcp251xfd_get_tef_obj_addr(0),
+                          priv->tx->obj_num, sizeof(struct mcp251xfd_hw_tef_obj),
+                          priv->tx->obj_num * sizeof(struct mcp251xfd_hw_tef_obj));
+       }
+
+       mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
+               if (rx_ring->nr == 0 && priv->rx_obj_num_coalesce_irq) {
+                       netdev_dbg(priv->ndev,
+                                  "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes (coalesce)\n",
+                                  rx_ring->nr, rx_ring->fifo_nr,
+                                  mcp251xfd_get_rx_obj_addr(rx_ring, 0),
+                                  priv->rx_obj_num_coalesce_irq, rx_ring->obj_size,
+                                  priv->rx_obj_num_coalesce_irq * rx_ring->obj_size);
+
+                       if (priv->rx_obj_num_coalesce_irq == MCP251XFD_FIFO_DEPTH)
+                               continue;
+
+                       netdev_dbg(priv->ndev,
+                                  "                         0x%03x: %2u*%u bytes = %4u bytes\n",
+                                  mcp251xfd_get_rx_obj_addr(rx_ring,
+                                                            priv->rx_obj_num_coalesce_irq),
+                                  rx_ring->obj_num - priv->rx_obj_num_coalesce_irq,
+                                  rx_ring->obj_size,
+                                  (rx_ring->obj_num - priv->rx_obj_num_coalesce_irq) *
+                                  rx_ring->obj_size);
+               } else {
+                       netdev_dbg(priv->ndev,
+                                  "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
+                                  rx_ring->nr, rx_ring->fifo_nr,
+                                  mcp251xfd_get_rx_obj_addr(rx_ring, 0),
+                                  rx_ring->obj_num, rx_ring->obj_size,
+                                  rx_ring->obj_num * rx_ring->obj_size);
+               }
        }
 
        netdev_dbg(priv->ndev,
@@ -261,7 +367,7 @@ int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
                   priv->tx->obj_num * priv->tx->obj_size);
 
        netdev_dbg(priv->ndev,
-                  "FIFO setup: free:                             %4u bytes\n",
+                  "FIFO setup: free:                             %4d bytes\n",
                   MCP251XFD_RAM_SIZE - (base - MCP251XFD_RAM_START));
 
        ram_used = base - MCP251XFD_RAM_START;
@@ -285,40 +391,103 @@ void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
        }
 }
 
+static enum hrtimer_restart mcp251xfd_rx_irq_timer(struct hrtimer *t)
+{
+       struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
+                                                  rx_irq_timer);
+       struct mcp251xfd_rx_ring *ring = priv->rx[0];
+
+       if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
+               return HRTIMER_NORESTART;
+
+       spi_async(priv->spi, &ring->irq_enable_msg);
+
+       return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart mcp251xfd_tx_irq_timer(struct hrtimer *t)
+{
+       struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
+                                                  tx_irq_timer);
+       struct mcp251xfd_tef_ring *ring = priv->tef;
+
+       if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
+               return HRTIMER_NORESTART;
+
+       spi_async(priv->spi, &ring->irq_enable_msg);
+
+       return HRTIMER_NORESTART;
+}
+
+const struct can_ram_config mcp251xfd_ram_config = {
+       .rx = {
+               .size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_rx_obj_can),
+               .size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_rx_obj_canfd),
+               .min = MCP251XFD_RX_OBJ_NUM_MIN,
+               .max = MCP251XFD_RX_OBJ_NUM_MAX,
+               .def[CAN_RAM_MODE_CAN] = CAN_RAM_NUM_MAX,
+               .def[CAN_RAM_MODE_CANFD] = CAN_RAM_NUM_MAX,
+               .fifo_num = MCP251XFD_FIFO_RX_NUM,
+               .fifo_depth_min = MCP251XFD_RX_FIFO_DEPTH_MIN,
+               .fifo_depth_coalesce_min = MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN,
+       },
+       .tx = {
+               .size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_tef_obj) +
+                       sizeof(struct mcp251xfd_hw_tx_obj_can),
+               .size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_tef_obj) +
+                       sizeof(struct mcp251xfd_hw_tx_obj_canfd),
+               .min = MCP251XFD_TX_OBJ_NUM_MIN,
+               .max = MCP251XFD_TX_OBJ_NUM_MAX,
+               .def[CAN_RAM_MODE_CAN] = MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT,
+               .def[CAN_RAM_MODE_CANFD] = MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT,
+               .fifo_num = MCP251XFD_FIFO_TX_NUM,
+               .fifo_depth_min = MCP251XFD_TX_FIFO_DEPTH_MIN,
+               .fifo_depth_coalesce_min = MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN,
+       },
+       .size = MCP251XFD_RAM_SIZE,
+       .fifo_depth = MCP251XFD_FIFO_DEPTH,
+};
+
 int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
 {
-       struct mcp251xfd_tx_ring *tx_ring;
+       const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+       struct mcp251xfd_tx_ring *tx_ring = priv->tx;
        struct mcp251xfd_rx_ring *rx_ring;
-       int tef_obj_size, tx_obj_size, rx_obj_size;
-       int tx_obj_num;
-       int ram_free, i;
+       u8 tx_obj_size, rx_obj_size;
+       u8 rem, i;
+
+       /* switching from CAN-2.0 to CAN-FD mode or vice versa */
+       if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) {
+               struct can_ram_layout layout;
+
+               can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
+               priv->rx_obj_num = layout.default_rx;
+               tx_ring->obj_num = layout.default_tx;
+       }
 
-       tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj);
-       if (mcp251xfd_is_fd_mode(priv)) {
-               tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD;
+       if (fd_mode) {
                tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
                rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
+               set_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
        } else {
-               tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN;
                tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
                rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
+               clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
        }
 
-       tx_ring = priv->tx;
-       tx_ring->obj_num = tx_obj_num;
        tx_ring->obj_size = tx_obj_size;
 
-       ram_free = MCP251XFD_RAM_SIZE - tx_obj_num *
-               (tef_obj_size + tx_obj_size);
-
-       for (i = 0;
-            i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
-            i++) {
-               int rx_obj_num;
+       rem = priv->rx_obj_num;
+       for (i = 0; i < ARRAY_SIZE(priv->rx) && rem; i++) {
+               u8 rx_obj_num;
 
-               rx_obj_num = ram_free / rx_obj_size;
-               rx_obj_num = min(1 << (fls(rx_obj_num) - 1),
-                                MCP251XFD_RX_OBJ_NUM_MAX);
+               if (i == 0 && priv->rx_obj_num_coalesce_irq)
+                       rx_obj_num = min_t(u8, priv->rx_obj_num_coalesce_irq * 2,
+                                          MCP251XFD_FIFO_DEPTH);
+               else
+                       rx_obj_num = min_t(u8, rounddown_pow_of_two(rem),
+                                          MCP251XFD_FIFO_DEPTH);
+               rem -= rx_obj_num;
 
                rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
                                  GFP_KERNEL);
@@ -326,13 +495,18 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
                        mcp251xfd_ring_free(priv);
                        return -ENOMEM;
                }
+
                rx_ring->obj_num = rx_obj_num;
                rx_ring->obj_size = rx_obj_size;
                priv->rx[i] = rx_ring;
-
-               ram_free -= rx_ring->obj_num * rx_ring->obj_size;
        }
        priv->rx_ring_num = i;
 
+       hrtimer_init(&priv->rx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       priv->rx_irq_timer.function = mcp251xfd_rx_irq_timer;
+
+       hrtimer_init(&priv->tx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       priv->tx_irq_timer.function = mcp251xfd_tx_irq_timer;
+
        return 0;
 }
index e6d3987..d09f7fb 100644
@@ -254,7 +254,11 @@ int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
        int err, n;
 
        mcp251xfd_for_each_rx_ring(priv, ring, n) {
-               if (!(priv->regs_status.rxif & BIT(ring->fifo_nr)))
+               /* - if RX IRQ coalescing is active, always handle ring 0
+                * - otherwise only handle rings whose RX IRQ is pending
+                */
+               if ((ring->nr > 0 || !priv->rx_obj_num_coalesce_irq) &&
+                   !(priv->regs_status.rxif & BIT(ring->fifo_nr)))
                        continue;
 
                err = mcp251xfd_handle_rxif_ring(priv, ring);
@@ -262,5 +266,11 @@ int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
                        return err;
        }
 
+       if (priv->rx_coalesce_usecs_irq)
+               hrtimer_start(&priv->rx_irq_timer,
+                             ns_to_ktime(priv->rx_coalesce_usecs_irq *
+                                         NSEC_PER_USEC),
+                             HRTIMER_MODE_REL);
+
        return 0;
 }
index 4061660..237617b 100644
@@ -256,5 +256,11 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
                netif_wake_queue(priv->ndev);
        }
 
+       if (priv->tx_coalesce_usecs_irq)
+               hrtimer_start(&priv->tx_irq_timer,
+                             ns_to_ktime(priv->tx_coalesce_usecs_irq *
+                                         NSEC_PER_USEC),
+                             HRTIMER_MODE_REL);
+
        return 0;
 }
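
Both handlers above implement the same timer-based IRQ coalescing
pattern: the FIFO interrupt is left disabled after servicing and a
one-shot hrtimer re-enables it via a pre-cooked SPI message (see
mcp251xfd_rx_irq_timer()/mcp251xfd_tx_irq_timer() earlier), bounding
the interrupt rate. A generic sketch of the pattern under illustrative
names (demo_priv is invented, not the driver's struct):

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/spi/spi.h>

struct demo_priv {
	struct spi_device *spi;
	struct spi_message irq_enable_msg;
	struct hrtimer irq_timer;
	bool down;
};

static enum hrtimer_restart demo_irq_reenable(struct hrtimer *t)
{
	struct demo_priv *priv = container_of(t, struct demo_priv, irq_timer);

	/* don't touch the hardware if the interface went down meanwhile */
	if (!priv->down)
		spi_async(priv->spi, &priv->irq_enable_msg);

	return HRTIMER_NORESTART;
}

static void demo_irq_handled(struct demo_priv *priv, u32 usecs)
{
	if (usecs)	/* timer-based coalescing configured */
		hrtimer_start(&priv->irq_timer,
			      ns_to_ktime(usecs * NSEC_PER_USEC),
			      HRTIMER_MODE_REL);
}
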
index 87cc13d..9cb6b5a 100644
 #define MCP251XFD_REG_DEVID_ID_MASK GENMASK(7, 4)
 #define MCP251XFD_REG_DEVID_REV_MASK GENMASK(3, 0)
 
-/* number of TX FIFO objects, depending on CAN mode
- *
- * FIFO setup: tef: 8*12 bytes = 96 bytes, tx: 8*16 bytes = 128 bytes
- * FIFO setup: tef: 4*12 bytes = 48 bytes, tx: 4*72 bytes = 288 bytes
- */
-#define MCP251XFD_RX_OBJ_NUM_MAX 32
-#define MCP251XFD_TX_OBJ_NUM_CAN 8
-#define MCP251XFD_TX_OBJ_NUM_CANFD 4
-
-#if MCP251XFD_TX_OBJ_NUM_CAN > MCP251XFD_TX_OBJ_NUM_CANFD
-#define MCP251XFD_TX_OBJ_NUM_MAX MCP251XFD_TX_OBJ_NUM_CAN
-#else
-#define MCP251XFD_TX_OBJ_NUM_MAX MCP251XFD_TX_OBJ_NUM_CANFD
-#endif
-
-#define MCP251XFD_NAPI_WEIGHT 32
-
 /* SPI commands */
 #define MCP251XFD_SPI_INSTRUCTION_RESET 0x0000
 #define MCP251XFD_SPI_INSTRUCTION_WRITE 0x2000
@@ -404,6 +387,9 @@ static_assert(MCP251XFD_TIMESTAMP_WORK_DELAY_SEC <
 #define MCP251XFD_OSC_STAB_TIMEOUT_US (10 * MCP251XFD_OSC_STAB_SLEEP_US)
 #define MCP251XFD_POLL_SLEEP_US (10)
 #define MCP251XFD_POLL_TIMEOUT_US (USEC_PER_MSEC)
+
+/* Misc */
+#define MCP251XFD_NAPI_WEIGHT 32
 #define MCP251XFD_SOFTRESET_RETRIES_MAX 3
 #define MCP251XFD_READ_CRC_RETRIES_MAX 3
 #define MCP251XFD_ECC_CNT_MAX 2
@@ -412,12 +398,26 @@ static_assert(MCP251XFD_TIMESTAMP_WORK_DELAY_SEC <
 
 /* FIFO and Ring */
 #define MCP251XFD_FIFO_TEF_NUM 1U
-#define MCP251XFD_FIFO_RX_NUM_MAX 1U
+#define MCP251XFD_FIFO_RX_NUM 3U
 #define MCP251XFD_FIFO_TX_NUM 1U
 
+#define MCP251XFD_FIFO_DEPTH 32U
+
+#define MCP251XFD_RX_OBJ_NUM_MIN 16U
+#define MCP251XFD_RX_OBJ_NUM_MAX (MCP251XFD_FIFO_RX_NUM * MCP251XFD_FIFO_DEPTH)
+#define MCP251XFD_RX_FIFO_DEPTH_MIN 4U
+#define MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN 8U
+
+#define MCP251XFD_TX_OBJ_NUM_MIN 2U
+#define MCP251XFD_TX_OBJ_NUM_MAX 16U
+#define MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT 8U
+#define MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT 4U
+#define MCP251XFD_TX_FIFO_DEPTH_MIN 2U
+#define MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN 2U
+
 static_assert(MCP251XFD_FIFO_TEF_NUM == 1U);
 static_assert(MCP251XFD_FIFO_TEF_NUM == MCP251XFD_FIFO_TX_NUM);
-static_assert(MCP251XFD_FIFO_RX_NUM_MAX <= 4U);
+static_assert(MCP251XFD_FIFO_RX_NUM <= 4U);
 
 /* Silence TX MAB overflow warnings */
 #define MCP251XFD_QUIRK_MAB_NO_WARN BIT(0)
@@ -519,7 +519,12 @@ struct mcp251xfd_tef_ring {
        /* u8 obj_num equals tx_ring->obj_num */
        /* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */
 
+       union mcp251xfd_write_reg_buf irq_enable_buf;
+       struct spi_transfer irq_enable_xfer;
+       struct spi_message irq_enable_msg;
+
        union mcp251xfd_write_reg_buf uinc_buf;
+       union mcp251xfd_write_reg_buf uinc_irq_disable_buf;
        struct spi_transfer uinc_xfer[MCP251XFD_TX_OBJ_NUM_MAX];
 };
 
@@ -547,8 +552,13 @@ struct mcp251xfd_rx_ring {
        u8 obj_num;
        u8 obj_size;
 
+       union mcp251xfd_write_reg_buf irq_enable_buf;
+       struct spi_transfer irq_enable_xfer;
+       struct spi_message irq_enable_msg;
+
        union mcp251xfd_write_reg_buf uinc_buf;
-       struct spi_transfer uinc_xfer[MCP251XFD_RX_OBJ_NUM_MAX];
+       union mcp251xfd_write_reg_buf uinc_irq_disable_buf;
+       struct spi_transfer uinc_xfer[MCP251XFD_FIFO_DEPTH];
        struct mcp251xfd_hw_rx_obj_canfd obj[];
 };
 
@@ -584,6 +594,13 @@ struct mcp251xfd_devtype_data {
        u32 quirks;
 };
 
+enum mcp251xfd_flags {
+       MCP251XFD_FLAGS_DOWN,
+       MCP251XFD_FLAGS_FD_MODE,
+
+       __MCP251XFD_FLAGS_SIZE__
+};
+
 struct mcp251xfd_priv {
        struct can_priv can;
        struct can_rx_offload offload;
@@ -606,10 +623,20 @@ struct mcp251xfd_priv {
        u32 spi_max_speed_hz_slow;
 
        struct mcp251xfd_tef_ring tef[MCP251XFD_FIFO_TEF_NUM];
-       struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM_MAX];
+       struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM];
        struct mcp251xfd_tx_ring tx[MCP251XFD_FIFO_TX_NUM];
 
+       DECLARE_BITMAP(flags, __MCP251XFD_FLAGS_SIZE__);
+
        u8 rx_ring_num;
+       u8 rx_obj_num;
+       u8 rx_obj_num_coalesce_irq;
+       u8 tx_obj_num_coalesce_irq;
+
+       u32 rx_coalesce_usecs_irq;
+       u32 tx_coalesce_usecs_irq;
+       struct hrtimer rx_irq_timer;
+       struct hrtimer tx_irq_timer;
 
        struct mcp251xfd_ecc ecc;
        struct mcp251xfd_regs_status regs_status;
@@ -891,7 +918,9 @@ int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv);
 u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size,
                             const void *data, size_t data_size);
 u16 mcp251xfd_crc16_compute(const void *data, size_t data_size);
+void mcp251xfd_ethtool_init(struct mcp251xfd_priv *priv);
 int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv);
+extern const struct can_ram_config mcp251xfd_ram_config;
 int mcp251xfd_ring_init(struct mcp251xfd_priv *priv);
 void mcp251xfd_ring_free(struct mcp251xfd_priv *priv);
 int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv);
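
The new flags bitmap replaces ad-hoc booleans: the enum names the bits,
DECLARE_BITMAP() sizes the storage from the sentinel, and set_bit(),
clear_bit() and test_bit() give atomic access from IRQ, timer and
process context alike. A sketch of the idiom with invented names:

#include <linux/bitops.h>
#include <linux/types.h>

enum demo_flags {
	DEMO_FLAGS_DOWN,
	DEMO_FLAGS_FD_MODE,

	__DEMO_FLAGS_SIZE__
};

struct demo_priv {
	DECLARE_BITMAP(flags, __DEMO_FLAGS_SIZE__);
};

static void demo_stop(struct demo_priv *priv)
{
	/* the hrtimer callbacks above test this bit before touching HW */
	set_bit(DEMO_FLAGS_DOWN, priv->flags);
}
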
index 88d2540..c97ffa7 100644
@@ -173,12 +173,11 @@ static int es58x_fd_rx_event_msg(struct net_device *netdev,
        const struct es58x_fd_rx_event_msg *rx_event_msg;
        int ret;
 
+       rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg;
        ret = es58x_check_msg_len(es58x_dev->dev, *rx_event_msg, msg_len);
        if (ret)
                return ret;
 
-       rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg;
-
        return es58x_rx_err_msg(netdev, rx_event_msg->error_code,
                                rx_event_msg->event_code,
                                get_unaligned_le64(&rx_event_msg->timestamp));
index d35749f..67408e3 100644
@@ -9,11 +9,12 @@
  * Many thanks to all socketcan devs!
  */
 
+#include <linux/bitfield.h>
 #include <linux/ethtool.h>
 #include <linux/init.h>
-#include <linux/signal.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
+#include <linux/signal.h>
 #include <linux/usb.h>
 
 #include <linux/can.h>
 #include <linux/can/error.h>
 
 /* Device specific constants */
-#define USB_GSUSB_1_VENDOR_ID      0x1d50
-#define USB_GSUSB_1_PRODUCT_ID     0x606f
+#define USB_GSUSB_1_VENDOR_ID 0x1d50
+#define USB_GSUSB_1_PRODUCT_ID 0x606f
 
-#define USB_CANDLELIGHT_VENDOR_ID  0x1209
+#define USB_CANDLELIGHT_VENDOR_ID 0x1209
 #define USB_CANDLELIGHT_PRODUCT_ID 0x2323
 
-#define GSUSB_ENDPOINT_IN          1
-#define GSUSB_ENDPOINT_OUT         2
+#define USB_CES_CANEXT_FD_VENDOR_ID 0x1cd2
+#define USB_CES_CANEXT_FD_PRODUCT_ID 0x606f
+
+#define USB_ABE_CANDEBUGGER_FD_VENDOR_ID 0x16d0
+#define USB_ABE_CANDEBUGGER_FD_PRODUCT_ID 0x10b8
+
+#define GSUSB_ENDPOINT_IN 1
+#define GSUSB_ENDPOINT_OUT 2
 
 /* Device specific constants */
 enum gs_usb_breq {
@@ -40,6 +47,11 @@ enum gs_usb_breq {
        GS_USB_BREQ_DEVICE_CONFIG,
        GS_USB_BREQ_TIMESTAMP,
        GS_USB_BREQ_IDENTIFY,
+       GS_USB_BREQ_GET_USER_ID,
+       GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING = GS_USB_BREQ_GET_USER_ID,
+       GS_USB_BREQ_SET_USER_ID,
+       GS_USB_BREQ_DATA_BITTIMING,
+       GS_USB_BREQ_BT_CONST_EXT,
 };
 
 enum gs_can_mode {
@@ -87,11 +99,18 @@ struct gs_device_config {
        __le32 hw_version;
 } __packed;
 
-#define GS_CAN_MODE_NORMAL               0
-#define GS_CAN_MODE_LISTEN_ONLY          BIT(0)
-#define GS_CAN_MODE_LOOP_BACK            BIT(1)
-#define GS_CAN_MODE_TRIPLE_SAMPLE        BIT(2)
-#define GS_CAN_MODE_ONE_SHOT             BIT(3)
+#define GS_CAN_MODE_NORMAL 0
+#define GS_CAN_MODE_LISTEN_ONLY BIT(0)
+#define GS_CAN_MODE_LOOP_BACK BIT(1)
+#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2)
+#define GS_CAN_MODE_ONE_SHOT BIT(3)
+#define GS_CAN_MODE_HW_TIMESTAMP BIT(4)
+/* GS_CAN_FEATURE_IDENTIFY BIT(5) */
+/* GS_CAN_FEATURE_USER_ID BIT(6) */
+#define GS_CAN_MODE_PAD_PKTS_TO_MAX_PKT_SIZE BIT(7)
+#define GS_CAN_MODE_FD BIT(8)
+/* GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9) */
+/* GS_CAN_FEATURE_BT_CONST_EXT BIT(10) */
 
 struct gs_device_mode {
        __le32 mode;
@@ -116,12 +135,25 @@ struct gs_identify_mode {
        __le32 mode;
 } __packed;
 
-#define GS_CAN_FEATURE_LISTEN_ONLY      BIT(0)
-#define GS_CAN_FEATURE_LOOP_BACK        BIT(1)
-#define GS_CAN_FEATURE_TRIPLE_SAMPLE    BIT(2)
-#define GS_CAN_FEATURE_ONE_SHOT         BIT(3)
-#define GS_CAN_FEATURE_HW_TIMESTAMP     BIT(4)
-#define GS_CAN_FEATURE_IDENTIFY         BIT(5)
+#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
+#define GS_CAN_FEATURE_LOOP_BACK BIT(1)
+#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2)
+#define GS_CAN_FEATURE_ONE_SHOT BIT(3)
+#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4)
+#define GS_CAN_FEATURE_IDENTIFY BIT(5)
+#define GS_CAN_FEATURE_USER_ID BIT(6)
+#define GS_CAN_FEATURE_PAD_PKTS_TO_MAX_PKT_SIZE BIT(7)
+#define GS_CAN_FEATURE_FD BIT(8)
+#define GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9)
+#define GS_CAN_FEATURE_BT_CONST_EXT BIT(10)
+#define GS_CAN_FEATURE_MASK GENMASK(10, 0)
+
+/* internal quirks - keep in GS_CAN_FEATURE space for now */
+
+/* CANtact Pro original firmware:
+ * BREQ DATA_BITTIMING overlaps with GET_USER_ID
+ */
+#define GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO BIT(31)
 
 struct gs_device_bt_const {
        __le32 feature;
@@ -136,7 +168,50 @@ struct gs_device_bt_const {
        __le32 brp_inc;
 } __packed;
 
-#define GS_CAN_FLAG_OVERFLOW 1
+struct gs_device_bt_const_extended {
+       __le32 feature;
+       __le32 fclk_can;
+       __le32 tseg1_min;
+       __le32 tseg1_max;
+       __le32 tseg2_min;
+       __le32 tseg2_max;
+       __le32 sjw_max;
+       __le32 brp_min;
+       __le32 brp_max;
+       __le32 brp_inc;
+
+       __le32 dtseg1_min;
+       __le32 dtseg1_max;
+       __le32 dtseg2_min;
+       __le32 dtseg2_max;
+       __le32 dsjw_max;
+       __le32 dbrp_min;
+       __le32 dbrp_max;
+       __le32 dbrp_inc;
+} __packed;
+
+#define GS_CAN_FLAG_OVERFLOW BIT(0)
+#define GS_CAN_FLAG_FD BIT(1)
+#define GS_CAN_FLAG_BRS BIT(2)
+#define GS_CAN_FLAG_ESI BIT(3)
+
+struct classic_can {
+       u8 data[8];
+} __packed;
+
+struct classic_can_quirk {
+       u8 data[8];
+       u8 quirk;
+} __packed;
+
+struct canfd {
+       u8 data[64];
+} __packed;
+
+struct canfd_quirk {
+       u8 data[64];
+       u8 quirk;
+} __packed;
 
 struct gs_host_frame {
        u32 echo_id;
@@ -147,7 +222,12 @@ struct gs_host_frame {
        u8 flags;
        u8 reserved;
 
-       u8 data[8];
+       union {
+               DECLARE_FLEX_ARRAY(struct classic_can, classic_can);
+               DECLARE_FLEX_ARRAY(struct classic_can_quirk, classic_can_quirk);
+               DECLARE_FLEX_ARRAY(struct canfd, canfd);
+               DECLARE_FLEX_ARRAY(struct canfd_quirk, canfd_quirk);
+       };
 } __packed;
 /* The GS USB devices make use of the same flags and masks as in
  * linux/can.h and linux/can/error.h, and no additional mapping is necessary.
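
With the flexible-array union above, the wire size of a frame follows
the selected payload member. struct_size() yields the header plus
exactly one payload element, which is how hf_size_tx and hf_size_rx are
derived in gs_can_open() and gs_usb_probe() further down. A sketch (the
helper name is hypothetical):

static void demo_hf_sizes(size_t *classic, size_t *fd)
{
	struct gs_host_frame *hf;

	/* struct_size() = offset of the member + n * element size */
	*classic = struct_size(hf, classic_can, 1);	/* header + 8 data bytes */
	*fd = struct_size(hf, canfd, 1);		/* header + 64 data bytes */
}
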
@@ -158,9 +238,9 @@ struct gs_host_frame {
 /* Only launch a max of GS_MAX_RX_URBS usb requests at a time. */
 #define GS_MAX_RX_URBS 30
 /* Maximum number of interfaces the driver supports per device.
- * Current hardware only supports 2 interfaces. The future may vary.
+ * Current hardware only supports 3 interfaces. The future may vary.
  */
-#define GS_MAX_INTF 2
+#define GS_MAX_INTF 3
 
 struct gs_tx_context {
        struct gs_can *dev;
@@ -176,9 +256,12 @@ struct gs_can {
        struct usb_device *udev;
        struct usb_interface *iface;
 
-       struct can_bittiming_const bt_const;
+       struct can_bittiming_const bt_const, data_bt_const;
        unsigned int channel;   /* channel number */
 
+       u32 feature;
+       unsigned int hf_size_tx;
+
        /* This lock prevents a race condition between xmit and receive. */
        spinlock_t tx_ctx_lock;
        struct gs_tx_context tx_context[GS_MAX_TX_URBS];
@@ -192,6 +275,7 @@ struct gs_usb {
        struct gs_can *canch[GS_MAX_INTF];
        struct usb_anchor rx_submitted;
        struct usb_device *udev;
+       unsigned int hf_size_rx;
        u8 active_channels;
 };
 
@@ -258,11 +342,7 @@ static int gs_cmd_reset(struct gs_can *gsdev)
                             usb_sndctrlpipe(interface_to_usbdev(intf), 0),
                             GS_USB_BREQ_MODE,
                             USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
-                            gsdev->channel,
-                            0,
-                            dm,
-                            sizeof(*dm),
-                            1000);
+                            gsdev->channel, 0, dm, sizeof(*dm), 1000);
 
        kfree(dm);
 
@@ -304,6 +384,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
        struct gs_host_frame *hf = urb->transfer_buffer;
        struct gs_tx_context *txc;
        struct can_frame *cf;
+       struct canfd_frame *cfd;
        struct sk_buff *skb;
 
        BUG_ON(!usbcan);
@@ -332,18 +413,33 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
                return;
 
        if (hf->echo_id == -1) { /* normal rx */
-               skb = alloc_can_skb(dev->netdev, &cf);
-               if (!skb)
-                       return;
+               if (hf->flags & GS_CAN_FLAG_FD) {
+                       skb = alloc_canfd_skb(dev->netdev, &cfd);
+                       if (!skb)
+                               return;
+
+                       cfd->can_id = le32_to_cpu(hf->can_id);
+                       cfd->len = can_fd_dlc2len(hf->can_dlc);
+                       if (hf->flags & GS_CAN_FLAG_BRS)
+                               cfd->flags |= CANFD_BRS;
+                       if (hf->flags & GS_CAN_FLAG_ESI)
+                               cfd->flags |= CANFD_ESI;
+
+                       memcpy(cfd->data, hf->canfd->data, cfd->len);
+               } else {
+                       skb = alloc_can_skb(dev->netdev, &cf);
+                       if (!skb)
+                               return;
 
-               cf->can_id = le32_to_cpu(hf->can_id);
+                       cf->can_id = le32_to_cpu(hf->can_id);
+                       can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode);
 
-               can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode);
-               memcpy(cf->data, hf->data, 8);
+                       memcpy(cf->data, hf->classic_can->data, 8);
 
-               /* ERROR frames tell us information about the controller */
-               if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
-                       gs_update_state(dev, cf);
+                       /* ERROR frames tell us information about the controller */
+                       if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
+                               gs_update_state(dev, cf);
+               }
 
                netdev->stats.rx_packets++;
                netdev->stats.rx_bytes += hf->can_dlc;
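
For CAN-FD the DLC is no longer the byte count: DLC 0..8 map 1:1, while
9..15 select 12, 16, 20, 24, 32, 48 and 64 data bytes, which is the
conversion can_fd_dlc2len() and can_fd_len2dlc() perform above. A
lookup-table sketch of the decode direction (illustrative names):

static const u8 demo_dlc2len[] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8,	/* DLC 0..8: identity */
	12, 16, 20, 24, 32, 48, 64	/* DLC 9..15: CAN-FD lengths */
};

static u8 demo_fd_dlc2len(u8 dlc)
{
	return demo_dlc2len[dlc & 0x0f];
}
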
@@ -392,14 +488,10 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
        }
 
  resubmit_urb:
-       usb_fill_bulk_urb(urb,
-                         usbcan->udev,
+       usb_fill_bulk_urb(urb, usbcan->udev,
                          usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
-                         hf,
-                         sizeof(struct gs_host_frame),
-                         gs_usb_receive_bulk_callback,
-                         usbcan
-                         );
+                         hf, dev->parent->hf_size_rx,
+                         gs_usb_receive_bulk_callback, usbcan);
 
        rc = usb_submit_urb(urb, GFP_ATOMIC);
 
@@ -436,11 +528,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
                             usb_sndctrlpipe(interface_to_usbdev(intf), 0),
                             GS_USB_BREQ_BITTIMING,
                             USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
-                            dev->channel,
-                            0,
-                            dbt,
-                            sizeof(*dbt),
-                            1000);
+                            dev->channel, 0, dbt, sizeof(*dbt), 1000);
 
        kfree(dbt);
 
@@ -451,6 +539,44 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
        return (rc > 0) ? 0 : rc;
 }
 
+static int gs_usb_set_data_bittiming(struct net_device *netdev)
+{
+       struct gs_can *dev = netdev_priv(netdev);
+       struct can_bittiming *bt = &dev->can.data_bittiming;
+       struct usb_interface *intf = dev->iface;
+       struct gs_device_bittiming *dbt;
+       u8 request = GS_USB_BREQ_DATA_BITTIMING;
+       int rc;
+
+       dbt = kmalloc(sizeof(*dbt), GFP_KERNEL);
+       if (!dbt)
+               return -ENOMEM;
+
+       dbt->prop_seg = cpu_to_le32(bt->prop_seg);
+       dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1);
+       dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2);
+       dbt->sjw = cpu_to_le32(bt->sjw);
+       dbt->brp = cpu_to_le32(bt->brp);
+
+       if (dev->feature & GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO)
+               request = GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING;
+
+       /* request bit timings */
+       rc = usb_control_msg(interface_to_usbdev(intf),
+                            usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+                            request,
+                            USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+                            dev->channel, 0, dbt, sizeof(*dbt), 1000);
+
+       kfree(dbt);
+
+       if (rc < 0)
+               dev_err(netdev->dev.parent,
+                       "Couldn't set data bittimings (err=%d)", rc);
+
+       return (rc > 0) ? 0 : rc;
+}
+
 static void gs_usb_xmit_callback(struct urb *urb)
 {
        struct gs_tx_context *txc = urb->context;
@@ -460,10 +586,8 @@ static void gs_usb_xmit_callback(struct urb *urb)
        if (urb->status)
                netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id);
 
-       usb_free_coherent(urb->dev,
-                         urb->transfer_buffer_length,
-                         urb->transfer_buffer,
-                         urb->transfer_dma);
+       usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+                         urb->transfer_buffer, urb->transfer_dma);
 }
 
 static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
@@ -474,6 +598,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
        struct urb *urb;
        struct gs_host_frame *hf;
        struct can_frame *cf;
+       struct canfd_frame *cfd;
        int rc;
        unsigned int idx;
        struct gs_tx_context *txc;
@@ -491,7 +616,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
        if (!urb)
                goto nomem_urb;
 
-       hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC,
+       hf = usb_alloc_coherent(dev->udev, dev->hf_size_tx, GFP_ATOMIC,
                                &urb->transfer_dma);
        if (!hf) {
                netdev_err(netdev, "No memory left for USB buffer\n");
@@ -510,19 +635,31 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
        hf->flags = 0;
        hf->reserved = 0;
 
-       cf = (struct can_frame *)skb->data;
+       if (can_is_canfd_skb(skb)) {
+               cfd = (struct canfd_frame *)skb->data;
 
-       hf->can_id = cpu_to_le32(cf->can_id);
-       hf->can_dlc = can_get_cc_dlc(cf, dev->can.ctrlmode);
+               hf->can_id = cpu_to_le32(cfd->can_id);
+               hf->can_dlc = can_fd_len2dlc(cfd->len);
+               hf->flags |= GS_CAN_FLAG_FD;
+               if (cfd->flags & CANFD_BRS)
+                       hf->flags |= GS_CAN_FLAG_BRS;
+               if (cfd->flags & CANFD_ESI)
+                       hf->flags |= GS_CAN_FLAG_ESI;
 
-       memcpy(hf->data, cf->data, cf->len);
+               memcpy(hf->canfd->data, cfd->data, cfd->len);
+       } else {
+               cf = (struct can_frame *)skb->data;
+
+               hf->can_id = cpu_to_le32(cf->can_id);
+               hf->can_dlc = can_get_cc_dlc(cf, dev->can.ctrlmode);
+
+               memcpy(hf->classic_can->data, cf->data, cf->len);
+       }
 
        usb_fill_bulk_urb(urb, dev->udev,
                          usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
-                         hf,
-                         sizeof(*hf),
-                         gs_usb_xmit_callback,
-                         txc);
+                         hf, dev->hf_size_tx,
+                         gs_usb_xmit_callback, txc);
 
        urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
        usb_anchor_urb(urb, &dev->tx_submitted);
@@ -539,10 +676,8 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
                gs_free_tx_context(txc);
 
                usb_unanchor_urb(urb);
-               usb_free_coherent(dev->udev,
-                                 sizeof(*hf),
-                                 hf,
-                                 urb->transfer_dma);
+               usb_free_coherent(dev->udev, urb->transfer_buffer_length,
+                                 urb->transfer_buffer, urb->transfer_dma);
 
                if (rc == -ENODEV) {
                        netif_device_detach(netdev);
@@ -562,10 +697,8 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
        return NETDEV_TX_OK;
 
  badidx:
-       usb_free_coherent(dev->udev,
-                         sizeof(*hf),
-                         hf,
-                         urb->transfer_dma);
+       usb_free_coherent(dev->udev, urb->transfer_buffer_length,
+                         urb->transfer_buffer, urb->transfer_dma);
  nomem_hf:
        usb_free_urb(urb);
 
@@ -582,6 +715,7 @@ static int gs_can_open(struct net_device *netdev)
        struct gs_usb *parent = dev->parent;
        int rc, i;
        struct gs_device_mode *dm;
+       struct gs_host_frame *hf;
        u32 ctrlmode;
        u32 flags = 0;
 
@@ -589,6 +723,21 @@ static int gs_can_open(struct net_device *netdev)
        if (rc)
                return rc;
 
+       ctrlmode = dev->can.ctrlmode;
+       if (ctrlmode & CAN_CTRLMODE_FD) {
+               flags |= GS_CAN_MODE_FD;
+
+               if (dev->feature & GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX)
+                       dev->hf_size_tx = struct_size(hf, canfd_quirk, 1);
+               else
+                       dev->hf_size_tx = struct_size(hf, canfd, 1);
+       } else {
+               if (dev->feature & GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX)
+                       dev->hf_size_tx = struct_size(hf, classic_can_quirk, 1);
+               else
+                       dev->hf_size_tx = struct_size(hf, classic_can, 1);
+       }
+
        if (!parent->active_channels) {
                for (i = 0; i < GS_MAX_RX_URBS; i++) {
                        struct urb *urb;
@@ -601,7 +750,7 @@ static int gs_can_open(struct net_device *netdev)
 
                        /* alloc rx buffer */
                        buf = usb_alloc_coherent(dev->udev,
-                                                sizeof(struct gs_host_frame),
+                                                dev->parent->hf_size_rx,
                                                 GFP_KERNEL,
                                                 &urb->transfer_dma);
                        if (!buf) {
@@ -617,9 +766,8 @@ static int gs_can_open(struct net_device *netdev)
                                          usb_rcvbulkpipe(dev->udev,
                                                          GSUSB_ENDPOINT_IN),
                                          buf,
-                                         sizeof(struct gs_host_frame),
-                                         gs_usb_receive_bulk_callback,
-                                         parent);
+                                         dev->parent->hf_size_rx,
+                                         gs_usb_receive_bulk_callback, parent);
                        urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 
                        usb_anchor_urb(urb, &parent->rx_submitted);
@@ -630,8 +778,7 @@ static int gs_can_open(struct net_device *netdev)
                                        netif_device_detach(dev->netdev);
 
                                netdev_err(netdev,
-                                          "usb_submit failed (err=%d)\n",
-                                          rc);
+                                          "usb_submit failed (err=%d)\n", rc);
 
                                usb_unanchor_urb(urb);
                                usb_free_urb(urb);
@@ -650,8 +797,6 @@ static int gs_can_open(struct net_device *netdev)
                return -ENOMEM;
 
        /* flags */
-       ctrlmode = dev->can.ctrlmode;
-
        if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
                flags |= GS_CAN_MODE_LOOP_BACK;
        else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
@@ -672,13 +817,8 @@ static int gs_can_open(struct net_device *netdev)
        rc = usb_control_msg(interface_to_usbdev(dev->iface),
                             usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
                             GS_USB_BREQ_MODE,
-                            USB_DIR_OUT | USB_TYPE_VENDOR |
-                            USB_RECIP_INTERFACE,
-                            dev->channel,
-                            0,
-                            dm,
-                            sizeof(*dm),
-                            1000);
+                            USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+                            dev->channel, 0, dm, sizeof(*dm), 1000);
 
        if (rc < 0) {
                netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
@@ -755,16 +895,10 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
                imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
 
        rc = usb_control_msg(interface_to_usbdev(dev->iface),
-                            usb_sndctrlpipe(interface_to_usbdev(dev->iface),
-                                            0),
+                            usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
                             GS_USB_BREQ_IDENTIFY,
-                            USB_DIR_OUT | USB_TYPE_VENDOR |
-                            USB_RECIP_INTERFACE,
-                            dev->channel,
-                            0,
-                            imode,
-                            sizeof(*imode),
-                            100);
+                            USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+                            dev->channel, 0, imode, sizeof(*imode), 100);
 
        kfree(imode);
 
@@ -803,6 +937,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
        struct net_device *netdev;
        int rc;
        struct gs_device_bt_const *bt_const;
+       struct gs_device_bt_const_extended *bt_const_extended;
        u32 feature;
 
        bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
@@ -814,11 +949,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
                             usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
                             GS_USB_BREQ_BT_CONST,
                             USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
-                            channel,
-                            0,
-                            bt_const,
-                            sizeof(*bt_const),
-                            1000);
+                            channel, 0, bt_const, sizeof(*bt_const), 1000);
 
        if (rc < 0) {
                dev_err(&intf->dev,
@@ -875,6 +1006,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
        dev->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;
 
        feature = le32_to_cpu(bt_const->feature);
+       dev->feature = FIELD_GET(GS_CAN_FEATURE_MASK, feature);
        if (feature & GS_CAN_FEATURE_LISTEN_ONLY)
                dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
 
@@ -887,7 +1019,37 @@ static struct gs_can *gs_make_candev(unsigned int channel,
        if (feature & GS_CAN_FEATURE_ONE_SHOT)
                dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
 
-       SET_NETDEV_DEV(netdev, &intf->dev);
+       if (feature & GS_CAN_FEATURE_FD) {
+               dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+               /* The data bit timing will be overwritten if
+                * GS_CAN_FEATURE_BT_CONST_EXT is set.
+                */
+               dev->can.data_bittiming_const = &dev->bt_const;
+               dev->can.do_set_data_bittiming = gs_usb_set_data_bittiming;
+       }
+
+       /* The CANtact Pro from LinkLayer Labs is based on the
+        * LPC54616 µC, which is affected by the NXP LPC USB transfer
+        * erratum. However, the current firmware (version 2) doesn't
+        * set the GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX bit. Set the
+        * feature GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX to workaround
+        * feature GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX to work around
+        *
+        * For the GS_USB_BREQ_DATA_BITTIMING USB control message the
+        * CANtact Pro firmware uses a request value, which is already
+        * used by the candleLight firmware for a different purpose
+        * (GS_USB_BREQ_GET_USER_ID). Set the feature
+        * GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO to work around this
+        * issue.
+        */
+       if (dev->udev->descriptor.idVendor == cpu_to_le16(USB_GSUSB_1_VENDOR_ID) &&
+           dev->udev->descriptor.idProduct == cpu_to_le16(USB_GSUSB_1_PRODUCT_ID) &&
+           dev->udev->manufacturer && dev->udev->product &&
+           !strcmp(dev->udev->manufacturer, "LinkLayer Labs") &&
+           !strcmp(dev->udev->product, "CANtact Pro") &&
+           (le32_to_cpu(dconf->sw_version) <= 2))
+               dev->feature |= GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX |
+                       GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO;
 
        if (le32_to_cpu(dconf->sw_version) > 1)
                if (feature & GS_CAN_FEATURE_IDENTIFY)
@@ -895,6 +1057,45 @@ static struct gs_can *gs_make_candev(unsigned int channel,
 
        kfree(bt_const);
 
+       /* fetch extended bit timing constants if device has feature
+        * GS_CAN_FEATURE_FD and GS_CAN_FEATURE_BT_CONST_EXT
+        */
+       if (feature & GS_CAN_FEATURE_FD &&
+           feature & GS_CAN_FEATURE_BT_CONST_EXT) {
+               bt_const_extended = kmalloc(sizeof(*bt_const_extended), GFP_KERNEL);
+               if (!bt_const_extended)
+                       return ERR_PTR(-ENOMEM);
+
+               rc = usb_control_msg(interface_to_usbdev(intf),
+                                    usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
+                                    GS_USB_BREQ_BT_CONST_EXT,
+                                    USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+                                    channel, 0, bt_const_extended,
+                                    sizeof(*bt_const_extended),
+                                    1000);
+               if (rc < 0) {
+                       dev_err(&intf->dev,
+                               "Couldn't get extended bit timing const for channel (err=%d)\n",
+                               rc);
+                       kfree(bt_const_extended);
+                       return ERR_PTR(rc);
+               }
+
+               strcpy(dev->data_bt_const.name, "gs_usb");
+               dev->data_bt_const.tseg1_min = le32_to_cpu(bt_const_extended->dtseg1_min);
+               dev->data_bt_const.tseg1_max = le32_to_cpu(bt_const_extended->dtseg1_max);
+               dev->data_bt_const.tseg2_min = le32_to_cpu(bt_const_extended->dtseg2_min);
+               dev->data_bt_const.tseg2_max = le32_to_cpu(bt_const_extended->dtseg2_max);
+               dev->data_bt_const.sjw_max = le32_to_cpu(bt_const_extended->dsjw_max);
+               dev->data_bt_const.brp_min = le32_to_cpu(bt_const_extended->dbrp_min);
+               dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended->dbrp_max);
+               dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended->dbrp_inc);
+
+               dev->can.data_bittiming_const = &dev->data_bt_const;
+       }
+
+       SET_NETDEV_DEV(netdev, &intf->dev);
+
        rc = register_candev(dev->netdev);
        if (rc) {
                free_candev(dev->netdev);
@@ -915,6 +1116,8 @@ static void gs_destroy_candev(struct gs_can *dev)
 static int gs_usb_probe(struct usb_interface *intf,
                        const struct usb_device_id *id)
 {
+       struct usb_device *udev = interface_to_usbdev(intf);
+       struct gs_host_frame *hf;
        struct gs_usb *dev;
        int rc = -ENOMEM;
        unsigned int icount, i;
@@ -928,21 +1131,16 @@ static int gs_usb_probe(struct usb_interface *intf,
        hconf->byte_order = cpu_to_le32(0x0000beef);
 
        /* send host config */
-       rc = usb_control_msg(interface_to_usbdev(intf),
-                            usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+       rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                             GS_USB_BREQ_HOST_FORMAT,
                             USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
-                            1,
-                            intf->cur_altsetting->desc.bInterfaceNumber,
-                            hconf,
-                            sizeof(*hconf),
-                            1000);
+                            1, intf->cur_altsetting->desc.bInterfaceNumber,
+                            hconf, sizeof(*hconf), 1000);
 
        kfree(hconf);
 
        if (rc < 0) {
-               dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
-                       rc);
+               dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", rc);
                return rc;
        }
 
@@ -951,15 +1149,11 @@ static int gs_usb_probe(struct usb_interface *intf,
                return -ENOMEM;
 
        /* read device config */
-       rc = usb_control_msg(interface_to_usbdev(intf),
-                            usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
+       rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                             GS_USB_BREQ_DEVICE_CONFIG,
                             USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
-                            1,
-                            intf->cur_altsetting->desc.bInterfaceNumber,
-                            dconf,
-                            sizeof(*dconf),
-                            1000);
+                            1, intf->cur_altsetting->desc.bInterfaceNumber,
+                            dconf, sizeof(*dconf), 1000);
        if (rc < 0) {
                dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
                        rc);
@@ -985,9 +1179,13 @@ static int gs_usb_probe(struct usb_interface *intf,
        }
 
        init_usb_anchor(&dev->rx_submitted);
+       /* default to classic CAN, switch to CAN-FD if at least one of
+        * our channels supports CAN-FD.
+        */
+       dev->hf_size_rx = struct_size(hf, classic_can, 1);
 
        usb_set_intfdata(intf, dev);
-       dev->udev = interface_to_usbdev(intf);
+       dev->udev = udev;
 
        for (i = 0; i < icount; i++) {
                dev->canch[i] = gs_make_candev(i, intf, dconf);
@@ -1006,6 +1204,9 @@ static int gs_usb_probe(struct usb_interface *intf,
                        return rc;
                }
                dev->canch[i]->parent = dev;
+
+               if (dev->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD)
+                       dev->hf_size_rx = struct_size(hf, canfd, 1);
        }
 
        kfree(dconf);
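
hf_size_rx is sized with struct_size(), which computes the header size plus one flexible payload element, so CAN-FD capable devices get 64-byte payload buffers while classic-only devices stay at 8. A compact userspace model of that computation (the struct layout below is a simplified, hypothetical stand-in for the driver's struct gs_host_frame):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified, hypothetical stand-ins for the driver's payload layouts. */
struct classic_can { uint8_t data[8]; };
struct canfd       { uint8_t data[64]; };

/* Hypothetical reduction of struct gs_host_frame: a fixed header followed
 * by a union of flexible payloads (zero-length arrays, a GNU extension).
 */
struct host_frame {
        uint32_t echo_id;
        uint32_t can_id;
        uint8_t  can_dlc, channel, flags, reserved;
        union {
                struct classic_can classic_can[0];
                struct canfd canfd[0];
        };
};

/* Model of struct_size(hf, member, 1): header plus one payload element. */
#define STRUCT_SIZE(type, member) \
        (offsetof(struct type, member) + sizeof(((struct type *)0)->member[0]))

int main(void)
{
        printf("classic rx buffer: %zu bytes\n",
               STRUCT_SIZE(host_frame, classic_can));   /* 12 + 8  */
        printf("canfd rx buffer:   %zu bytes\n",
               STRUCT_SIZE(host_frame, canfd));         /* 12 + 64 */
        return 0;
}
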
@@ -1015,8 +1216,9 @@ static int gs_usb_probe(struct usb_interface *intf,
 
 static void gs_usb_disconnect(struct usb_interface *intf)
 {
-       unsigned i;
        struct gs_usb *dev = usb_get_intfdata(intf);
+       unsigned int i;
+
        usb_set_intfdata(intf, NULL);
 
        if (!dev) {
@@ -1037,16 +1239,20 @@ static const struct usb_device_id gs_usb_table[] = {
                                      USB_GSUSB_1_PRODUCT_ID, 0) },
        { USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID,
                                      USB_CANDLELIGHT_PRODUCT_ID, 0) },
+       { USB_DEVICE_INTERFACE_NUMBER(USB_CES_CANEXT_FD_VENDOR_ID,
+                                     USB_CES_CANEXT_FD_PRODUCT_ID, 0) },
+       { USB_DEVICE_INTERFACE_NUMBER(USB_ABE_CANDEBUGGER_FD_VENDOR_ID,
+                                     USB_ABE_CANDEBUGGER_FD_PRODUCT_ID, 0) },
        {} /* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE(usb, gs_usb_table);
 
 static struct usb_driver gs_usb_driver = {
-       .name       = "gs_usb",
-       .probe      = gs_usb_probe,
+       .name = "gs_usb",
+       .probe = gs_usb_probe,
        .disconnect = gs_usb_disconnect,
-       .id_table   = gs_usb_table,
+       .id_table = gs_usb_table,
 };
 
 module_usb_driver(gs_usb_driver);
index c7c41d1..5ae0d7c 100644 (file)
@@ -1392,7 +1392,7 @@ static int ucan_probe(struct usb_interface *intf,
         * Stage 3 for the final driver initialisation.
         */
 
-       /* Prepare Memory for control transferes */
+       /* Prepare Memory for control transfers */
        ctl_msg_buffer = devm_kzalloc(&udev->dev,
                                      sizeof(union ucan_ctl_payload),
                                      GFP_KERNEL);
@@ -1526,7 +1526,7 @@ static int ucan_probe(struct usb_interface *intf,
        ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0,
                                     sizeof(union ucan_ctl_payload));
        if (ret > 0) {
-               /* copy string while ensuring zero terminiation */
+               /* copy string while ensuring zero termination */
                strncpy(firmware_str, up->ctl_msg_buffer->raw,
                        sizeof(union ucan_ctl_payload));
                firmware_str[sizeof(union ucan_ctl_payload)] = '\0';
index c42f188..a15619d 100644 (file)
@@ -80,7 +80,7 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
        skb->dev       = dev;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-       netif_rx_ni(skb);
+       netif_rx(skb);
 }
 
 static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
index 47ccc15..577a803 100644 (file)
@@ -33,28 +33,33 @@ struct vxcan_priv {
        struct net_device __rcu *peer;
 };
 
-static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
 {
        struct vxcan_priv *priv = netdev_priv(dev);
        struct net_device *peer;
-       struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+       struct canfd_frame *cfd = (struct canfd_frame *)oskb->data;
        struct net_device_stats *peerstats, *srcstats = &dev->stats;
+       struct sk_buff *skb;
        u8 len;
 
-       if (can_dropped_invalid_skb(dev, skb))
+       if (can_dropped_invalid_skb(dev, oskb))
                return NETDEV_TX_OK;
 
        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        if (unlikely(!peer)) {
-               kfree_skb(skb);
+               kfree_skb(oskb);
                dev->stats.tx_dropped++;
                goto out_unlock;
        }
 
-       skb = can_create_echo_skb(skb);
-       if (!skb)
+       skb = skb_clone(oskb, GFP_ATOMIC);
+       if (skb) {
+               consume_skb(oskb);
+       } else {
+               kfree_skb(oskb);
                goto out_unlock;
+       }
 
        /* reset CAN GW hop counter */
        skb->csum_start = 0;
@@ -63,7 +68,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
        skb->ip_summed  = CHECKSUM_UNNECESSARY;
 
        len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len;
-       if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
+       if (netif_rx(skb) == NET_RX_SUCCESS) {
                srcstats->tx_packets++;
                srcstats->tx_bytes += len;
                peerstats = &peer->stats;
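
The rewritten vxcan_xmit() above follows the usual skb ownership rule: clone the frame for the peer, then always release the original, via consume_skb() on success (an ordinary free) or kfree_skb() on failure (accounted as a drop). A compact userspace model of that hand-off (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Minimal stand-in for an skb: just an owned string. */
struct buf { char *data; };

static struct buf *buf_new(const char *s)
{
        struct buf *b = malloc(sizeof(*b));

        if (!b)
                return NULL;
        b->data = strdup(s);
        if (!b->data) {
                free(b);
                return NULL;
        }
        return b;
}

static void buf_free(struct buf *b)
{
        if (b)
                free(b->data);
        free(b);
}

/* Clone for the peer and always release the original: the driver uses
 * consume_skb() on success (ordinary free) and kfree_skb() on failure
 * (counted in tx_dropped).
 */
static struct buf *hand_off(struct buf *orig, unsigned int *tx_dropped)
{
        struct buf *clone;

        if (!orig)
                return NULL;
        clone = buf_new(orig->data);
        if (!clone)
                (*tx_dropped)++;
        buf_free(orig);
        return clone;
}

int main(void)
{
        unsigned int dropped = 0;
        struct buf *rx = hand_off(buf_new("frame"), &dropped);

        printf("delivered=%s dropped=%u\n", rx ? rx->data : "none", dropped);
        buf_free(rx);
        return 0;
}
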
@@ -148,7 +153,7 @@ static void vxcan_setup(struct net_device *dev)
        dev->hard_header_len    = 0;
        dev->addr_len           = 0;
        dev->tx_queue_len       = 0;
-       dev->flags              = (IFF_NOARP|IFF_ECHO);
+       dev->flags              = IFF_NOARP;
        dev->netdev_ops         = &vxcan_netdev_ops;
        dev->needs_free_netdev  = true;
 
index 122e637..77501f9 100644 (file)
@@ -2110,7 +2110,8 @@ out:
 EXPORT_SYMBOL(b53_get_tag_protocol);
 
 int b53_mirror_add(struct dsa_switch *ds, int port,
-                  struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
+                  struct dsa_mall_mirror_tc_entry *mirror, bool ingress,
+                  struct netlink_ext_ack *extack)
 {
        struct b53_device *dev = ds->priv;
        u16 reg, loc;
index 86e7eb7..3085b6c 100644 (file)
@@ -373,7 +373,8 @@ int b53_mdb_del(struct dsa_switch *ds, int port,
                const struct switchdev_obj_port_mdb *mdb,
                struct dsa_db db);
 int b53_mirror_add(struct dsa_switch *ds, int port,
-                  struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
+                  struct dsa_mall_mirror_tc_entry *mirror, bool ingress,
+                  struct netlink_ext_ack *extack);
 enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
                                           enum dsa_tag_protocol mprot);
 void b53_mirror_del(struct dsa_switch *ds, int port,
index a8bd233..a416240 100644 (file)
 #define  GSWIP_MAC_CTRL_0_GMII_MII     0x0001
 #define  GSWIP_MAC_CTRL_0_GMII_RGMII   0x0002
 #define GSWIP_MAC_CTRL_2p(p)           (0x905 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_2_LCHKL         BIT(2) /* Frame Length Check Long Enable */
 #define GSWIP_MAC_CTRL_2_MLEN          BIT(3) /* Maximum Untagged Frame Length */
 
 /* Ethernet Switch Fetch DMA Port Control Register */
 
 #define XRX200_GPHY_FW_ALIGN   (16 * 1024)
 
+/* Maximum packet size supported by the switch. In theory this should be 10240,
+ * but long packets currently cause lock-ups with an MTU of over 2526. Medium
+ * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
+ * over 2526), hence an MTU value of 2400 seems safe. This issue only affects
+ * packet reception. This is probably caused by the PPA engine, which is on the
+ * RX part of the device. Packet transmission works properly up to 10240.
+ */
+#define GSWIP_MAX_PACKET_LENGTH        2400
+
 struct gswip_hw_info {
        int max_ports;
        int cpu_port;
@@ -863,10 +873,6 @@ static int gswip_setup(struct dsa_switch *ds)
        gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
                          GSWIP_PCE_PCTRL_0p(cpu_port));
 
-       gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
-                         GSWIP_MAC_CTRL_2p(cpu_port));
-       gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN,
-                      GSWIP_MAC_FLEN);
        gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
                          GSWIP_BM_QUEUE_GCTRL);
 
@@ -883,6 +889,8 @@ static int gswip_setup(struct dsa_switch *ds)
                return err;
        }
 
+       ds->mtu_enforcement_ingress = true;
+
        gswip_port_enable(ds, cpu_port, NULL);
 
        ds->configure_vlan_while_not_filtering = false;
@@ -1449,6 +1457,39 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
        return 0;
 }
 
+static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
+{
+       /* Includes 8 bytes for the special header. */
+       return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
+
+static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+       struct gswip_priv *priv = ds->priv;
+       int cpu_port = priv->hw_info->cpu_port;
+
+       /* The CPU port always carries the maximum MTU of all user ports,
+        * so use it to set the switch frame size, including the 8 byte
+        * special header.
+        */
+       if (port == cpu_port) {
+               new_mtu += 8;
+               gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN,
+                              GSWIP_MAC_FLEN);
+       }
+
+       /* Enable MLEN for ports with non-standard MTUs, including the special
+        * header on the CPU port added above.
+        */
+       if (new_mtu != ETH_DATA_LEN)
+               gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
+                                 GSWIP_MAC_CTRL_2p(port));
+       else
+               gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0,
+                                 GSWIP_MAC_CTRL_2p(port));
+
+       return 0;
+}
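
A quick sanity check of the arithmetic above; the values are recomputed here from the constants (VLAN_ETH_HLEN = 18, ETH_FCS_LEN = 4, plus the 8-byte special header on the CPU port), not taken from driver output:

#include <stdio.h>

#define VLAN_ETH_HLEN            18   /* addresses + 802.1Q tag + type */
#define ETH_FCS_LEN               4
#define GSWIP_MAX_PACKET_LENGTH 2400
#define SPECIAL_HDR               8   /* DSA special tag on the CPU port */

int main(void)
{
        /* Largest MTU a user port may request: 2400 - 18 - 4 = 2378 */
        printf("port_max_mtu = %d\n",
               GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN);

        /* GSWIP_MAC_FLEN for the default CPU-port MTU of 1500:
         * 18 + (1500 + 8) + 4 = 1530
         */
        printf("default FLEN = %d\n",
               VLAN_ETH_HLEN + (1500 + SPECIAL_HDR) + ETH_FCS_LEN);
        return 0;
}
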
+
 static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
                                          struct phylink_config *config)
 {
@@ -1794,6 +1835,8 @@ static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
        .port_fdb_add           = gswip_port_fdb_add,
        .port_fdb_del           = gswip_port_fdb_del,
        .port_fdb_dump          = gswip_port_fdb_dump,
+       .port_change_mtu        = gswip_port_change_mtu,
+       .port_max_mtu           = gswip_port_max_mtu,
        .phylink_get_caps       = gswip_xrx200_phylink_get_caps,
        .phylink_mac_config     = gswip_phylink_mac_config,
        .phylink_mac_link_down  = gswip_phylink_mac_link_down,
@@ -1818,6 +1861,8 @@ static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
        .port_fdb_add           = gswip_port_fdb_add,
        .port_fdb_del           = gswip_port_fdb_del,
        .port_fdb_dump          = gswip_port_fdb_dump,
+       .port_change_mtu        = gswip_port_change_mtu,
+       .port_max_mtu           = gswip_port_max_mtu,
        .phylink_get_caps       = gswip_xrx300_phylink_get_caps,
        .phylink_mac_config     = gswip_phylink_mac_config,
        .phylink_mac_link_down  = gswip_phylink_mac_link_down,
index 9d61189..03da369 100644 (file)
@@ -16,6 +16,7 @@ enum ksz_regs {
        REG_IND_DATA_HI,
        REG_IND_DATA_LO,
        REG_IND_MIB_CHECK,
+       REG_IND_BYTE,
        P_FORCE_CTRL,
        P_LINK_STATUS,
        P_LOCAL_CTRL,
index 5dc9899..b275297 100644 (file)
@@ -33,6 +33,7 @@ static const u8 ksz8795_regs[] = {
        [REG_IND_DATA_HI]               = 0x71,
        [REG_IND_DATA_LO]               = 0x75,
        [REG_IND_MIB_CHECK]             = 0x74,
+       [REG_IND_BYTE]                  = 0xA0,
        [P_FORCE_CTRL]                  = 0x0C,
        [P_LINK_STATUS]                 = 0x0E,
        [P_LOCAL_CTRL]                  = 0x07,
@@ -222,6 +223,25 @@ static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
                           bits, set ? bits : 0);
 }
 
+static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
+{
+       struct ksz8 *ksz8 = dev->priv;
+       const u8 *regs = ksz8->regs;
+       u16 ctrl_addr;
+       int ret = 0;
+
+       mutex_lock(&dev->alu_mutex);
+
+       ctrl_addr = IND_ACC_TABLE(table) | addr;
+       ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
+       if (!ret)
+               ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+
+       mutex_unlock(&dev->alu_mutex);
+
+       return ret;
+}
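
ksz8_ind_write8() is a two-step indirect access: the data byte is staged in REG_IND_BYTE, then writing the control word (table selector in the high byte, register offset in the low byte) to REG_IND_CTRL_0 triggers the transfer. A small sketch of how the control word is composed, with hypothetical table/offset values:

#include <stdint.h>
#include <stdio.h>

#define IND_ACC_TABLE(table)    ((table) << 8)

int main(void)
{
        /* Hypothetical values: a table selector and the in-table offset
         * (e.g. REG_IND_EEE_GLOB2_HI = 0x35 from the header below).
         */
        uint16_t table = 0x4;
        uint16_t addr  = 0x35;

        /* Prints 0x0435: the word the driver stores in REG_IND_CTRL_0
         * after staging the data byte in REG_IND_BYTE.
         */
        printf("ctrl_addr = 0x%04x\n", IND_ACC_TABLE(table) | addr);
        return 0;
}
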
+
 static int ksz8_reset_switch(struct ksz_device *dev)
 {
        if (ksz_is_ksz88x3(dev)) {
@@ -1213,7 +1233,7 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
 
 static int ksz8_port_mirror_add(struct dsa_switch *ds, int port,
                                struct dsa_mall_mirror_tc_entry *mirror,
-                               bool ingress)
+                               bool ingress, struct netlink_ext_ack *extack)
 {
        struct ksz_device *dev = ds->priv;
 
@@ -1391,6 +1411,23 @@ static void ksz8_config_cpu_port(struct dsa_switch *ds)
        }
 }
 
+static int ksz8_handle_global_errata(struct dsa_switch *ds)
+{
+       struct ksz_device *dev = ds->priv;
+       int ret = 0;
+
+       /* KSZ87xx Errata DS80000687C.
+        * Module 2: Link drops with some EEE link partners.
+        *   An issue with the EEE next page exchange between the
+        *   KSZ879x/KSZ877x/KSZ876x and some EEE link partners may result in
+        *   the link dropping.
+        */
+       if (dev->ksz87xx_eee_link_erratum)
+               ret = ksz8_ind_write8(dev, TABLE_EEE, REG_IND_EEE_GLOB2_HI, 0);
+
+       return ret;
+}
+
 static int ksz8_setup(struct dsa_switch *ds)
 {
        struct ksz_device *dev = ds->priv;
@@ -1458,7 +1495,7 @@ static int ksz8_setup(struct dsa_switch *ds)
 
        ds->configure_vlan_while_not_filtering = false;
 
-       return 0;
+       return ksz8_handle_global_errata(ds);
 }
 
 static void ksz8_get_caps(struct dsa_switch *ds, int port,
@@ -1575,6 +1612,7 @@ struct ksz_chip_data {
        int num_statics;
        int cpu_ports;
        int port_cnt;
+       bool ksz87xx_eee_link_erratum;
 };
 
 static const struct ksz_chip_data ksz8_switch_chips[] = {
@@ -1586,6 +1624,7 @@ static const struct ksz_chip_data ksz8_switch_chips[] = {
                .num_statics = 8,
                .cpu_ports = 0x10,      /* can be configured as cpu port */
                .port_cnt = 5,          /* total cpu and user ports */
+               .ksz87xx_eee_link_erratum = true,
        },
        {
                /*
@@ -1609,6 +1648,7 @@ static const struct ksz_chip_data ksz8_switch_chips[] = {
                .num_statics = 8,
                .cpu_ports = 0x10,      /* can be configured as cpu port */
                .port_cnt = 4,          /* total cpu and user ports */
+               .ksz87xx_eee_link_erratum = true,
        },
        {
                .chip_id = 0x8765,
@@ -1618,6 +1658,7 @@ static const struct ksz_chip_data ksz8_switch_chips[] = {
                .num_statics = 8,
                .cpu_ports = 0x10,      /* can be configured as cpu port */
                .port_cnt = 5,          /* total cpu and user ports */
+               .ksz87xx_eee_link_erratum = true,
        },
        {
                .chip_id = 0x8830,
@@ -1652,6 +1693,8 @@ static int ksz8_switch_init(struct ksz_device *dev)
                        dev->host_mask = chip->cpu_ports;
                        dev->port_mask = (BIT(dev->phy_port_cnt) - 1) |
                                         chip->cpu_ports;
+                       dev->ksz87xx_eee_link_erratum =
+                               chip->ksz87xx_eee_link_erratum;
                        break;
                }
        }
index 6b40bc2..d74defc 100644 (file)
 
 #define IND_ACC_TABLE(table)           ((table) << 8)
 
+/* Indirect EEE register offsets, accessed through the EEE indirect table */
+#define REG_IND_EEE_GLOB2_LO           0x34
+#define REG_IND_EEE_GLOB2_HI           0x35
+
 /* Driver set switch broadcast storm protection at 10% rate. */
 #define BROADCAST_STORM_PROT_RATE      10
 
index 673589d..5f8d94a 100644 (file)
@@ -122,12 +122,23 @@ static const struct of_device_id ksz8795_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, ksz8795_dt_ids);
 
+static const struct spi_device_id ksz8795_spi_ids[] = {
+       { "ksz8765" },
+       { "ksz8794" },
+       { "ksz8795" },
+       { "ksz8863" },
+       { "ksz8873" },
+       { },
+};
+MODULE_DEVICE_TABLE(spi, ksz8795_spi_ids);
+
 static struct spi_driver ksz8795_spi_driver = {
        .driver = {
                .name   = "ksz8795-switch",
                .owner  = THIS_MODULE,
                .of_match_table = of_match_ptr(ksz8795_dt_ids),
        },
+       .id_table = ksz8795_spi_ids,
        .probe  = ksz8795_spi_probe,
        .remove = ksz8795_spi_remove,
        .shutdown = ksz8795_spi_shutdown,
index 94ad6d9..8222c8a 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/platform_data/microchip-ksz.h>
 #include <linux/phy.h>
 #include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
 #include <net/dsa.h>
 #include <net/switchdev.h>
 
@@ -182,6 +183,29 @@ static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
                           bits, set ? bits : 0);
 }
 
+static int ksz9477_change_mtu(struct dsa_switch *ds, int port, int mtu)
+{
+       struct ksz_device *dev = ds->priv;
+       u16 frame_size, max_frame = 0;
+       int i;
+
+       frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+       /* Cache the per-port MTU setting */
+       dev->ports[port].max_frame = frame_size;
+
+       for (i = 0; i < dev->port_cnt; i++)
+               max_frame = max(max_frame, dev->ports[i].max_frame);
+
+       return regmap_update_bits(dev->regmap[1], REG_SW_MTU__2,
+                                 REG_SW_MTU_MASK, max_frame);
+}
+
+static int ksz9477_max_mtu(struct dsa_switch *ds, int port)
+{
+       return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
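
Because the KSZ9477 has a single switch-wide frame-size register, the driver caches each port's requested frame size and programs the maximum across all ports. A minimal model of that aggregation (port count and MTU values are made up for illustration):

#include <stdio.h>

#define VLAN_ETH_HLEN   18
#define ETH_FCS_LEN      4
#define NUM_PORTS        3   /* made-up port count for illustration */

int main(void)
{
        /* Hypothetical per-port MTUs; port 1 runs jumbo frames. */
        int mtu[NUM_PORTS] = { 1500, 8978, 1500 };
        int i, frame, max_frame = 0;

        for (i = 0; i < NUM_PORTS; i++) {
                frame = mtu[i] + VLAN_ETH_HLEN + ETH_FCS_LEN;
                if (frame > max_frame)
                        max_frame = frame;
        }

        /* Prints 9000: the value programmed into REG_SW_MTU__2 */
        printf("max_frame = %d\n", max_frame);
        return 0;
}
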
+
 static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
 {
        unsigned int val;
@@ -994,7 +1018,7 @@ exit:
 
 static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
                                   struct dsa_mall_mirror_tc_entry *mirror,
-                                  bool ingress)
+                                  bool ingress, struct netlink_ext_ack *extack)
 {
        struct ksz_device *dev = ds->priv;
 
@@ -1416,8 +1440,14 @@ static int ksz9477_setup(struct dsa_switch *ds)
        /* Does not work correctly with tail tagging. */
        ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);
 
-       /* accept packet up to 2000bytes */
-       ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_LEGAL_PACKET_DISABLE, true);
+       /* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
+       ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
+
+       /* Now we can configure default MTU value */
+       ret = regmap_update_bits(dev->regmap[1], REG_SW_MTU__2, REG_SW_MTU_MASK,
+                                VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
+       if (ret)
+               return ret;
 
        ksz9477_config_cpu_port(ds);
 
@@ -1464,6 +1494,8 @@ static const struct dsa_switch_ops ksz9477_switch_ops = {
        .port_mirror_add        = ksz9477_port_mirror_add,
        .port_mirror_del        = ksz9477_port_mirror_del,
        .get_stats64            = ksz9477_get_stats64,
+       .port_change_mtu        = ksz9477_change_mtu,
+       .port_max_mtu           = ksz9477_max_mtu,
 };
 
 static u32 ksz9477_get_port_addr(int port, int offset)
index 16939f2..0bd5846 100644 (file)
 #define REG_SW_MAC_ADDR_5              0x0307
 
 #define REG_SW_MTU__2                  0x0308
+#define REG_SW_MTU_MASK                        GENMASK(13, 0)
 
 #define REG_SW_ISP_TPID__2             0x030A
 
 /* 148,800 frames * 67 ms / 100 */
 #define BROADCAST_STORM_VALUE          9969
 
+#define KSZ9477_MAX_FRAME_SIZE         9000
+
 #endif /* KSZ9477_REGS_H */
index 940bb96..87ca464 100644 (file)
@@ -96,12 +96,24 @@ static const struct of_device_id ksz9477_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
 
+static const struct spi_device_id ksz9477_spi_ids[] = {
+       { "ksz9477" },
+       { "ksz9897" },
+       { "ksz9893" },
+       { "ksz9563" },
+       { "ksz8563" },
+       { "ksz9567" },
+       { },
+};
+MODULE_DEVICE_TABLE(spi, ksz9477_spi_ids);
+
 static struct spi_driver ksz9477_spi_driver = {
        .driver = {
                .name   = "ksz9477-switch",
                .owner  = THIS_MODULE,
                .of_match_table = of_match_ptr(ksz9477_dt_ids),
        },
+       .id_table = ksz9477_spi_ids,
        .probe  = ksz9477_spi_probe,
        .remove = ksz9477_spi_remove,
        .shutdown = ksz9477_spi_shutdown,
index 4ff0a15..485d4a9 100644 (file)
@@ -41,6 +41,7 @@ struct ksz_port {
 
        struct ksz_port_mib mib;
        phy_interface_t interface;
+       u16 max_frame;
 };
 
 struct ksz_device {
@@ -76,6 +77,7 @@ struct ksz_device {
        phy_interface_t compat_interface;
        u32 regs_size;
        bool phy_errata_9477;
+       bool ksz87xx_eee_link_erratum;
        bool synclko_125;
        bool synclko_disable;
 
index 66b00c1..19f0035 100644 (file)
@@ -1714,7 +1714,7 @@ static int mt753x_mirror_port_set(unsigned int id, u32 val)
 
 static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
                                  struct dsa_mall_mirror_tc_entry *mirror,
-                                 bool ingress)
+                                 bool ingress, struct netlink_ext_ack *extack)
 {
        struct mt7530_priv *priv = ds->priv;
        int monitor_port;
@@ -2941,7 +2941,7 @@ mt753x_phylink_validate(struct dsa_switch *ds, int port,
 
        phylink_set_port_modes(mask);
 
-       if (state->interface != PHY_INTERFACE_MODE_TRGMII ||
+       if (state->interface != PHY_INTERFACE_MODE_TRGMII &&
            !phy_interface_mode_is_8023z(state->interface)) {
                phylink_set(mask, 10baseT_Half);
                phylink_set(mask, 10baseT_Full);
index 84b90fc..64f4fdd 100644 (file)
@@ -1667,24 +1667,31 @@ static int mv88e6xxx_pvt_setup(struct mv88e6xxx_chip *chip)
        return 0;
 }
 
-static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
+static int mv88e6xxx_port_fast_age_fid(struct mv88e6xxx_chip *chip, int port,
+                                      u16 fid)
 {
-       struct mv88e6xxx_chip *chip = ds->priv;
-       int err;
-
-       if (dsa_to_port(ds, port)->lag)
+       if (dsa_to_port(chip->ds, port)->lag)
                /* Hardware is incapable of fast-aging a LAG through a
                 * regular ATU move operation. Until we have something
                 * more fancy in place this is a no-op.
                 */
-               return;
+               return -EOPNOTSUPP;
+
+       return mv88e6xxx_g1_atu_remove(chip, fid, port, false);
+}
+
+static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
+{
+       struct mv88e6xxx_chip *chip = ds->priv;
+       int err;
 
        mv88e6xxx_reg_lock(chip);
-       err = mv88e6xxx_g1_atu_remove(chip, 0, port, false);
+       err = mv88e6xxx_port_fast_age_fid(chip, port, 0);
        mv88e6xxx_reg_unlock(chip);
 
        if (err)
-               dev_err(ds->dev, "p%d: failed to flush ATU\n", port);
+               dev_err(chip->ds->dev, "p%d: failed to flush ATU: %d\n",
+                       port, err);
 }
 
 static int mv88e6xxx_vtu_setup(struct mv88e6xxx_chip *chip)
@@ -1791,6 +1798,187 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
        return mv88e6xxx_g1_atu_flush(chip, *fid, true);
 }
 
+static int mv88e6xxx_stu_loadpurge(struct mv88e6xxx_chip *chip,
+                                  struct mv88e6xxx_stu_entry *entry)
+{
+       if (!chip->info->ops->stu_loadpurge)
+               return -EOPNOTSUPP;
+
+       return chip->info->ops->stu_loadpurge(chip, entry);
+}
+
+static int mv88e6xxx_stu_setup(struct mv88e6xxx_chip *chip)
+{
+       struct mv88e6xxx_stu_entry stu = {
+               .valid = true,
+               .sid = 0
+       };
+
+       if (!mv88e6xxx_has_stu(chip))
+               return 0;
+
+       /* Make sure that SID 0 is always valid. This is used by VTU
+        * entries that do not make use of the STU, e.g. when creating
+        * a VLAN upper on a port that is also part of a VLAN
+        * filtering bridge.
+        */
+       return mv88e6xxx_stu_loadpurge(chip, &stu);
+}
+
+static int mv88e6xxx_sid_get(struct mv88e6xxx_chip *chip, u8 *sid)
+{
+       DECLARE_BITMAP(busy, MV88E6XXX_N_SID) = { 0 };
+       struct mv88e6xxx_mst *mst;
+
+       __set_bit(0, busy);
+
+       list_for_each_entry(mst, &chip->msts, node)
+               __set_bit(mst->stu.sid, busy);
+
+       *sid = find_first_zero_bit(busy, MV88E6XXX_N_SID);
+
+       return (*sid >= mv88e6xxx_max_sid(chip)) ? -ENOSPC : 0;
+}
+
+static int mv88e6xxx_mst_put(struct mv88e6xxx_chip *chip, u8 sid)
+{
+       struct mv88e6xxx_mst *mst, *tmp;
+       int err;
+
+       if (!sid)
+               return 0;
+
+       list_for_each_entry_safe(mst, tmp, &chip->msts, node) {
+               if (mst->stu.sid != sid)
+                       continue;
+
+               if (!refcount_dec_and_test(&mst->refcnt))
+                       return 0;
+
+               mst->stu.valid = false;
+               err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+               if (err) {
+                       refcount_set(&mst->refcnt, 1);
+                       return err;
+               }
+
+               list_del(&mst->node);
+               kfree(mst);
+               return 0;
+       }
+
+       return -ENOENT;
+}
+
+static int mv88e6xxx_mst_get(struct mv88e6xxx_chip *chip, struct net_device *br,
+                            u16 msti, u8 *sid)
+{
+       struct mv88e6xxx_mst *mst;
+       int err, i;
+
+       if (!mv88e6xxx_has_stu(chip)) {
+               err = -EOPNOTSUPP;
+               goto err;
+       }
+
+       if (!msti) {
+               *sid = 0;
+               return 0;
+       }
+
+       list_for_each_entry(mst, &chip->msts, node) {
+               if (mst->br == br && mst->msti == msti) {
+                       refcount_inc(&mst->refcnt);
+                       *sid = mst->stu.sid;
+                       return 0;
+               }
+       }
+
+       err = mv88e6xxx_sid_get(chip, sid);
+       if (err)
+               goto err;
+
+       mst = kzalloc(sizeof(*mst), GFP_KERNEL);
+       if (!mst) {
+               err = -ENOMEM;
+               goto err;
+       }
+
+       INIT_LIST_HEAD(&mst->node);
+       refcount_set(&mst->refcnt, 1);
+       mst->br = br;
+       mst->msti = msti;
+       mst->stu.valid = true;
+       mst->stu.sid = *sid;
+
+       /* The bridge starts out with all ports in the disabled state.
+        * But an STU state of disabled means to fall back to the
+        * port-global state. So we set every user port's initial state
+        * to blocking, to match the bridge's behavior.
+        */
+       for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+               mst->stu.state[i] = dsa_is_user_port(chip->ds, i) ?
+                       MV88E6XXX_PORT_CTL0_STATE_BLOCKING :
+                       MV88E6XXX_PORT_CTL0_STATE_DISABLED;
+
+       err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+       if (err)
+               goto err_free;
+
+       list_add_tail(&mst->node, &chip->msts);
+       return 0;
+
+err_free:
+       kfree(mst);
+err:
+       return err;
+}
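
SIDs are handed out first-fit from a bitmap, with SID 0 permanently reserved as the "no STU entry" sentinel, and each (bridge, MSTI) pair holds a refcount on its SID so the STU entry is purged only when the last VLAN mapping goes away. A self-contained sketch of the first-fit scan (array-based here instead of the kernel's bitmap helpers):

#include <stdio.h>

#define N_SID   64   /* mirrors MV88E6XXX_N_SID */

/* First-fit SID allocation modelled on mv88e6xxx_sid_get(): SID 0 is
 * reserved for "no STU entry" and therefore always marked busy.
 */
static int sid_get(const int *in_use, int n_in_use, int max_sid)
{
        char busy[N_SID] = { 0 };
        int i;

        busy[0] = 1;
        for (i = 0; i < n_in_use; i++)
                busy[in_use[i]] = 1;

        for (i = 0; i < N_SID; i++)
                if (!busy[i])
                        break;

        return i >= max_sid ? -1 : i;   /* -ENOSPC in the driver */
}

int main(void)
{
        int allocated[] = { 1, 2, 4 };

        /* Prints 3: the first free SID after the reserved SID 0 */
        printf("next sid = %d\n", sid_get(allocated, 3, 63));
        return 0;
}
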
+
+static int mv88e6xxx_port_mst_state_set(struct dsa_switch *ds, int port,
+                                       const struct switchdev_mst_state *st)
+{
+       struct dsa_port *dp = dsa_to_port(ds, port);
+       struct mv88e6xxx_chip *chip = ds->priv;
+       struct mv88e6xxx_mst *mst;
+       u8 state;
+       int err;
+
+       if (!mv88e6xxx_has_stu(chip))
+               return -EOPNOTSUPP;
+
+       switch (st->state) {
+       case BR_STATE_DISABLED:
+       case BR_STATE_BLOCKING:
+       case BR_STATE_LISTENING:
+               state = MV88E6XXX_PORT_CTL0_STATE_BLOCKING;
+               break;
+       case BR_STATE_LEARNING:
+               state = MV88E6XXX_PORT_CTL0_STATE_LEARNING;
+               break;
+       case BR_STATE_FORWARDING:
+               state = MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       list_for_each_entry(mst, &chip->msts, node) {
+               if (mst->br == dsa_port_bridge_dev_get(dp) &&
+                   mst->msti == st->msti) {
+                       if (mst->stu.state[port] == state)
+                               return 0;
+
+                       mst->stu.state[port] = state;
+                       mv88e6xxx_reg_lock(chip);
+                       err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+                       mv88e6xxx_reg_unlock(chip);
+                       return err;
+               }
+       }
+
+       return -ENOENT;
+}
+
 static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
                                        u16 vid)
 {
@@ -2410,6 +2598,12 @@ static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
        if (err)
                return err;
 
+       if (!vlan.valid) {
+               err = mv88e6xxx_mst_put(chip, vlan.sid);
+               if (err)
+                       return err;
+       }
+
        return mv88e6xxx_g1_atu_remove(chip, vlan.fid, port, false);
 }
 
@@ -2455,6 +2649,72 @@ unlock:
        return err;
 }
 
+static int mv88e6xxx_port_vlan_fast_age(struct dsa_switch *ds, int port, u16 vid)
+{
+       struct mv88e6xxx_chip *chip = ds->priv;
+       struct mv88e6xxx_vtu_entry vlan;
+       int err;
+
+       mv88e6xxx_reg_lock(chip);
+
+       err = mv88e6xxx_vtu_get(chip, vid, &vlan);
+       if (err)
+               goto unlock;
+
+       err = mv88e6xxx_port_fast_age_fid(chip, port, vlan.fid);
+
+unlock:
+       mv88e6xxx_reg_unlock(chip);
+
+       return err;
+}
+
+static int mv88e6xxx_vlan_msti_set(struct dsa_switch *ds,
+                                  struct dsa_bridge bridge,
+                                  const struct switchdev_vlan_msti *msti)
+{
+       struct mv88e6xxx_chip *chip = ds->priv;
+       struct mv88e6xxx_vtu_entry vlan;
+       u8 old_sid, new_sid;
+       int err;
+
+       if (!mv88e6xxx_has_stu(chip))
+               return -EOPNOTSUPP;
+
+       mv88e6xxx_reg_lock(chip);
+
+       err = mv88e6xxx_vtu_get(chip, msti->vid, &vlan);
+       if (err)
+               goto unlock;
+
+       if (!vlan.valid) {
+               err = -EINVAL;
+               goto unlock;
+       }
+
+       old_sid = vlan.sid;
+
+       err = mv88e6xxx_mst_get(chip, bridge.dev, msti->msti, &new_sid);
+       if (err)
+               goto unlock;
+
+       if (new_sid != old_sid) {
+               vlan.sid = new_sid;
+
+               err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
+               if (err) {
+                       mv88e6xxx_mst_put(chip, new_sid);
+                       goto unlock;
+               }
+       }
+
+       err = mv88e6xxx_mst_put(chip, old_sid);
+
+unlock:
+       mv88e6xxx_reg_unlock(chip);
+       return err;
+}
+
 static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
                                  const unsigned char *addr, u16 vid,
                                  struct dsa_db db)
@@ -3427,6 +3687,13 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
        if (err)
                goto unlock;
 
+       /* Must be called after mv88e6xxx_vtu_setup (which flushes the
+        * VTU, thereby also flushing the STU).
+        */
+       err = mv88e6xxx_stu_setup(chip);
+       if (err)
+               goto unlock;
+
        /* Setup Switch Port Registers */
        for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
                if (dsa_is_unused_port(ds, i))
@@ -3800,6 +4067,8 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
        .rmu_disable = mv88e6085_g1_rmu_disable,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6352_g1_stu_getnext,
+       .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
        .phylink_get_caps = mv88e6185_phylink_get_caps,
        .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
 };
@@ -3882,6 +4151,8 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
        .phylink_get_caps = mv88e6095_phylink_get_caps,
+       .stu_getnext = mv88e6352_g1_stu_getnext,
+       .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
        .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
 };
 
@@ -3918,6 +4189,8 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6352_g1_stu_getnext,
+       .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
        .phylink_get_caps = mv88e6185_phylink_get_caps,
        .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
 };
@@ -4007,6 +4280,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6352_g1_stu_getnext,
+       .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
        .serdes_power = mv88e6390_serdes_power,
        .serdes_get_lane = mv88e6341_serdes_get_lane,
        /* Check status register pause & lpa register */
@@ -4063,6 +4338,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6352_g1_stu_getnext,
+       .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
        .avb_ops = &mv88e6165_avb_ops,
        .ptp_ops = &mv88e6165_ptp_ops,
        .phylink_get_caps = mv88e6185_phylink_get_caps,
@@ -4099,6 +4376,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6352_g1_stu_getnext,
+       .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
        .avb_ops = &mv88e6165_avb_ops,
        .ptp_ops = &mv88e6165_ptp_ops,
        .phylink_get_caps = mv88e6185_phylink_get_caps,
@@ -4143,6 +4422,8 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6352_g1_stu_getnext,
+       .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
        .phylink_get_caps = mv88e6185_phylink_get_caps,
 };
 
@@ -4189,6 +4470,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6352_g1_stu_getnext,
+       .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
        .serdes_get_lane = mv88e6352_serdes_get_lane,
        .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
        .serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -4240,6 +4523,8 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6352_g1_stu_getnext,
+       .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
        .phylink_get_caps = mv88e6185_phylink_get_caps,
 };
 
@@ -4286,6 +4571,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6352_g1_stu_getnext,
+       .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
        .serdes_get_lane = mv88e6352_serdes_get_lane,
        .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
        .serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -4385,6 +4672,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6390_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6390_g1_stu_getnext,
+       .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
        .serdes_power = mv88e6390_serdes_power,
        .serdes_get_lane = mv88e6390_serdes_get_lane,
        /* Check status register pause & lpa register */
@@ -4446,6 +4735,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6390_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6390_g1_stu_getnext,
+       .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
        .serdes_power = mv88e6390_serdes_power,
        .serdes_get_lane = mv88e6390x_serdes_get_lane,
        /* Check status register pause & lpa register */
@@ -4505,6 +4796,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6390_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6390_g1_stu_getnext,
+       .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
        .serdes_power = mv88e6390_serdes_power,
        .serdes_get_lane = mv88e6390_serdes_get_lane,
        /* Check status register pause & lpa register */
@@ -4567,6 +4860,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6352_g1_stu_getnext,
+       .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
        .serdes_get_lane = mv88e6352_serdes_get_lane,
        .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
        .serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -4667,6 +4962,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6390_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6390_g1_stu_getnext,
+       .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
        .serdes_power = mv88e6390_serdes_power,
        .serdes_get_lane = mv88e6390_serdes_get_lane,
        /* Check status register pause & lpa register */
@@ -4818,6 +5115,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6352_g1_stu_getnext,
+       .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
        .serdes_power = mv88e6390_serdes_power,
        .serdes_get_lane = mv88e6341_serdes_get_lane,
        /* Check status register pause & lpa register */
@@ -4878,6 +5177,8 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6352_g1_stu_getnext,
+       .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
        .phylink_get_caps = mv88e6185_phylink_get_caps,
 };
 
@@ -4920,6 +5221,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6352_g1_stu_getnext,
+       .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
        .avb_ops = &mv88e6352_avb_ops,
        .ptp_ops = &mv88e6352_ptp_ops,
        .phylink_get_caps = mv88e6185_phylink_get_caps,
@@ -4968,6 +5271,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6352_g1_stu_getnext,
+       .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
        .serdes_get_lane = mv88e6352_serdes_get_lane,
        .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
        .serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -5033,6 +5338,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6390_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6390_g1_stu_getnext,
+       .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
        .serdes_power = mv88e6390_serdes_power,
        .serdes_get_lane = mv88e6390_serdes_get_lane,
        /* Check status register pause & lpa register */
@@ -5098,6 +5405,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6390_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6390_g1_stu_getnext,
+       .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
        .serdes_power = mv88e6390_serdes_power,
        .serdes_get_lane = mv88e6390x_serdes_get_lane,
        .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
@@ -5166,6 +5475,8 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
        .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6390_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+       .stu_getnext = mv88e6390_g1_stu_getnext,
+       .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
        .serdes_power = mv88e6393x_serdes_power,
        .serdes_get_lane = mv88e6393x_serdes_get_lane,
        .serdes_pcs_get_state = mv88e6393x_serdes_pcs_get_state,
@@ -5192,6 +5503,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_ports = 10,
                .num_internal_phys = 5,
                .max_vid = 4095,
+               .max_sid = 63,
                .port_base_addr = 0x10,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5234,6 +5546,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_ports = 11,
                .num_internal_phys = 8,
                .max_vid = 4095,
+               .max_sid = 63,
                .port_base_addr = 0x10,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5257,6 +5570,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_ports = 3,
                .num_internal_phys = 5,
                .max_vid = 4095,
+               .max_sid = 63,
                .port_base_addr = 0x10,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5301,6 +5615,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 5,
                .num_gpio = 11,
                .max_vid = 4095,
+               .max_sid = 63,
                .port_base_addr = 0x10,
                .phy_base_addr = 0x10,
                .global1_addr = 0x1b,
@@ -5324,6 +5639,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_ports = 6,
                .num_internal_phys = 5,
                .max_vid = 4095,
+               .max_sid = 63,
                .port_base_addr = 0x10,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5348,6 +5664,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_ports = 6,
                .num_internal_phys = 0,
                .max_vid = 4095,
+               .max_sid = 63,
                .port_base_addr = 0x10,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5371,6 +5688,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_ports = 7,
                .num_internal_phys = 5,
                .max_vid = 4095,
+               .max_sid = 63,
                .port_base_addr = 0x10,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5395,6 +5713,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 5,
                .num_gpio = 15,
                .max_vid = 4095,
+               .max_sid = 63,
                .port_base_addr = 0x10,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5418,6 +5737,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_ports = 7,
                .num_internal_phys = 5,
                .max_vid = 4095,
+               .max_sid = 63,
                .port_base_addr = 0x10,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5442,6 +5762,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 5,
                .num_gpio = 15,
                .max_vid = 4095,
+               .max_sid = 63,
                .port_base_addr = 0x10,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5487,6 +5808,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 9,
                .num_gpio = 16,
                .max_vid = 8191,
+               .max_sid = 63,
                .port_base_addr = 0x0,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5510,6 +5832,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 9,
                .num_gpio = 16,
                .max_vid = 8191,
+               .max_sid = 63,
                .port_base_addr = 0x0,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5532,6 +5855,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_ports = 11,        /* 10 + Z80 */
                .num_internal_phys = 9,
                .max_vid = 8191,
+               .max_sid = 63,
                .port_base_addr = 0x0,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5554,6 +5878,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_ports = 11,        /* 10 + Z80 */
                .num_internal_phys = 9,
                .max_vid = 8191,
+               .max_sid = 63,
                .port_base_addr = 0x0,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5576,6 +5901,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_ports = 11,        /* 10 + Z80 */
                .num_internal_phys = 9,
                .max_vid = 8191,
+               .max_sid = 63,
                .port_base_addr = 0x0,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5626,6 +5952,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 5,
                .num_gpio = 15,
                .max_vid = 4095,
+               .max_sid = 63,
                .port_base_addr = 0x10,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5671,6 +5998,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 9,
                .num_gpio = 16,
                .max_vid = 8191,
+               .max_sid = 63,
                .port_base_addr = 0x0,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5744,6 +6072,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_ports = 6,
                .num_gpio = 11,
                .max_vid = 4095,
+               .max_sid = 63,
                .port_base_addr = 0x10,
                .phy_base_addr = 0x10,
                .global1_addr = 0x1b,
@@ -5768,6 +6097,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_ports = 7,
                .num_internal_phys = 5,
                .max_vid = 4095,
+               .max_sid = 63,
                .port_base_addr = 0x10,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5791,6 +6121,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_ports = 7,
                .num_internal_phys = 5,
                .max_vid = 4095,
+               .max_sid = 63,
                .port_base_addr = 0x10,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5815,6 +6146,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 5,
                .num_gpio = 15,
                .max_vid = 4095,
+               .max_sid = 63,
                .port_base_addr = 0x10,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5839,6 +6171,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 9,
                .num_gpio = 16,
                .max_vid = 8191,
+               .max_sid = 63,
                .port_base_addr = 0x0,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5863,6 +6196,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 9,
                .num_gpio = 16,
                .max_vid = 8191,
+               .max_sid = 63,
                .port_base_addr = 0x0,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5886,6 +6220,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_ports = 11,        /* 10 + Z80 */
                .num_internal_phys = 9,
                .max_vid = 8191,
+               .max_sid = 63,
                .port_base_addr = 0x0,
                .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
@@ -5954,6 +6289,7 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
        mutex_init(&chip->reg_lock);
        INIT_LIST_HEAD(&chip->mdios);
        idr_init(&chip->policies);
+       INIT_LIST_HEAD(&chip->msts);
 
        return chip;
 }
@@ -6036,7 +6372,8 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
 
 static int mv88e6xxx_port_mirror_add(struct dsa_switch *ds, int port,
                                     struct dsa_mall_mirror_tc_entry *mirror,
-                                    bool ingress)
+                                    bool ingress,
+                                    struct netlink_ext_ack *extack)
 {
        enum mv88e6xxx_egress_direction direction = ingress ?
                                                MV88E6XXX_EGRESS_DIR_INGRESS :
@@ -6486,10 +6823,13 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
        .port_pre_bridge_flags  = mv88e6xxx_port_pre_bridge_flags,
        .port_bridge_flags      = mv88e6xxx_port_bridge_flags,
        .port_stp_state_set     = mv88e6xxx_port_stp_state_set,
+       .port_mst_state_set     = mv88e6xxx_port_mst_state_set,
        .port_fast_age          = mv88e6xxx_port_fast_age,
+       .port_vlan_fast_age     = mv88e6xxx_port_vlan_fast_age,
        .port_vlan_filtering    = mv88e6xxx_port_vlan_filtering,
        .port_vlan_add          = mv88e6xxx_port_vlan_add,
        .port_vlan_del          = mv88e6xxx_port_vlan_del,
+       .vlan_msti_set          = mv88e6xxx_vlan_msti_set,
        .port_fdb_add           = mv88e6xxx_port_fdb_add,
        .port_fdb_del           = mv88e6xxx_port_fdb_del,
        .port_fdb_dump          = mv88e6xxx_port_fdb_dump,
index 30b92a2..5e03cfe 100644 (file)
@@ -20,6 +20,7 @@
 
 #define EDSA_HLEN              8
 #define MV88E6XXX_N_FID                4096
+#define MV88E6XXX_N_SID                64
 
 #define MV88E6XXX_FID_STANDALONE       0
 #define MV88E6XXX_FID_BRIDGED          1
@@ -130,6 +131,7 @@ struct mv88e6xxx_info {
        unsigned int num_internal_phys;
        unsigned int num_gpio;
        unsigned int max_vid;
+       unsigned int max_sid;
        unsigned int port_base_addr;
        unsigned int phy_base_addr;
        unsigned int global1_addr;
@@ -181,6 +183,12 @@ struct mv88e6xxx_vtu_entry {
        bool    valid;
        bool    policy;
        u8      member[DSA_MAX_PORTS];
+       u8      state[DSA_MAX_PORTS];   /* Older silicon has no STU */
+};
+
+struct mv88e6xxx_stu_entry {
+       u8      sid;
+       bool    valid;
        u8      state[DSA_MAX_PORTS];
 };
 
@@ -279,6 +287,7 @@ enum mv88e6xxx_region_id {
        MV88E6XXX_REGION_GLOBAL2,
        MV88E6XXX_REGION_ATU,
        MV88E6XXX_REGION_VTU,
+       MV88E6XXX_REGION_STU,
        MV88E6XXX_REGION_PVT,
 
        _MV88E6XXX_REGION_MAX,
@@ -288,6 +297,16 @@ struct mv88e6xxx_region_priv {
        enum mv88e6xxx_region_id id;
 };
 
+struct mv88e6xxx_mst {
+       struct list_head node;
+
+       refcount_t refcnt;
+       struct net_device *br;
+       u16 msti;
+
+       struct mv88e6xxx_stu_entry stu;
+};
+
 struct mv88e6xxx_chip {
        const struct mv88e6xxx_info *info;
 
@@ -388,6 +407,9 @@ struct mv88e6xxx_chip {
 
        /* devlink regions */
        struct devlink_region *regions[_MV88E6XXX_REGION_MAX];
+
+       /* Bridge MST to SID mappings */
+       struct list_head msts;
 };
 
 struct mv88e6xxx_bus_ops {
@@ -602,6 +624,12 @@ struct mv88e6xxx_ops {
        int (*vtu_loadpurge)(struct mv88e6xxx_chip *chip,
                             struct mv88e6xxx_vtu_entry *entry);
 
+       /* Spanning Tree Unit operations */
+       int (*stu_getnext)(struct mv88e6xxx_chip *chip,
+                          struct mv88e6xxx_stu_entry *entry);
+       int (*stu_loadpurge)(struct mv88e6xxx_chip *chip,
+                            struct mv88e6xxx_stu_entry *entry);
+
        /* GPIO operations */
        const struct mv88e6xxx_gpio_ops *gpio_ops;
 
@@ -700,6 +728,13 @@ struct mv88e6xxx_hw_stat {
        int type;
 };
 
+static inline bool mv88e6xxx_has_stu(struct mv88e6xxx_chip *chip)
+{
+       return chip->info->max_sid > 0 &&
+               chip->info->ops->stu_loadpurge &&
+               chip->info->ops->stu_getnext;
+}
+
 static inline bool mv88e6xxx_has_pvt(struct mv88e6xxx_chip *chip)
 {
        return chip->info->pvt;
@@ -730,6 +765,11 @@ static inline unsigned int mv88e6xxx_max_vid(struct mv88e6xxx_chip *chip)
        return chip->info->max_vid;
 }
 
+static inline unsigned int mv88e6xxx_max_sid(struct mv88e6xxx_chip *chip)
+{
+       return chip->info->max_sid;
+}
+
 static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip)
 {
        return GENMASK((s32)mv88e6xxx_num_ports(chip) - 1, 0);
index 3810683..1266eab 100644 (file)
@@ -503,6 +503,85 @@ static int mv88e6xxx_region_vtu_snapshot(struct devlink *dl,
        return 0;
 }
 
+/**
+ * struct mv88e6xxx_devlink_stu_entry - Devlink STU entry
+ * @sid:   Global1/3:   SID, unknown filters and learning.
+ * @vid:   Global1/6:   Valid bit.
+ * @data:  Global1/7-9: Membership data and priority override.
+ * @resvd: Reserved. In case we forgot something.
+ *
+ * The STU entry format varies between chipset generations. Peridot
+ * and Amethyst packs the STU data into Global1/7-8. Older silicon
+ * spreads the information across all three VTU data registers -
+ * inheriting the layout of even older hardware that had no STU at
+ * all. Since this is a low-level debug interface, copy all data
+ * verbatim and defer parsing to the consumer.
+ */
+struct mv88e6xxx_devlink_stu_entry {
+       u16 sid;
+       u16 vid;
+       u16 data[3];
+       u16 resvd;
+};
+
+static int mv88e6xxx_region_stu_snapshot(struct devlink *dl,
+                                        const struct devlink_region_ops *ops,
+                                        struct netlink_ext_ack *extack,
+                                        u8 **data)
+{
+       struct mv88e6xxx_devlink_stu_entry *table, *entry;
+       struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+       struct mv88e6xxx_chip *chip = ds->priv;
+       struct mv88e6xxx_stu_entry stu;
+       int err;
+
+       table = kcalloc(mv88e6xxx_max_sid(chip) + 1,
+                       sizeof(struct mv88e6xxx_devlink_stu_entry),
+                       GFP_KERNEL);
+       if (!table)
+               return -ENOMEM;
+
+       entry = table;
+       stu.sid = mv88e6xxx_max_sid(chip);
+       stu.valid = false;
+
+       mv88e6xxx_reg_lock(chip);
+
+       do {
+               err = mv88e6xxx_g1_stu_getnext(chip, &stu);
+               if (err)
+                       break;
+
+               if (!stu.valid)
+                       break;
+
+               err = err ? : mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_SID,
+                                               &entry->sid);
+               err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_VID,
+                                               &entry->vid);
+               err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA1,
+                                               &entry->data[0]);
+               err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA2,
+                                               &entry->data[1]);
+               err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA3,
+                                               &entry->data[2]);
+               if (err)
+                       break;
+
+               entry++;
+       } while (stu.sid < mv88e6xxx_max_sid(chip));
+
+       mv88e6xxx_reg_unlock(chip);
+
+       if (err) {
+               kfree(table);
+               return err;
+       }
+
+       *data = (u8 *)table;
+       return 0;
+}
+
 static int mv88e6xxx_region_pvt_snapshot(struct devlink *dl,
                                         const struct devlink_region_ops *ops,
                                         struct netlink_ext_ack *extack,
@@ -605,6 +684,12 @@ static struct devlink_region_ops mv88e6xxx_region_vtu_ops = {
        .destructor = kfree,
 };
 
+static struct devlink_region_ops mv88e6xxx_region_stu_ops = {
+       .name = "stu",
+       .snapshot = mv88e6xxx_region_stu_snapshot,
+       .destructor = kfree,
+};
+
 static struct devlink_region_ops mv88e6xxx_region_pvt_ops = {
        .name = "pvt",
        .snapshot = mv88e6xxx_region_pvt_snapshot,
@@ -640,6 +725,11 @@ static struct mv88e6xxx_region mv88e6xxx_regions[] = {
                .ops = &mv88e6xxx_region_vtu_ops
          /* calculated at runtime */
        },
+       [MV88E6XXX_REGION_STU] = {
+               .ops = &mv88e6xxx_region_stu_ops,
+               .cond = mv88e6xxx_has_stu,
+         /* calculated at runtime */
+       },
        [MV88E6XXX_REGION_PVT] = {
                .ops = &mv88e6xxx_region_pvt_ops,
                .size = MV88E6XXX_MAX_PVT_ENTRIES * sizeof(u16),
@@ -706,6 +796,10 @@ int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds)
                        size = (mv88e6xxx_max_vid(chip) + 1) *
                                sizeof(struct mv88e6xxx_devlink_vtu_entry);
                        break;
+               case MV88E6XXX_REGION_STU:
+                       size = (mv88e6xxx_max_sid(chip) + 1) *
+                               sizeof(struct mv88e6xxx_devlink_stu_entry);
+                       break;
                }
 
                region = dsa_devlink_region_create(ds, ops, 1, size);
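
Note: as the kernel-doc above says, the snapshot copies the raw register words and defers parsing to the consumer. A user-space sketch of decoding per-port spanning-tree states from a dumped "stu" region, assuming the legacy layout visible in mv88e6185_g1_vtu_data_read() (four ports per data register, 2-bit PortState at bit offset (port % 4) * 4 + 2); Peridot and Amethyst pack states differently:

#include <stdint.h>

struct stu_snapshot_entry {	/* mirrors struct mv88e6xxx_devlink_stu_entry */
	uint16_t sid;
	uint16_t vid;		/* the valid bit lives in this word */
	uint16_t data[3];	/* Global1/7-9, copied verbatim */
	uint16_t resvd;
};

/* Decode the 2-bit PortState of @port under the legacy register layout. */
static unsigned int stu_port_state(const struct stu_snapshot_entry *e,
				   unsigned int port)
{
	return (e->data[port / 4] >> ((port % 4) * 4 + 2)) & 0x3;
}
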
index 2c1607c..65958b2 100644 (file)
@@ -348,6 +348,16 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
 int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
                               struct mv88e6xxx_vtu_entry *entry);
 int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+                            struct mv88e6xxx_stu_entry *entry);
+int mv88e6352_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+                            struct mv88e6xxx_stu_entry *entry);
+int mv88e6352_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+                              struct mv88e6xxx_stu_entry *entry);
+int mv88e6390_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+                            struct mv88e6xxx_stu_entry *entry);
+int mv88e6390_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+                              struct mv88e6xxx_stu_entry *entry);
 int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip);
 void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip);
 int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid);
index b1bd927..38e18f5 100644 (file)
@@ -44,8 +44,7 @@ static int mv88e6xxx_g1_vtu_fid_write(struct mv88e6xxx_chip *chip,
 
 /* Offset 0x03: VTU SID Register */
 
-static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip,
-                                    struct mv88e6xxx_vtu_entry *entry)
+static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip, u8 *sid)
 {
        u16 val;
        int err;
@@ -54,15 +53,14 @@ static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip,
        if (err)
                return err;
 
-       entry->sid = val & MV88E6352_G1_VTU_SID_MASK;
+       *sid = val & MV88E6352_G1_VTU_SID_MASK;
 
        return 0;
 }
 
-static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip,
-                                     struct mv88e6xxx_vtu_entry *entry)
+static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip, u8 sid)
 {
-       u16 val = entry->sid & MV88E6352_G1_VTU_SID_MASK;
+       u16 val = sid & MV88E6352_G1_VTU_SID_MASK;
 
        return mv88e6xxx_g1_write(chip, MV88E6352_G1_VTU_SID, val);
 }
@@ -91,7 +89,7 @@ static int mv88e6xxx_g1_vtu_op(struct mv88e6xxx_chip *chip, u16 op)
 /* Offset 0x06: VTU VID Register */
 
 static int mv88e6xxx_g1_vtu_vid_read(struct mv88e6xxx_chip *chip,
-                                    struct mv88e6xxx_vtu_entry *entry)
+                                    bool *valid, u16 *vid)
 {
        u16 val;
        int err;
@@ -100,25 +98,28 @@ static int mv88e6xxx_g1_vtu_vid_read(struct mv88e6xxx_chip *chip,
        if (err)
                return err;
 
-       entry->vid = val & 0xfff;
+       if (vid) {
+               *vid = val & 0xfff;
 
-       if (val & MV88E6390_G1_VTU_VID_PAGE)
-               entry->vid |= 0x1000;
+               if (val & MV88E6390_G1_VTU_VID_PAGE)
+                       *vid |= 0x1000;
+       }
 
-       entry->valid = !!(val & MV88E6XXX_G1_VTU_VID_VALID);
+       if (valid)
+               *valid = !!(val & MV88E6XXX_G1_VTU_VID_VALID);
 
        return 0;
 }
 
 static int mv88e6xxx_g1_vtu_vid_write(struct mv88e6xxx_chip *chip,
-                                     struct mv88e6xxx_vtu_entry *entry)
+                                     bool valid, u16 vid)
 {
-       u16 val = entry->vid & 0xfff;
+       u16 val = vid & 0xfff;
 
-       if (entry->vid & 0x1000)
+       if (vid & 0x1000)
                val |= MV88E6390_G1_VTU_VID_PAGE;
 
-       if (entry->valid)
+       if (valid)
                val |= MV88E6XXX_G1_VTU_VID_VALID;
 
        return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_VTU_VID, val);
@@ -147,7 +148,7 @@ static int mv88e6185_g1_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
 }
 
 static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
-                                     struct mv88e6xxx_vtu_entry *entry)
+                                     u8 *member, u8 *state)
 {
        u16 regs[3];
        int err;
@@ -160,36 +161,20 @@ static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
        /* Extract MemberTag data */
        for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
                unsigned int member_offset = (i % 4) * 4;
+               unsigned int state_offset = member_offset + 2;
 
-               entry->member[i] = (regs[i / 4] >> member_offset) & 0x3;
-       }
-
-       return 0;
-}
-
-static int mv88e6185_g1_stu_data_read(struct mv88e6xxx_chip *chip,
-                                     struct mv88e6xxx_vtu_entry *entry)
-{
-       u16 regs[3];
-       int err;
-       int i;
-
-       err = mv88e6185_g1_vtu_stu_data_read(chip, regs);
-       if (err)
-               return err;
+               if (member)
+                       member[i] = (regs[i / 4] >> member_offset) & 0x3;
 
-       /* Extract PortState data */
-       for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
-               unsigned int state_offset = (i % 4) * 4 + 2;
-
-               entry->state[i] = (regs[i / 4] >> state_offset) & 0x3;
+               if (state)
+                       state[i] = (regs[i / 4] >> state_offset) & 0x3;
        }
 
        return 0;
 }
 
 static int mv88e6185_g1_vtu_data_write(struct mv88e6xxx_chip *chip,
-                                      struct mv88e6xxx_vtu_entry *entry)
+                                      u8 *member, u8 *state)
 {
        u16 regs[3] = { 0 };
        int i;
@@ -199,8 +184,11 @@ static int mv88e6185_g1_vtu_data_write(struct mv88e6xxx_chip *chip,
                unsigned int member_offset = (i % 4) * 4;
                unsigned int state_offset = member_offset + 2;
 
-               regs[i / 4] |= (entry->member[i] & 0x3) << member_offset;
-               regs[i / 4] |= (entry->state[i] & 0x3) << state_offset;
+               if (member)
+                       regs[i / 4] |= (member[i] & 0x3) << member_offset;
+
+               if (state)
+                       regs[i / 4] |= (state[i] & 0x3) << state_offset;
        }
 
        /* Write all 3 VTU/STU Data registers */
@@ -268,48 +256,6 @@ static int mv88e6390_g1_vtu_data_write(struct mv88e6xxx_chip *chip, u8 *data)
 
 /* VLAN Translation Unit Operations */
 
-static int mv88e6xxx_g1_vtu_stu_getnext(struct mv88e6xxx_chip *chip,
-                                       struct mv88e6xxx_vtu_entry *entry)
-{
-       int err;
-
-       err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
-       if (err)
-               return err;
-
-       err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_GET_NEXT);
-       if (err)
-               return err;
-
-       err = mv88e6xxx_g1_vtu_sid_read(chip, entry);
-       if (err)
-               return err;
-
-       return mv88e6xxx_g1_vtu_vid_read(chip, entry);
-}
-
-static int mv88e6xxx_g1_vtu_stu_get(struct mv88e6xxx_chip *chip,
-                                   struct mv88e6xxx_vtu_entry *vtu)
-{
-       struct mv88e6xxx_vtu_entry stu;
-       int err;
-
-       err = mv88e6xxx_g1_vtu_sid_read(chip, vtu);
-       if (err)
-               return err;
-
-       stu.sid = vtu->sid - 1;
-
-       err = mv88e6xxx_g1_vtu_stu_getnext(chip, &stu);
-       if (err)
-               return err;
-
-       if (stu.sid != vtu->sid || !stu.valid)
-               return -EINVAL;
-
-       return 0;
-}
-
 int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
                             struct mv88e6xxx_vtu_entry *entry)
 {
@@ -327,7 +273,7 @@ int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
         * write the VID only once, when the entry is given as invalid.
         */
        if (!entry->valid) {
-               err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+               err = mv88e6xxx_g1_vtu_vid_write(chip, false, entry->vid);
                if (err)
                        return err;
        }
@@ -336,7 +282,7 @@ int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
        if (err)
                return err;
 
-       return mv88e6xxx_g1_vtu_vid_read(chip, entry);
+       return mv88e6xxx_g1_vtu_vid_read(chip, &entry->valid, &entry->vid);
 }
 
 int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
@@ -350,11 +296,7 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
                return err;
 
        if (entry->valid) {
-               err = mv88e6185_g1_vtu_data_read(chip, entry);
-               if (err)
-                       return err;
-
-               err = mv88e6185_g1_stu_data_read(chip, entry);
+               err = mv88e6185_g1_vtu_data_read(chip, entry->member, entry->state);
                if (err)
                        return err;
 
@@ -384,7 +326,7 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
                return err;
 
        if (entry->valid) {
-               err = mv88e6185_g1_vtu_data_read(chip, entry);
+               err = mv88e6185_g1_vtu_data_read(chip, entry->member, NULL);
                if (err)
                        return err;
 
@@ -392,12 +334,7 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
                if (err)
                        return err;
 
-               /* Fetch VLAN PortState data from the STU */
-               err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
-               if (err)
-                       return err;
-
-               err = mv88e6185_g1_stu_data_read(chip, entry);
+               err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
                if (err)
                        return err;
        }
@@ -420,16 +357,11 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
                if (err)
                        return err;
 
-               /* Fetch VLAN PortState data from the STU */
-               err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
-               if (err)
-                       return err;
-
-               err = mv88e6390_g1_vtu_data_read(chip, entry->state);
+               err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
                if (err)
                        return err;
 
-               err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
+               err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
                if (err)
                        return err;
        }
@@ -447,12 +379,12 @@ int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
        if (err)
                return err;
 
-       err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+       err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
        if (err)
                return err;
 
        if (entry->valid) {
-               err = mv88e6185_g1_vtu_data_write(chip, entry);
+               err = mv88e6185_g1_vtu_data_write(chip, entry->member, entry->state);
                if (err)
                        return err;
 
@@ -479,27 +411,21 @@ int mv88e6352_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
        if (err)
                return err;
 
-       err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+       err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
        if (err)
                return err;
 
        if (entry->valid) {
-               /* Write MemberTag and PortState data */
-               err = mv88e6185_g1_vtu_data_write(chip, entry);
-               if (err)
-                       return err;
-
-               err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
+               /* Write MemberTag data */
+               err = mv88e6185_g1_vtu_data_write(chip, entry->member, NULL);
                if (err)
                        return err;
 
-               /* Load STU entry */
-               err = mv88e6xxx_g1_vtu_op(chip,
-                                         MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+               err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
                if (err)
                        return err;
 
-               err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
+               err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
                if (err)
                        return err;
        }
@@ -517,41 +443,113 @@ int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
        if (err)
                return err;
 
-       err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+       err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
        if (err)
                return err;
 
        if (entry->valid) {
-               /* Write PortState data */
-               err = mv88e6390_g1_vtu_data_write(chip, entry->state);
+               /* Write MemberTag data */
+               err = mv88e6390_g1_vtu_data_write(chip, entry->member);
                if (err)
                        return err;
 
-               err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
+               err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
                if (err)
                        return err;
 
-               /* Load STU entry */
-               err = mv88e6xxx_g1_vtu_op(chip,
-                                         MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+               err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
                if (err)
                        return err;
+       }
 
-               /* Write MemberTag data */
-               err = mv88e6390_g1_vtu_data_write(chip, entry->member);
+       /* Load/Purge VTU entry */
+       return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE);
+}
+
+int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
+{
+       int err;
+
+       err = mv88e6xxx_g1_vtu_op_wait(chip);
+       if (err)
+               return err;
+
+       return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_FLUSH_ALL);
+}
+
+/* Spanning Tree Unit Operations */
+
+int mv88e6xxx_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+                            struct mv88e6xxx_stu_entry *entry)
+{
+       int err;
+
+       err = mv88e6xxx_g1_vtu_op_wait(chip);
+       if (err)
+               return err;
+
+       /* To get the next higher active SID, the STU GetNext operation can be
+        * started again without setting the SID register, since it already
+        * contains the last SID.
+        *
+        * To save a few hardware accesses and abstract this to the caller,
+        * write the SID only once, when the entry is given as invalid.
+        */
+       if (!entry->valid) {
+               err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
                if (err)
                        return err;
+       }
 
-               err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
+       err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_GET_NEXT);
+       if (err)
+               return err;
+
+       err = mv88e6xxx_g1_vtu_vid_read(chip, &entry->valid, NULL);
+       if (err)
+               return err;
+
+       if (entry->valid) {
+               err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
                if (err)
                        return err;
        }
 
-       /* Load/Purge VTU entry */
-       return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE);
+       return 0;
 }
 
-int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
+int mv88e6352_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+                            struct mv88e6xxx_stu_entry *entry)
+{
+       int err;
+
+       err = mv88e6xxx_g1_stu_getnext(chip, entry);
+       if (err)
+               return err;
+
+       if (!entry->valid)
+               return 0;
+
+       return mv88e6185_g1_vtu_data_read(chip, NULL, entry->state);
+}
+
+int mv88e6390_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+                            struct mv88e6xxx_stu_entry *entry)
+{
+       int err;
+
+       err = mv88e6xxx_g1_stu_getnext(chip, entry);
+       if (err)
+               return err;
+
+       if (!entry->valid)
+               return 0;
+
+       return mv88e6390_g1_vtu_data_read(chip, entry->state);
+}
+
+int mv88e6352_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+                              struct mv88e6xxx_stu_entry *entry)
 {
        int err;
 
@@ -559,16 +557,59 @@ int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
        if (err)
                return err;
 
-       return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_FLUSH_ALL);
+       err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, 0);
+       if (err)
+               return err;
+
+       err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
+       if (err)
+               return err;
+
+       if (entry->valid) {
+               err = mv88e6185_g1_vtu_data_write(chip, NULL, entry->state);
+               if (err)
+                       return err;
+       }
+
+       /* Load/Purge STU entry */
+       return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+}
+
+int mv88e6390_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+                              struct mv88e6xxx_stu_entry *entry)
+{
+       int err;
+
+       err = mv88e6xxx_g1_vtu_op_wait(chip);
+       if (err)
+               return err;
+
+       err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, 0);
+       if (err)
+               return err;
+
+       err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
+       if (err)
+               return err;
+
+       if (entry->valid) {
+               err = mv88e6390_g1_vtu_data_write(chip, entry->state);
+               if (err)
+                       return err;
+       }
+
+       /* Load/Purge STU entry */
+       return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
 }
 
+/* VTU Violation Management */
+
 static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
 {
        struct mv88e6xxx_chip *chip = dev_id;
-       struct mv88e6xxx_vtu_entry entry;
+       u16 val, vid;
        int spid;
        int err;
-       u16 val;
 
        mv88e6xxx_reg_lock(chip);
 
@@ -580,7 +621,7 @@ static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
        if (err)
                goto out;
 
-       err = mv88e6xxx_g1_vtu_vid_read(chip, &entry);
+       err = mv88e6xxx_g1_vtu_vid_read(chip, NULL, &vid);
        if (err)
                goto out;
 
@@ -588,13 +629,13 @@ static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
 
        if (val & MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION) {
                dev_err_ratelimited(chip->dev, "VTU member violation for vid %d, source port %d\n",
-                                   entry.vid, spid);
+                                   vid, spid);
                chip->ports[spid].vtu_member_violation++;
        }
 
        if (val & MV88E6XXX_G1_VTU_OP_MISS_VIOLATION) {
                dev_dbg_ratelimited(chip->dev, "VTU miss violation for vid %d, source port %d\n",
-                                   entry.vid, spid);
+                                   vid, spid);
                chip->ports[spid].vtu_miss_violation++;
        }
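
Note: the priming protocol documented in mv88e6xxx_g1_stu_getnext() above (seed the entry with the highest SID and valid = false, then iterate until the operation wraps) is easiest to see as a walk loop. A minimal sketch, mirroring the devlink snapshot code earlier in the series:

	struct mv88e6xxx_stu_entry entry = {
		.sid = mv88e6xxx_max_sid(chip),	/* GetNext wraps to the lowest SID */
		.valid = false,			/* triggers the one-time SID write */
	};
	int err;

	do {
		err = mv88e6xxx_g1_stu_getnext(chip, &entry);
		if (err || !entry.valid)
			break;
		/* entry.sid and entry.state[] now describe one active STU entry */
	} while (entry.sid < mv88e6xxx_max_sid(chip));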
 
index 7cc6709..413b000 100644 (file)
@@ -460,7 +460,7 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds,
        return 0;
 }
 
-static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu, bool change)
+static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
 {
        struct ocelot *ocelot = ds->priv;
        struct dsa_port *dp;
@@ -488,19 +488,15 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu, bool change)
        if (err)
                return err;
 
-       if (change) {
-               err = dsa_port_walk_fdbs(ds, cpu,
-                                        felix_migrate_fdbs_to_tag_8021q_port);
-               if (err)
-                       goto out_tag_8021q_unregister;
+       err = dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_tag_8021q_port);
+       if (err)
+               goto out_tag_8021q_unregister;
 
-               err = dsa_port_walk_mdbs(ds, cpu,
-                                        felix_migrate_mdbs_to_tag_8021q_port);
-               if (err)
-                       goto out_migrate_fdbs;
+       err = dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_tag_8021q_port);
+       if (err)
+               goto out_migrate_fdbs;
 
-               felix_migrate_flood_to_tag_8021q_port(ds, cpu);
-       }
+       felix_migrate_flood_to_tag_8021q_port(ds, cpu);
 
        err = felix_update_trapping_destinations(ds, true);
        if (err)
@@ -518,13 +514,10 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu, bool change)
        return 0;
 
 out_migrate_flood:
-       if (change)
-               felix_migrate_flood_to_npi_port(ds, cpu);
-       if (change)
-               dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_npi_port);
+       felix_migrate_flood_to_npi_port(ds, cpu);
+       dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_npi_port);
 out_migrate_fdbs:
-       if (change)
-               dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_npi_port);
+       dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_npi_port);
 out_tag_8021q_unregister:
        dsa_tag_8021q_unregister(ds);
        return err;
@@ -599,33 +592,27 @@ static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
        ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
 }
 
-static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu, bool change)
+static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu)
 {
        struct ocelot *ocelot = ds->priv;
        int err;
 
-       if (change) {
-               err = dsa_port_walk_fdbs(ds, cpu,
-                                        felix_migrate_fdbs_to_npi_port);
-               if (err)
-                       return err;
+       err = dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_npi_port);
+       if (err)
+               return err;
 
-               err = dsa_port_walk_mdbs(ds, cpu,
-                                        felix_migrate_mdbs_to_npi_port);
-               if (err)
-                       goto out_migrate_fdbs;
+       err = dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_npi_port);
+       if (err)
+               goto out_migrate_fdbs;
 
-               felix_migrate_flood_to_npi_port(ds, cpu);
-       }
+       felix_migrate_flood_to_npi_port(ds, cpu);
 
        felix_npi_port_init(ocelot, cpu);
 
        return 0;
 
 out_migrate_fdbs:
-       if (change)
-               dsa_port_walk_fdbs(ds, cpu,
-                                  felix_migrate_fdbs_to_tag_8021q_port);
+       dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_tag_8021q_port);
 
        return err;
 }
@@ -638,17 +625,17 @@ static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu)
 }
 
 static int felix_set_tag_protocol(struct dsa_switch *ds, int cpu,
-                                 enum dsa_tag_protocol proto, bool change)
+                                 enum dsa_tag_protocol proto)
 {
        int err;
 
        switch (proto) {
        case DSA_TAG_PROTO_SEVILLE:
        case DSA_TAG_PROTO_OCELOT:
-               err = felix_setup_tag_npi(ds, cpu, change);
+               err = felix_setup_tag_npi(ds, cpu);
                break;
        case DSA_TAG_PROTO_OCELOT_8021Q:
-               err = felix_setup_tag_8021q(ds, cpu, change);
+               err = felix_setup_tag_8021q(ds, cpu);
                break;
        default:
                err = -EPROTONOSUPPORT;
@@ -692,9 +679,9 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
 
        felix_del_tag_protocol(ds, cpu, old_proto);
 
-       err = felix_set_tag_protocol(ds, cpu, proto, true);
+       err = felix_set_tag_protocol(ds, cpu, proto);
        if (err) {
-               felix_set_tag_protocol(ds, cpu, old_proto, true);
+               felix_set_tag_protocol(ds, cpu, old_proto);
                return err;
        }
 
@@ -752,6 +739,10 @@ static int felix_fdb_add(struct dsa_switch *ds, int port,
        if (IS_ERR(bridge_dev))
                return PTR_ERR(bridge_dev);
 
+       if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+           dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
+               return 0;
+
        return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev);
 }
 
@@ -765,6 +756,10 @@ static int felix_fdb_del(struct dsa_switch *ds, int port,
        if (IS_ERR(bridge_dev))
                return PTR_ERR(bridge_dev);
 
+       if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+           dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
+               return 0;
+
        return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev);
 }
 
@@ -804,6 +799,10 @@ static int felix_mdb_add(struct dsa_switch *ds, int port,
        if (IS_ERR(bridge_dev))
                return PTR_ERR(bridge_dev);
 
+       if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+           dsa_mdb_present_in_other_db(ds, port, mdb, db))
+               return 0;
+
        return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev);
 }
 
@@ -817,6 +816,10 @@ static int felix_mdb_del(struct dsa_switch *ds, int port,
        if (IS_ERR(bridge_dev))
                return PTR_ERR(bridge_dev);
 
+       if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+           dsa_mdb_present_in_other_db(ds, port, mdb, db))
+               return 0;
+
        return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev);
 }
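
Note: the guard added to all four FDB/MDB paths above appears to give host addresses refcount-like semantics: an address on the CPU port can be requested by more than one database (for instance the bridge database and the tag_8021q database), so hardware state is only touched when no other database also holds the entry. An annotated restatement of the pattern, a reading of the change rather than an authoritative description:

	/* For a standalone-port address (no bridge_dev) on the CPU port,
	 * skip the hardware operation if another database still references
	 * the same entry; report success and leave the entry alone.
	 */
	if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
	    dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
		return 0;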
 
@@ -1356,6 +1359,7 @@ static int felix_setup(struct dsa_switch *ds)
 {
        struct ocelot *ocelot = ds->priv;
        struct felix *felix = ocelot_to_felix(ocelot);
+       unsigned long cpu_flood;
        struct dsa_port *dp;
        int err;
 
@@ -1393,7 +1397,15 @@ static int felix_setup(struct dsa_switch *ds)
                /* The initial tag protocol is NPI which always returns 0, so
                 * there's no real point in checking for errors.
                 */
-               felix_set_tag_protocol(ds, dp->index, felix->tag_proto, false);
+               felix_set_tag_protocol(ds, dp->index, felix->tag_proto);
+
+               /* Start off with flooding disabled towards the NPI port
+                * (actually CPU port module).
+                */
+               cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
+               ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
+               ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
+
                break;
        }
 
@@ -1638,6 +1650,24 @@ static void felix_port_policer_del(struct dsa_switch *ds, int port)
        ocelot_port_policer_del(ocelot, port);
 }
 
+static int felix_port_mirror_add(struct dsa_switch *ds, int port,
+                                struct dsa_mall_mirror_tc_entry *mirror,
+                                bool ingress, struct netlink_ext_ack *extack)
+{
+       struct ocelot *ocelot = ds->priv;
+
+       return ocelot_port_mirror_add(ocelot, port, mirror->to_local_port,
+                                     ingress, extack);
+}
+
+static void felix_port_mirror_del(struct dsa_switch *ds, int port,
+                                 struct dsa_mall_mirror_tc_entry *mirror)
+{
+       struct ocelot *ocelot = ds->priv;
+
+       ocelot_port_mirror_del(ocelot, port, mirror->ingress);
+}
+
 static int felix_port_setup_tc(struct dsa_switch *ds, int port,
                               enum tc_setup_type type,
                               void *type_data)
@@ -1787,6 +1817,44 @@ felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
        return ocelot_mrp_del_ring_role(ocelot, port, mrp);
 }
 
+static int felix_port_get_default_prio(struct dsa_switch *ds, int port)
+{
+       struct ocelot *ocelot = ds->priv;
+
+       return ocelot_port_get_default_prio(ocelot, port);
+}
+
+static int felix_port_set_default_prio(struct dsa_switch *ds, int port,
+                                      u8 prio)
+{
+       struct ocelot *ocelot = ds->priv;
+
+       return ocelot_port_set_default_prio(ocelot, port, prio);
+}
+
+static int felix_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
+{
+       struct ocelot *ocelot = ds->priv;
+
+       return ocelot_port_get_dscp_prio(ocelot, port, dscp);
+}
+
+static int felix_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
+                                   u8 prio)
+{
+       struct ocelot *ocelot = ds->priv;
+
+       return ocelot_port_add_dscp_prio(ocelot, port, dscp, prio);
+}
+
+static int felix_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
+                                   u8 prio)
+{
+       struct ocelot *ocelot = ds->priv;
+
+       return ocelot_port_del_dscp_prio(ocelot, port, dscp, prio);
+}
+
 const struct dsa_switch_ops felix_switch_ops = {
        .get_tag_protocol               = felix_get_tag_protocol,
        .change_tag_protocol            = felix_change_tag_protocol,
@@ -1830,6 +1898,8 @@ const struct dsa_switch_ops felix_switch_ops = {
        .port_max_mtu                   = felix_get_max_mtu,
        .port_policer_add               = felix_port_policer_add,
        .port_policer_del               = felix_port_policer_del,
+       .port_mirror_add                = felix_port_mirror_add,
+       .port_mirror_del                = felix_port_mirror_del,
        .cls_flower_add                 = felix_cls_flower_add,
        .cls_flower_del                 = felix_cls_flower_del,
        .cls_flower_stats               = felix_cls_flower_stats,
@@ -1850,6 +1920,11 @@ const struct dsa_switch_ops felix_switch_ops = {
        .port_mrp_del_ring_role         = felix_mrp_del_ring_role,
        .tag_8021q_vlan_add             = felix_tag_8021q_vlan_add,
        .tag_8021q_vlan_del             = felix_tag_8021q_vlan_del,
+       .port_get_default_prio          = felix_port_get_default_prio,
+       .port_set_default_prio          = felix_port_set_default_prio,
+       .port_get_dscp_prio             = felix_port_get_dscp_prio,
+       .port_add_dscp_prio             = felix_port_add_dscp_prio,
+       .port_del_dscp_prio             = felix_port_del_dscp_prio,
 };
 
 struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
index ead3316..62d52e0 100644 (file)
@@ -37,6 +37,7 @@ static const u32 vsc9959_port_modes[VSC9959_NUM_PORTS] = {
        VSC9959_PORT_MODE_SERDES,
        VSC9959_PORT_MODE_SERDES,
        OCELOT_PORT_MODE_INTERNAL,
+       OCELOT_PORT_MODE_INTERNAL,
 };
 
 static const u32 vsc9959_ana_regmap[] = {
index ee0dbf3..d3ed0a7 100644 (file)
@@ -2473,7 +2473,7 @@ qca8k_port_mdb_del(struct dsa_switch *ds, int port,
 static int
 qca8k_port_mirror_add(struct dsa_switch *ds, int port,
                      struct dsa_mall_mirror_tc_entry *mirror,
-                     bool ingress)
+                     bool ingress, struct netlink_ext_ack *extack)
 {
        struct qca8k_priv *priv = ds->priv;
        int monitor_port, ret;
index 3358e97..b33841c 100644 (file)
@@ -2847,7 +2847,7 @@ static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
 
 static int sja1105_mirror_add(struct dsa_switch *ds, int port,
                              struct dsa_mall_mirror_tc_entry *mirror,
-                             bool ingress)
+                             bool ingress, struct netlink_ext_ack *extack)
 {
        return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
                                    ingress, true);
index e320ccc..21047ae 100644 (file)
@@ -405,15 +405,13 @@ static int mcf8390_init(struct net_device *dev)
 static int mcf8390_probe(struct platform_device *pdev)
 {
        struct net_device *dev;
-       struct resource *mem, *irq;
+       struct resource *mem;
        resource_size_t msize;
-       int ret;
+       int ret, irq;
 
-       irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (irq == NULL) {
-               dev_err(&pdev->dev, "no IRQ specified?\n");
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
                return -ENXIO;
-       }
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (mem == NULL) {
@@ -433,7 +431,7 @@ static int mcf8390_probe(struct platform_device *pdev)
        SET_NETDEV_DEV(dev, &pdev->dev);
        platform_set_drvdata(pdev, dev);
 
-       dev->irq = irq->start;
+       dev->irq = irq;
        dev->base_addr = mem->start;
 
        ret = mcf8390_init(dev);
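
Note: the mcf8390 conversion follows the preferred probe-time pattern: platform_get_irq() returns the Linux IRQ number directly, or a negative errno, and to my understanding logs its own error, which is why the dev_err() is dropped. A condensed sketch of the pattern:

	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* negative errno; the driver above returns -ENXIO instead */

	dev->irq = irq;		/* no struct resource dereference needed */
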
index 9acf589..87f40c2 100644 (file)
@@ -132,6 +132,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
 {
        struct arc_emac_mdio_bus_data *data = &priv->bus_data;
        struct device_node *np = priv->dev->of_node;
+       const char *name = "Synopsys MII Bus";
        struct mii_bus *bus;
        int error;
 
@@ -142,7 +143,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
        priv->bus = bus;
        bus->priv = priv;
        bus->parent = priv->dev;
-       bus->name = "Synopsys MII Bus";
+       bus->name = name;
        bus->read = &arc_mdio_read;
        bus->write = &arc_mdio_write;
        bus->reset = &arc_mdio_reset;
@@ -167,7 +168,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
        if (error) {
                mdiobus_free(bus);
                return dev_err_probe(priv->dev, error,
-                                    "cannot register MDIO bus %s\n", bus->name);
+                                    "cannot register MDIO bus %s\n", name);
        }
 
        return 0;
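
Note: keeping the bus name in a local reads like a use-after-free fix: once mdiobus_free() has released the mii_bus, dereferencing bus->name in the error message would read freed memory. A sketch of the hazard being avoided, a hedged reading of the change rather than the commit's own wording:

	mdiobus_free(bus);	/* bus, and thus bus->name, is gone after this */
	return dev_err_probe(priv->dev, error,
			     "cannot register MDIO bus %s\n",
			     name);	/* safe: local pointer to the string literal */
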
index 4ad3fc7..a89b93c 100644 (file)
@@ -1181,8 +1181,11 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
        alx->hw.mtu = mtu;
        alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
        netdev_update_features(netdev);
-       if (netif_running(netdev))
+       if (netif_running(netdev)) {
+               mutex_lock(&alx->mtx);
                alx_reinit(alx);
+               mutex_unlock(&alx->mtx);
+       }
        return 0;
 }
 
index f50604f..4945939 100644 (file)
@@ -1051,7 +1051,7 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
         * each ring/block may need up to 8 bytes for alignment, hence the
         * additional bytes tacked onto the end.
         */
-       ring_header->size = size =
+       ring_header->size =
                sizeof(struct atl1c_tpd_desc) * tpd_ring->count * tqc +
                sizeof(struct atl1c_rx_free_desc) * rfd_ring->count * rqc +
                sizeof(struct atl1c_recv_ret_status) * rfd_ring->count * rqc +
index 447a75e..dd5945c 100644 (file)
@@ -2533,6 +2533,4 @@ void bnx2x_register_phc(struct bnx2x *bp);
  * Meant for implicit re-load flows.
  */
 int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
-int bnx2x_init_firmware(struct bnx2x *bp);
-void bnx2x_release_firmware(struct bnx2x *bp);
 #endif /* bnx2x.h */
index 8d36ebb..5729a5a 100644 (file)
@@ -2364,24 +2364,30 @@ int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
        /* is another pf loaded on this engine? */
        if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
            load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
-               /* build my FW version dword */
-               u32 my_fw = (bp->fw_major) + (bp->fw_minor << 8) +
-                               (bp->fw_rev << 16) + (bp->fw_eng << 24);
+               u8 loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng;
+               u32 loaded_fw;
 
                /* read loaded FW from chip */
-               u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+               loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
 
-               DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
-                  loaded_fw, my_fw);
+               loaded_fw_major = loaded_fw & 0xff;
+               loaded_fw_minor = (loaded_fw >> 8) & 0xff;
+               loaded_fw_rev = (loaded_fw >> 16) & 0xff;
+               loaded_fw_eng = (loaded_fw >> 24) & 0xff;
+
+               DP(BNX2X_MSG_SP, "loaded fw 0x%x major 0x%x minor 0x%x rev 0x%x eng 0x%x\n",
+                  loaded_fw, loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng);
 
                /* abort nic load if version mismatch */
-               if (my_fw != loaded_fw) {
+               if (loaded_fw_major != BCM_5710_FW_MAJOR_VERSION ||
+                   loaded_fw_minor != BCM_5710_FW_MINOR_VERSION ||
+                   loaded_fw_eng != BCM_5710_FW_ENGINEERING_VERSION ||
+                   loaded_fw_rev < BCM_5710_FW_REVISION_VERSION_V15) {
                        if (print_err)
-                               BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
-                                         loaded_fw, my_fw);
+                               BNX2X_ERR("loaded FW incompatible. Aborting\n");
                        else
-                               BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
-                                              loaded_fw, my_fw);
+                               BNX2X_DEV_INFO("loaded FW incompatible, possibly due to MF UNDI\n");
+
                        return -EBUSY;
                }
        }
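
Note: the old check compared the whole packed dword, rejecting otherwise compatible firmware whose revision byte differed; the replacement compares the fields individually and accepts any revision at or above BCM_5710_FW_REVISION_VERSION_V15. A small self-contained sketch of the byte layout the driver builds and parses, (major) | (minor << 8) | (rev << 16) | (eng << 24):

#include <stdint.h>

struct bnx2x_fw_ver {
	uint8_t major, minor, rev, eng;
};

static struct bnx2x_fw_ver bnx2x_unpack_fw_ver(uint32_t dword)
{
	return (struct bnx2x_fw_ver){
		.major = dword & 0xff,		/* lowest byte, as read from XSEM_REG_PRAM */
		.minor = (dword >> 8) & 0xff,
		.rev   = (dword >> 16) & 0xff,
		.eng   = (dword >> 24) & 0xff,
	};
}
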
index 4e85e7d..7071604 100644 (file)
@@ -6178,7 +6178,8 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
                return -EINVAL;
        }
 
-       ret = scnprintf(str, *len, "%hx.%hx", num >> 16, num);
+       ret = scnprintf(str, *len, "%x.%x", (num >> 16) & 0xFFFF,
+                       num & 0xFFFF);
        *len -= ret;
        return 0;
 }
@@ -6193,7 +6194,8 @@ static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
                return -EINVAL;
        }
 
-       ret = scnprintf(str, *len, "%hhx.%hhx.%hhx", num >> 16, num >> 8, num);
+       ret = scnprintf(str, *len, "%x.%x.%x", (num >> 16) & 0xFF,
+                       (num >> 8) & 0xFF, num & 0xFF);
        *len -= ret;
        return 0;
 }
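
Note: both hunks replace %hx/%hhx with plain %x plus explicit masking. Variadic arguments are promoted to int, so the h length modifiers added nothing and newer compilers warn about them; shifting and masking states the intended field width explicitly. A runnable illustration:

#include <stdio.h>

int main(void)
{
	unsigned int num = 0x00070d01;	/* hypothetical 3-part version */

	printf("%x.%x.%x\n", (num >> 16) & 0xFF, (num >> 8) & 0xFF,
	       num & 0xFF);	/* prints "7.d.1" */
	return 0;
}
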
index eedb48d..c19b072 100644 (file)
@@ -12319,15 +12319,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
        bnx2x_read_fwinfo(bp);
 
-       if (IS_PF(bp)) {
-               rc = bnx2x_init_firmware(bp);
-
-               if (rc) {
-                       bnx2x_free_mem_bp(bp);
-                       return rc;
-               }
-       }
-
        func = BP_FUNC(bp);
 
        /* need to reset chip if undi was active */
@@ -12340,7 +12331,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
                rc = bnx2x_prev_unload(bp);
                if (rc) {
-                       bnx2x_release_firmware(bp);
                        bnx2x_free_mem_bp(bp);
                        return rc;
                }
@@ -13409,7 +13399,7 @@ do {                                                                    \
             (u8 *)bp->arr, len);                                       \
 } while (0)
 
-int bnx2x_init_firmware(struct bnx2x *bp)
+static int bnx2x_init_firmware(struct bnx2x *bp)
 {
        const char *fw_file_name, *fw_file_name_v15;
        struct bnx2x_fw_file_hdr *fw_hdr;
@@ -13509,7 +13499,7 @@ request_firmware_exit:
        return rc;
 }
 
-void bnx2x_release_firmware(struct bnx2x *bp)
+static void bnx2x_release_firmware(struct bnx2x *bp)
 {
        kfree(bp->init_ops_offsets);
        kfree(bp->init_ops);
@@ -14026,7 +14016,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
        return 0;
 
 init_one_freemem:
-       bnx2x_release_firmware(bp);
        bnx2x_free_mem_bp(bp);
 
 init_one_exit:
index 63b8fc4..1c28495 100644 (file)
@@ -370,7 +370,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
                dev_kfree_skb_any(skb);
-               atomic_long_inc(&dev->tx_dropped);
+               dev_core_stats_tx_dropped_inc(dev);
                return NETDEV_TX_OK;
        }
 
@@ -646,7 +646,7 @@ tx_kick_pending:
        if (txr->kick_pending)
                bnxt_txr_db_kick(bp, txr, txr->tx_prod);
        txr->tx_buf_ring[txr->tx_prod].skb = NULL;
-       atomic_long_inc(&dev->tx_dropped);
+       dev_core_stats_tx_dropped_inc(dev);
        return NETDEV_TX_OK;
 }
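
Note: both drop paths move from the open-coded atomic_long_inc() on dev->tx_dropped to the core-stats helper, which as of this series keeps per-CPU counters and so avoids a shared atomic on a potentially hot path. The canonical shape of such a drop exit, as used above:

	dev_kfree_skb_any(skb);			/* consume the skb ourselves */
	dev_core_stats_tx_dropped_inc(dev);	/* per-CPU counter, no shared atomic */
	return NETDEV_TX_OK;			/* tell the stack not to requeue */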
 
@@ -2061,22 +2061,6 @@ static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
        case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
                netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
                break;
-       case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM: {
-               struct bnxt_hw_health *hw_health = &bp->hw_health;
-
-               hw_health->nvm_err_address = EVENT_DATA2_NVM_ERR_ADDR(data2);
-               if (EVENT_DATA1_NVM_ERR_TYPE_WRITE(data1)) {
-                       hw_health->synd = BNXT_HW_STATUS_NVM_WRITE_ERR;
-                       hw_health->nvm_write_errors++;
-               } else if (EVENT_DATA1_NVM_ERR_TYPE_ERASE(data1)) {
-                       hw_health->synd = BNXT_HW_STATUS_NVM_ERASE_ERR;
-                       hw_health->nvm_erase_errors++;
-               } else {
-                       hw_health->synd = BNXT_HW_STATUS_NVM_UNKNOWN_ERR;
-               }
-               set_bit(BNXT_FW_NVM_ERR_SP_EVENT, &bp->sp_event);
-               break;
-       }
        default:
                netdev_err(bp->dev, "FW reported unknown error type %u\n",
                           err_type);
@@ -11903,9 +11887,6 @@ static void bnxt_sp_task(struct work_struct *work)
        if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
                bnxt_fw_echo_reply(bp);
 
-       if (test_and_clear_bit(BNXT_FW_NVM_ERR_SP_EVENT, &bp->sp_event))
-               bnxt_devlink_health_hw_report(bp);
-
        /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
         * must be the last functions to be called before exiting.
         */
@@ -13489,7 +13470,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 #ifdef CONFIG_BNXT_SRIOV
        init_waitqueue_head(&bp->sriov_cfg_wait);
-       mutex_init(&bp->sriov_lock);
 #endif
        if (BNXT_SUPPORTS_TPA(bp)) {
                bp->gro_func = bnxt_gro_func_5730x;
index 9dd878d..61aa3e8 100644 (file)
@@ -516,21 +516,6 @@ struct rx_tpa_end_cmp_ext {
          ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK) >>\
         ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT)
 
-#define EVENT_DATA2_NVM_ERR_ADDR(data2)                                        \
-       (((data2) &                                                     \
-         ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_MASK) >>\
-        ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_SFT)
-
-#define EVENT_DATA1_NVM_ERR_TYPE_WRITE(data1)                          \
-       (((data1) &                                                     \
-         ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK) ==\
-        ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE)
-
-#define EVENT_DATA1_NVM_ERR_TYPE_ERASE(data1)                          \
-       (((data1) &                                                     \
-         ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK) ==\
-        ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE)
-
 struct nqe_cn {
        __le16  type;
        #define NQ_CN_TYPE_MASK           0x3fUL
@@ -1543,33 +1528,6 @@ struct bnxt_ctx_mem_info {
        struct bnxt_mem_init    mem_init[BNXT_CTX_MEM_INIT_MAX];
 };
 
-enum bnxt_hw_err {
-       BNXT_HW_STATUS_HEALTHY                  = 0x0,
-       BNXT_HW_STATUS_NVM_WRITE_ERR            = 0x1,
-       BNXT_HW_STATUS_NVM_ERASE_ERR            = 0x2,
-       BNXT_HW_STATUS_NVM_UNKNOWN_ERR          = 0x3,
-       BNXT_HW_STATUS_NVM_TEST_VPD_ENT_ERR     = 0x4,
-       BNXT_HW_STATUS_NVM_TEST_VPD_READ_ERR    = 0x5,
-       BNXT_HW_STATUS_NVM_TEST_VPD_WRITE_ERR   = 0x6,
-       BNXT_HW_STATUS_NVM_TEST_INCMPL_ERR      = 0x7,
-};
-
-struct bnxt_hw_health {
-       u32 nvm_err_address;
-       u32 nvm_write_errors;
-       u32 nvm_erase_errors;
-       u32 nvm_test_vpd_ent_errors;
-       u32 nvm_test_vpd_read_errors;
-       u32 nvm_test_vpd_write_errors;
-       u32 nvm_test_incmpl_errors;
-       u8 synd;
-       /* max a test in a day if previous test was successful */
-#define HW_RETEST_MIN_TIME     (1000 * 3600 * 24)
-       u8 nvm_test_result;
-       unsigned long nvm_test_timestamp;
-       struct devlink_health_reporter *hw_reporter;
-};
-
 enum bnxt_health_severity {
        SEVERITY_NORMAL = 0,
        SEVERITY_WARNING,
@@ -2087,7 +2045,6 @@ struct bnxt {
 #define BNXT_FW_EXCEPTION_SP_EVENT     19
 #define BNXT_LINK_CFG_CHANGE_SP_EVENT  21
 #define BNXT_FW_ECHO_REQUEST_SP_EVENT  23
-#define BNXT_FW_NVM_ERR_SP_EVENT       25
 
        struct delayed_work     fw_reset_task;
        int                     fw_reset_state;
@@ -2115,12 +2072,6 @@ struct bnxt {
        wait_queue_head_t       sriov_cfg_wait;
        bool                    sriov_cfg;
 #define BNXT_SRIOV_CFG_WAIT_TMO        msecs_to_jiffies(10000)
-
-       /* lock to protect VF-rep creation/cleanup via
-        * multiple paths such as ->sriov_configure() and
-        * devlink ->eswitch_mode_set()
-        */
-       struct mutex            sriov_lock;
 #endif
 
 #if BITS_PER_LONG == 32
@@ -2188,8 +2139,6 @@ struct bnxt {
        struct dentry           *debugfs_pdev;
        struct device           *hwmon_dev;
        enum board_idx          board_idx;
-
-       struct bnxt_hw_health   hw_health;
 };
 
 #define BNXT_NUM_RX_RING_STATS                 8
index 77e5510..0c17f90 100644 (file)
@@ -20,7 +20,6 @@
 #include "bnxt_ulp.h"
 #include "bnxt_ptp.h"
 #include "bnxt_coredump.h"
-#include "bnxt_nvm_defs.h"     /* NVRAM content constant and structure defs */
 
 static void __bnxt_fw_recover(struct bnxt *bp)
 {
@@ -242,148 +241,6 @@ static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
        .recover = bnxt_fw_recover,
 };
 
-static int bnxt_hw_recover(struct devlink_health_reporter *reporter,
-                          void *priv_ctx,
-                          struct netlink_ext_ack *extack)
-{
-       struct bnxt *bp = devlink_health_reporter_priv(reporter);
-       struct bnxt_hw_health *hw_health = &bp->hw_health;
-
-       hw_health->synd = BNXT_HW_STATUS_HEALTHY;
-       return 0;
-}
-
-static const char *hw_err_str(u8 synd)
-{
-       switch (synd) {
-       case BNXT_HW_STATUS_HEALTHY:
-               return "healthy";
-       case BNXT_HW_STATUS_NVM_WRITE_ERR:
-               return "nvm write error";
-       case BNXT_HW_STATUS_NVM_ERASE_ERR:
-               return "nvm erase error";
-       case BNXT_HW_STATUS_NVM_UNKNOWN_ERR:
-               return "unrecognized nvm error";
-       case BNXT_HW_STATUS_NVM_TEST_VPD_ENT_ERR:
-               return "nvm test vpd entry error";
-       case BNXT_HW_STATUS_NVM_TEST_VPD_READ_ERR:
-               return "nvm test vpd read error";
-       case BNXT_HW_STATUS_NVM_TEST_VPD_WRITE_ERR:
-               return "nvm test vpd write error";
-       case BNXT_HW_STATUS_NVM_TEST_INCMPL_ERR:
-               return "nvm test incomplete error";
-       default:
-               return "unknown hw error";
-       }
-}
-
-static void bnxt_nvm_test(struct bnxt *bp)
-{
-       struct bnxt_hw_health *h = &bp->hw_health;
-       u32 datalen;
-       u16 index;
-       u8 *buf;
-
-       if (!h->nvm_test_result) {
-               if (!h->nvm_test_timestamp ||
-                   time_after(jiffies, h->nvm_test_timestamp +
-                                       msecs_to_jiffies(HW_RETEST_MIN_TIME)))
-                       h->nvm_test_timestamp = jiffies;
-               else
-                       return;
-       }
-
-       if (bnxt_find_nvram_item(bp->dev, BNX_DIR_TYPE_VPD,
-                                BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
-                                &index, NULL, &datalen) || !datalen) {
-               h->nvm_test_result = BNXT_HW_STATUS_NVM_TEST_VPD_ENT_ERR;
-               h->nvm_test_vpd_ent_errors++;
-               return;
-       }
-
-       buf = kzalloc(datalen, GFP_KERNEL);
-       if (!buf) {
-               h->nvm_test_result = BNXT_HW_STATUS_NVM_TEST_INCMPL_ERR;
-               h->nvm_test_incmpl_errors++;
-               return;
-       }
-
-       if (bnxt_get_nvram_item(bp->dev, index, 0, datalen, buf)) {
-               h->nvm_test_result = BNXT_HW_STATUS_NVM_TEST_VPD_READ_ERR;
-               h->nvm_test_vpd_read_errors++;
-               goto err;
-       }
-
-       if (bnxt_flash_nvram(bp->dev, BNX_DIR_TYPE_VPD, BNX_DIR_ORDINAL_FIRST,
-                            BNX_DIR_EXT_NONE, 0, 0, buf, datalen)) {
-               h->nvm_test_result = BNXT_HW_STATUS_NVM_TEST_VPD_WRITE_ERR;
-               h->nvm_test_vpd_write_errors++;
-       }
-
-err:
-       kfree(buf);
-}
-
-static int bnxt_hw_diagnose(struct devlink_health_reporter *reporter,
-                           struct devlink_fmsg *fmsg,
-                           struct netlink_ext_ack *extack)
-{
-       struct bnxt *bp = devlink_health_reporter_priv(reporter);
-       struct bnxt_hw_health *h = &bp->hw_health;
-       u8 synd = h->synd;
-       int rc;
-
-       bnxt_nvm_test(bp);
-       if (h->nvm_test_result) {
-               synd = h->nvm_test_result;
-               devlink_health_report(h->hw_reporter, hw_err_str(synd), NULL);
-       }
-
-       rc = devlink_fmsg_string_pair_put(fmsg, "Status", hw_err_str(synd));
-       if (rc)
-               return rc;
-       rc = devlink_fmsg_u32_pair_put(fmsg, "nvm_write_errors", h->nvm_write_errors);
-       if (rc)
-               return rc;
-       rc = devlink_fmsg_u32_pair_put(fmsg, "nvm_erase_errors", h->nvm_erase_errors);
-       if (rc)
-               return rc;
-       rc = devlink_fmsg_u32_pair_put(fmsg, "nvm_test_vpd_ent_errors",
-                                      h->nvm_test_vpd_ent_errors);
-       if (rc)
-               return rc;
-       rc = devlink_fmsg_u32_pair_put(fmsg, "nvm_test_vpd_read_errors",
-                                      h->nvm_test_vpd_read_errors);
-       if (rc)
-               return rc;
-       rc = devlink_fmsg_u32_pair_put(fmsg, "nvm_test_vpd_write_errors",
-                                      h->nvm_test_vpd_write_errors);
-       if (rc)
-               return rc;
-       rc = devlink_fmsg_u32_pair_put(fmsg, "nvm_test_incomplete_errors",
-                                      h->nvm_test_incmpl_errors);
-       if (rc)
-               return rc;
-
-       return 0;
-}
-
-void bnxt_devlink_health_hw_report(struct bnxt *bp)
-{
-       struct bnxt_hw_health *hw_health = &bp->hw_health;
-
-       netdev_warn(bp->dev, "%s reported at address 0x%x\n", hw_err_str(hw_health->synd),
-                   hw_health->nvm_err_address);
-
-       devlink_health_report(hw_health->hw_reporter, hw_err_str(hw_health->synd), NULL);
-}
-
-static const struct devlink_health_reporter_ops bnxt_dl_hw_reporter_ops = {
-       .name = "hw",
-       .diagnose = bnxt_hw_diagnose,
-       .recover = bnxt_hw_recover,
-};
-
 static struct devlink_health_reporter *
 __bnxt_dl_reporter_create(struct bnxt *bp,
                          const struct devlink_health_reporter_ops *ops)
@@ -403,10 +260,6 @@ __bnxt_dl_reporter_create(struct bnxt *bp,
 void bnxt_dl_fw_reporters_create(struct bnxt *bp)
 {
        struct bnxt_fw_health *fw_health = bp->fw_health;
-       struct bnxt_hw_health *hw_health = &bp->hw_health;
-
-       if (!hw_health->hw_reporter)
-               hw_health->hw_reporter = __bnxt_dl_reporter_create(bp, &bnxt_dl_hw_reporter_ops);
 
        if (fw_health && !fw_health->fw_reporter)
                fw_health->fw_reporter = __bnxt_dl_reporter_create(bp, &bnxt_dl_fw_reporter_ops);
@@ -415,12 +268,6 @@ void bnxt_dl_fw_reporters_create(struct bnxt *bp)
 void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
 {
        struct bnxt_fw_health *fw_health = bp->fw_health;
-       struct bnxt_hw_health *hw_health = &bp->hw_health;
-
-       if (hw_health->hw_reporter) {
-               devlink_health_reporter_destroy(hw_health->hw_reporter);
-               hw_health->hw_reporter = NULL;
-       }
 
        if (fw_health && fw_health->fw_reporter) {
                devlink_health_reporter_destroy(fw_health->fw_reporter);
index 056962e..b810506 100644 (file)
@@ -74,7 +74,6 @@ enum bnxt_dl_version_type {
 void bnxt_devlink_health_fw_report(struct bnxt *bp);
 void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy);
 void bnxt_dl_health_fw_recovery_done(struct bnxt *bp);
-void bnxt_devlink_health_hw_report(struct bnxt *bp);
 void bnxt_dl_fw_reporters_create(struct bnxt *bp);
 void bnxt_dl_fw_reporters_destroy(struct bnxt *bp);
 int bnxt_dl_register(struct bnxt *bp);
index 1780747..22e965e 100644 (file)
@@ -2168,10 +2168,14 @@ static void bnxt_print_admin_err(struct bnxt *bp)
        netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
 }
 
-int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
-                    u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
-                    u32 dir_item_len, const u8 *data,
-                    size_t data_len)
+static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+                               u16 ext, u16 *index, u32 *item_length,
+                               u32 *data_length);
+
+static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+                           u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+                           u32 dir_item_len, const u8 *data,
+                           size_t data_len)
 {
        struct bnxt *bp = netdev_priv(dev);
        struct hwrm_nvm_write_input *req;
@@ -2495,48 +2499,6 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
        return rc;
 }
 
-static int nvm_update_err_to_stderr(struct net_device *dev, u8 result)
-{
-       switch (result) {
-       case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
-               netdev_err(dev, "PKG install error : Data integrity on NVM\n");
-               return -EINVAL;
-       case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
-               netdev_err(dev, "PKG install error : Invalid package\n");
-               return -ENOPKG;
-       case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
-               netdev_err(dev, "PKG install error : Authentication error\n");
-               return -EPERM;
-       case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
-       case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
-               netdev_err(dev, "PKG install error : Invalid device\n");
-               return -EOPNOTSUPP;
-       default:
-               netdev_err(dev, "PKG install error : Internal error\n");
-               return -EIO;
-       }
-}
-
 #define BNXT_PKG_DMA_SIZE      0x40000
 #define BNXT_NVM_MORE_FLAG     (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
 #define BNXT_NVM_LAST_FLAG     (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
@@ -2691,7 +2653,7 @@ pkg_abort:
        if (resp->result) {
                netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
                           (s8)resp->result, (int)resp->problem_item);
-               rc = nvm_update_err_to_stderr(dev, resp->result);
+               rc = -ENOPKG;
        }
        if (rc == -EACCES)
                bnxt_print_admin_err(bp);
@@ -2815,8 +2777,8 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
        return rc;
 }
 
-int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
-                       u32 length, u8 *data)
+static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+                              u32 length, u8 *data)
 {
        struct bnxt *bp = netdev_priv(dev);
        int rc;
@@ -2850,9 +2812,9 @@ int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
        return rc;
 }
 
-int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
-                        u16 ext, u16 *index, u32 *item_length,
-                        u32 *data_length)
+static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+                               u16 ext, u16 *index, u32 *item_length,
+                               u32 *data_length)
 {
        struct hwrm_nvm_find_dir_entry_output *output;
        struct hwrm_nvm_find_dir_entry_input *req;
index 2593e00..6aa4484 100644 (file)
@@ -56,13 +56,6 @@ int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
 int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
                                   u32 install_type);
 int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size);
-int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, u16 ext,
-                        u16 *index, u32 *item_length, u32 *data_length);
-int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
-                       u32 length, u8 *data);
-int bnxt_flash_nvram(struct net_device *dev, u16 dir_type, u16 dir_ordinal,
-                    u16 dir_ext, u16 dir_attr, u32 dir_item_len,
-                    const u8 *data, size_t data_len);
 void bnxt_ethtool_init(struct bnxt *bp);
 void bnxt_ethtool_free(struct bnxt *bp);
 
index 1d177fe..ddf2f39 100644 (file)
@@ -846,7 +846,7 @@ void bnxt_sriov_disable(struct bnxt *bp)
                return;
 
        /* synchronize VF and VF-rep create and destroy */
-       mutex_lock(&bp->sriov_lock);
+       devl_lock(bp->dl);
        bnxt_vf_reps_destroy(bp);
 
        if (pci_vfs_assigned(bp->pdev)) {
@@ -859,7 +859,7 @@ void bnxt_sriov_disable(struct bnxt *bp)
                /* Free the HW resources reserved for various VF's */
                bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
        }
-       mutex_unlock(&bp->sriov_lock);
+       devl_unlock(bp->dl);
 
        bnxt_free_vf_resources(bp);
 
index 8eb28e0..eb4803b 100644 (file)
@@ -559,44 +559,34 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
                             struct netlink_ext_ack *extack)
 {
        struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
-       int rc = 0;
 
-       mutex_lock(&bp->sriov_lock);
        if (bp->eswitch_mode == mode) {
                netdev_info(bp->dev, "already in %s eswitch mode\n",
                            mode == DEVLINK_ESWITCH_MODE_LEGACY ?
                            "legacy" : "switchdev");
-               rc = -EINVAL;
-               goto done;
+               return -EINVAL;
        }
 
        switch (mode) {
        case DEVLINK_ESWITCH_MODE_LEGACY:
                bnxt_vf_reps_destroy(bp);
-               break;
+               return 0;
 
        case DEVLINK_ESWITCH_MODE_SWITCHDEV:
                if (bp->hwrm_spec_code < 0x10803) {
                        netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n");
-                       rc = -ENOTSUPP;
-                       goto done;
+                       return -ENOTSUPP;
                }
 
                if (pci_num_vf(bp->pdev) == 0) {
                        netdev_info(bp->dev, "Enable VFs before setting switchdev mode\n");
-                       rc = -EPERM;
-                       goto done;
+                       return -EPERM;
                }
-               rc = bnxt_vf_reps_create(bp);
-               break;
+               return bnxt_vf_reps_create(bp);
 
        default:
-               rc = -EINVAL;
-               goto done;
+               return -EINVAL;
        }
-done:
-       mutex_unlock(&bp->sriov_lock);
-       return rc;
 }
 
 #endif
index cfe0911..9a41145 100644 (file)
@@ -2287,8 +2287,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
                dma_length_status = status->length_status;
                if (dev->features & NETIF_F_RXCSUM) {
                        rx_csum = (__force __be16)(status->rx_csum & 0xffff);
-                       skb->csum = (__force __wsum)ntohs(rx_csum);
-                       skb->ip_summed = CHECKSUM_COMPLETE;
+                       if (rx_csum) {
+                               skb->csum = (__force __wsum)ntohs(rx_csum);
+                               skb->ip_summed = CHECKSUM_COMPLETE;
+                       }
                }
 
                /* DMA flags and length are still valid no matter how
index e31a5a3..f55d9d9 100644 (file)
 void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct device *kdev = &priv->pdev->dev;
+
+       if (!device_can_wakeup(kdev)) {
+               wol->supported = 0;
+               wol->wolopts = 0;
+               return;
+       }
 
        wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
        wol->wolopts = priv->wolopts;
index 4c23115..800d5ce 100644 (file)
@@ -1575,7 +1575,14 @@ static int macb_poll(struct napi_struct *napi, int budget)
        if (work_done < budget) {
                napi_complete_done(napi, work_done);
 
-               /* Packets received while interrupts were disabled */
+               /* RSR bits only seem to propagate to raise interrupts when
+                * interrupts are enabled at the time, so if bits are already
+                * set due to packets received while interrupts were disabled,
+                * they will not cause another interrupt to be generated when
+                * interrupts are re-enabled.
+                * Check for this case here. This has been seen to happen
+                * around 30% of the time under heavy network load.
+                */
                status = macb_readl(bp, RSR);
                if (status) {
                        if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -1583,6 +1590,22 @@ static int macb_poll(struct napi_struct *napi, int budget)
                        napi_reschedule(napi);
                } else {
                        queue_writel(queue, IER, bp->rx_intr_mask);
+
+                       /* In rare cases, packets could have been received in
+                        * the window between the check above and re-enabling
+                        * interrupts. Therefore, a double-check is required
+                        * to avoid losing a wakeup. This can potentially race
+                        * with the interrupt handler doing the same actions
+                        * if an interrupt is raised just after enabling them,
+                        * but this should be harmless.
+                        */
+                       status = macb_readl(bp, RSR);
+                       if (unlikely(status)) {
+                               queue_writel(queue, IDR, bp->rx_intr_mask);
+                               if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+                                       queue_writel(queue, ISR, MACB_BIT(RCOMP));
+                               napi_schedule(napi);
+                       }
                }
        }
 
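A minimal sketch of the lost-wakeup re-check pattern the two macb_poll() comments above describe; the mydev_* helpers are hypothetical placeholders for the device specifics, not macb or kernel API:

	static int mydev_poll(struct napi_struct *napi, int budget)
	{
		int work_done = mydev_process_rx(napi, budget);	/* hypothetical */

		if (work_done < budget) {
			napi_complete_done(napi, work_done);
			mydev_enable_rx_irq();			/* hypothetical */
			/* Packets that arrived between the last status check
			 * and IRQ enable would never raise an interrupt, so
			 * re-check and re-arm NAPI if anything is pending.
			 */
			if (mydev_rx_pending()) {		/* hypothetical */
				mydev_disable_rx_irq();
				napi_schedule(napi);
			}
		}
		return work_done;
	}
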
index 6352131..174b1e1 100644 (file)
@@ -3349,6 +3349,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
        if (!adapter->registered_device_map) {
                dev_err(&pdev->dev, "could not register any net devices\n");
+               err = -ENODEV;
                goto out_free_dev;
        }
 
index 3233408..69dbf95 100644 (file)
@@ -608,7 +608,6 @@ static s32 nps_enet_probe(struct platform_device *pdev)
        /* Get IRQ number */
        priv->irq = platform_get_irq(pdev, 0);
        if (priv->irq < 0) {
-               dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n");
                err = -ENODEV;
                goto out_netdev;
        }
index 939fa9d..4b04725 100644 (file)
@@ -2077,8 +2077,10 @@ static int dpaa2_eth_open(struct net_device *net_dev)
                goto enable_err;
        }
 
-       if (dpaa2_eth_is_type_phy(priv))
+       if (dpaa2_eth_is_type_phy(priv)) {
+               dpaa2_mac_start(priv->mac);
                phylink_start(priv->mac->phylink);
+       }
 
        return 0;
 
@@ -2153,6 +2155,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
 
        if (dpaa2_eth_is_type_phy(priv)) {
                phylink_stop(priv->mac->phylink);
+               dpaa2_mac_stop(priv->mac);
        } else {
                netif_tx_stop_all_queues(net_dev);
                netif_carrier_off(net_dev);
index 521f036..c48811d 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/acpi.h>
 #include <linux/pcs-lynx.h>
+#include <linux/phy/phy.h>
 #include <linux/property.h>
 
 #include "dpaa2-eth.h"
 #define phylink_to_dpaa2_mac(config) \
        container_of((config), struct dpaa2_mac, phylink_config)
 
+#define DPMAC_PROTOCOL_CHANGE_VER_MAJOR                4
+#define DPMAC_PROTOCOL_CHANGE_VER_MINOR                8
+
+#define DPAA2_MAC_FEATURE_PROTOCOL_CHANGE      BIT(0)
+
+static int dpaa2_mac_cmp_ver(struct dpaa2_mac *mac,
+                            u16 ver_major, u16 ver_minor)
+{
+       if (mac->ver_major == ver_major)
+               return mac->ver_minor - ver_minor;
+       return mac->ver_major - ver_major;
+}
+
+static void dpaa2_mac_detect_features(struct dpaa2_mac *mac)
+{
+       mac->features = 0;
+
+       if (dpaa2_mac_cmp_ver(mac, DPMAC_PROTOCOL_CHANGE_VER_MAJOR,
+                             DPMAC_PROTOCOL_CHANGE_VER_MINOR) >= 0)
+               mac->features |= DPAA2_MAC_FEATURE_PROTOCOL_CHANGE;
+}
+
 static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
 {
        *if_mode = PHY_INTERFACE_MODE_NA;
@@ -38,6 +61,29 @@ static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
        return 0;
 }
 
+static enum dpmac_eth_if dpmac_eth_if_mode(phy_interface_t if_mode)
+{
+       switch (if_mode) {
+       case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+               return DPMAC_ETH_IF_RGMII;
+       case PHY_INTERFACE_MODE_USXGMII:
+               return DPMAC_ETH_IF_USXGMII;
+       case PHY_INTERFACE_MODE_QSGMII:
+               return DPMAC_ETH_IF_QSGMII;
+       case PHY_INTERFACE_MODE_SGMII:
+               return DPMAC_ETH_IF_SGMII;
+       case PHY_INTERFACE_MODE_10GBASER:
+               return DPMAC_ETH_IF_XFI;
+       case PHY_INTERFACE_MODE_1000BASEX:
+               return DPMAC_ETH_IF_1000BASEX;
+       default:
+               return DPMAC_ETH_IF_MII;
+       }
+}
+
 static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev,
                                                u16 dpmac_id)
 {
@@ -125,6 +171,19 @@ static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
        if (err)
                netdev_err(mac->net_dev, "%s: dpmac_set_link_state() = %d\n",
                           __func__, err);
+
+       if (!mac->serdes_phy)
+               return;
+
+       /* This happens only if we support changing the protocol at runtime */
+       err = dpmac_set_protocol(mac->mc_io, 0, mac->mc_dev->mc_handle,
+                                dpmac_eth_if_mode(state->interface));
+       if (err)
+               netdev_err(mac->net_dev,  "dpmac_set_protocol() = %d\n", err);
+
+       err = phy_set_mode_ext(mac->serdes_phy, PHY_MODE_ETHERNET, state->interface);
+       if (err)
+               netdev_err(mac->net_dev, "phy_set_mode_ext() = %d\n", err);
 }
 
 static void dpaa2_mac_link_up(struct phylink_config *config,
@@ -235,10 +294,66 @@ static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
        }
 }
 
+static void dpaa2_mac_set_supported_interfaces(struct dpaa2_mac *mac)
+{
+       int intf, err;
+
+       /* We support the current interface mode and, if we have a PCS,
+        * similar interface modes that do not require the SerDes lane to
+        * be reconfigured.
+        */
+       __set_bit(mac->if_mode, mac->phylink_config.supported_interfaces);
+       if (mac->pcs) {
+               switch (mac->if_mode) {
+               case PHY_INTERFACE_MODE_1000BASEX:
+               case PHY_INTERFACE_MODE_SGMII:
+                       __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+                                 mac->phylink_config.supported_interfaces);
+                       __set_bit(PHY_INTERFACE_MODE_SGMII,
+                                 mac->phylink_config.supported_interfaces);
+                       break;
+
+               default:
+                       break;
+               }
+       }
+
+       if (!mac->serdes_phy)
+               return;
+
+       /* If we have access to the SerDes phy/lane, ask the SerDes driver
+        * which interfaces are supported based on the current PLL
+        * configuration.
+        */
+       for (intf = 0; intf < PHY_INTERFACE_MODE_MAX; intf++) {
+               if (intf == PHY_INTERFACE_MODE_NA)
+                       continue;
+
+               err = phy_validate(mac->serdes_phy, PHY_MODE_ETHERNET, intf, NULL);
+               if (err)
+                       continue;
+
+               __set_bit(intf, mac->phylink_config.supported_interfaces);
+       }
+}
+
+void dpaa2_mac_start(struct dpaa2_mac *mac)
+{
+       if (mac->serdes_phy)
+               phy_power_on(mac->serdes_phy);
+}
+
+void dpaa2_mac_stop(struct dpaa2_mac *mac)
+{
+       if (mac->serdes_phy)
+               phy_power_off(mac->serdes_phy);
+}
+
 int dpaa2_mac_connect(struct dpaa2_mac *mac)
 {
        struct net_device *net_dev = mac->net_dev;
        struct fwnode_handle *dpmac_node;
+       struct phy *serdes_phy = NULL;
        struct phylink *phylink;
        int err;
 
@@ -255,6 +370,20 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
                return -EINVAL;
        mac->if_mode = err;
 
+       if (mac->features & DPAA2_MAC_FEATURE_PROTOCOL_CHANGE &&
+           !phy_interface_mode_is_rgmii(mac->if_mode) &&
+           is_of_node(dpmac_node)) {
+               serdes_phy = of_phy_get(to_of_node(dpmac_node), NULL);
+
+               if (serdes_phy == ERR_PTR(-ENODEV))
+                       serdes_phy = NULL;
+               else if (IS_ERR(serdes_phy))
+                       return PTR_ERR(serdes_phy);
+               else
+                       phy_init(serdes_phy);
+       }
+       mac->serdes_phy = serdes_phy;
+
        /* The MAC does not have the capability to add RGMII delays so
         * error out if the interface mode requests them and there is no PHY
         * to act upon them
@@ -283,25 +412,7 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
                MAC_10FD | MAC_100FD | MAC_1000FD | MAC_2500FD | MAC_5000FD |
                MAC_10000FD;
 
-       /* We support the current interface mode, and if we have a PCS
-        * similar interface modes that do not require the PLLs to be
-        * reconfigured.
-        */
-       __set_bit(mac->if_mode, mac->phylink_config.supported_interfaces);
-       if (mac->pcs) {
-               switch (mac->if_mode) {
-               case PHY_INTERFACE_MODE_1000BASEX:
-               case PHY_INTERFACE_MODE_SGMII:
-                       __set_bit(PHY_INTERFACE_MODE_1000BASEX,
-                                 mac->phylink_config.supported_interfaces);
-                       __set_bit(PHY_INTERFACE_MODE_SGMII,
-                                 mac->phylink_config.supported_interfaces);
-                       break;
-
-               default:
-                       break;
-               }
-       }
+       dpaa2_mac_set_supported_interfaces(mac);
 
        phylink = phylink_create(&mac->phylink_config,
                                 dpmac_node, mac->if_mode,
@@ -336,6 +447,8 @@ void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
        phylink_disconnect_phy(mac->phylink);
        phylink_destroy(mac->phylink);
        dpaa2_pcs_destroy(mac);
+       of_phy_put(mac->serdes_phy);
+       mac->serdes_phy = NULL;
 }
 
 int dpaa2_mac_open(struct dpaa2_mac *mac)
@@ -359,6 +472,14 @@ int dpaa2_mac_open(struct dpaa2_mac *mac)
                goto err_close_dpmac;
        }
 
+       err = dpmac_get_api_version(mac->mc_io, 0, &mac->ver_major, &mac->ver_minor);
+       if (err) {
+               netdev_err(net_dev, "dpmac_get_api_version() = %d\n", err);
+               goto err_close_dpmac;
+       }
+
+       dpaa2_mac_detect_features(mac);
+
        /* Find the device node representing the MAC device and link the device
         * behind the associated netdev to it.
         */
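
The dpaa2-mac hunks above drive the SerDes lane through the generic PHY API; a condensed sketch of that lifecycle, using only calls that appear in the diff (error handling and driver state trimmed, so this is a sketch rather than working code):

	static int serdes_lifecycle_sketch(struct device_node *node, int intf,
					   unsigned long *supported)
	{
		struct phy *serdes = of_phy_get(node, NULL);	/* at connect */

		if (IS_ERR(serdes))
			return PTR_ERR(serdes);

		phy_init(serdes);
		/* probe which interface modes the current PLL setup allows */
		if (!phy_validate(serdes, PHY_MODE_ETHERNET, intf, NULL))
			__set_bit(intf, supported);
		/* reconfigure the lane when phylink switches protocol */
		phy_set_mode_ext(serdes, PHY_MODE_ETHERNET, intf);

		phy_power_on(serdes);				/* at ifup */
		phy_power_off(serdes);				/* at ifdown */
		of_phy_put(serdes);				/* at disconnect */
		return 0;
	}
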
index 1331a84..a58cab1 100644 (file)
@@ -17,6 +17,8 @@ struct dpaa2_mac {
        struct net_device *net_dev;
        struct fsl_mc_io *mc_io;
        struct dpmac_attr attr;
+       u16 ver_major, ver_minor;
+       unsigned long features;
 
        struct phylink_config phylink_config;
        struct phylink *phylink;
@@ -24,6 +26,8 @@ struct dpaa2_mac {
        enum dpmac_link_type if_link_type;
        struct phylink_pcs *pcs;
        struct fwnode_handle *fw_node;
+
+       struct phy *serdes_phy;
 };
 
 bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
@@ -43,4 +47,8 @@ void dpaa2_mac_get_strings(u8 *data);
 
 void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data);
 
+void dpaa2_mac_start(struct dpaa2_mac *mac);
+
+void dpaa2_mac_stop(struct dpaa2_mac *mac);
+
 #endif /* DPAA2_MAC_H */
index 9a56107..e507e90 100644 (file)
@@ -703,8 +703,10 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
 
        dpaa2_switch_enable_ctrl_if_napi(ethsw);
 
-       if (dpaa2_switch_port_is_type_phy(port_priv))
+       if (dpaa2_switch_port_is_type_phy(port_priv)) {
+               dpaa2_mac_start(port_priv->mac);
                phylink_start(port_priv->mac->phylink);
+       }
 
        return 0;
 }
@@ -717,6 +719,7 @@ static int dpaa2_switch_port_stop(struct net_device *netdev)
 
        if (dpaa2_switch_port_is_type_phy(port_priv)) {
                phylink_stop(port_priv->mac->phylink);
+               dpaa2_mac_stop(port_priv->mac);
        } else {
                netif_tx_stop_all_queues(netdev);
                netif_carrier_off(netdev);
index a24b20f..e9ac2ec 100644 (file)
 #define DPMAC_CMDID_CLOSE              DPMAC_CMD(0x800)
 #define DPMAC_CMDID_OPEN               DPMAC_CMD(0x80c)
 
+#define DPMAC_CMDID_GET_API_VERSION    DPMAC_CMD(0xa0c)
+
 #define DPMAC_CMDID_GET_ATTR           DPMAC_CMD(0x004)
 #define DPMAC_CMDID_SET_LINK_STATE     DPMAC_CMD_V2(0x0c3)
 
 #define DPMAC_CMDID_GET_COUNTER                DPMAC_CMD(0x0c4)
 
+#define DPMAC_CMDID_SET_PROTOCOL       DPMAC_CMD(0x0c7)
+
 /* Macros for accessing command fields smaller than 1byte */
 #define DPMAC_MASK(field)        \
        GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
@@ -70,4 +74,12 @@ struct dpmac_rsp_get_counter {
        __le64 counter;
 };
 
+struct dpmac_rsp_get_api_version {
+       __le16 major;
+       __le16 minor;
+};
+
+struct dpmac_cmd_set_protocol {
+       u8 eth_if;
+};
 #endif /* _FSL_DPMAC_CMD_H */
index d5997b6..f440a4c 100644 (file)
@@ -181,3 +181,57 @@ int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
 
        return 0;
 }
+
+/**
+ * dpmac_get_api_version() - Get Data Path MAC version
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path mac API
+ * @minor_ver: Minor version of data path mac API
+ *
+ * Return:  '0' on Success; Error code otherwise.
+ */
+int dpmac_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+                         u16 *major_ver, u16 *minor_ver)
+{
+       struct dpmac_rsp_get_api_version *rsp_params;
+       struct fsl_mc_command cmd = { 0 };
+       int err;
+
+       cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION,
+                                         cmd_flags,
+                                         0);
+
+       err = mc_send_command(mc_io, &cmd);
+       if (err)
+               return err;
+
+       rsp_params = (struct dpmac_rsp_get_api_version *)cmd.params;
+       *major_ver = le16_to_cpu(rsp_params->major);
+       *minor_ver = le16_to_cpu(rsp_params->minor);
+
+       return 0;
+}
+
+/**
+ * dpmac_set_protocol() - Reconfigure the DPMAC protocol
+ * @mc_io:      Pointer to opaque I/O object
+ * @cmd_flags:  Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:      Token of DPMAC object
+ * @protocol:   New protocol to which the DPMAC is to be reconfigured.
+ *
+ * Return:      '0' on Success; Error code otherwise.
+ */
+int dpmac_set_protocol(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+                      enum dpmac_eth_if protocol)
+{
+       struct dpmac_cmd_set_protocol *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+
+       cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PROTOCOL,
+                                         cmd_flags, token);
+       cmd_params = (struct dpmac_cmd_set_protocol *)cmd.params;
+       cmd_params->eth_if = protocol;
+
+       return mc_send_command(mc_io, &cmd);
+}
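
dpmac_set_protocol() is only safe to issue against firmware that implements it, which is why the dpaa2-mac code above gates it on dpmac_get_api_version(); a sketch of that gating, with 4.8 being the DPMAC_PROTOCOL_CHANGE_VER_MAJOR/MINOR threshold the driver defines (DPMAC_ETH_IF_SGMII is chosen arbitrarily here):

	u16 major, minor;
	int err;

	err = dpmac_get_api_version(mc_io, 0, &major, &minor);
	if (err)
		return err;

	/* reconfigure the protocol only if the DPMAC API is at least 4.8 */
	if (major > 4 || (major == 4 && minor >= 8))
		err = dpmac_set_protocol(mc_io, 0, token, DPMAC_ETH_IF_SGMII);
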
index 8f7ceb7..1748881 100644 (file)
@@ -205,4 +205,9 @@ enum dpmac_counter_id {
 int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
                      enum dpmac_counter_id id, u64 *value);
 
+int dpmac_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+                         u16 *major_ver, u16 *minor_ver);
+
+int dpmac_set_protocol(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+                      enum dpmac_eth_if protocol);
 #endif /* __FSL_DPMAC_H */
index 70e6d97..1c8f5cc 100644 (file)
@@ -147,7 +147,7 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
        /* return all Fs if nothing was there */
        if (enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_RD_ER) {
                dev_dbg(&bus->dev,
-                       "Error while reading PHY%d reg at %d.%hhu\n",
+                       "Error while reading PHY%d reg at %d.%d\n",
                        phy_id, dev_addr, regnum);
                return 0xffff;
        }
index ff75626..9a2c16d 100644 (file)
@@ -1464,6 +1464,7 @@ static int gfar_get_ts_info(struct net_device *dev,
        ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
        if (ptp_node) {
                ptp_dev = of_find_device_by_node(ptp_node);
+               of_node_put(ptp_node);
                if (ptp_dev)
                        ptp = platform_get_drvdata(ptp_dev);
        }
index ef8058a..ec90da1 100644 (file)
@@ -243,7 +243,7 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
        if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
            !priv->has_a011043) {
                dev_dbg(&bus->dev,
-                       "Error while reading PHY%d reg at %d.%hhu\n",
+                       "Error while reading PHY%d reg at %d.%d\n",
                        phy_id, dev_addr, regnum);
                ret = 0xffff;
        } else {
index 2ff5138..1ecedec 100644 (file)
@@ -18,6 +18,7 @@ if NET_VENDOR_FUNGIBLE
 
 config FUN_CORE
        tristate
+       select SBITMAP
        help
          A service module offering basic common services to Fungible
          device drivers.
index 1b21ccc..9d6f214 100644 (file)
@@ -3,17 +3,16 @@
 #ifndef _FUN_KTLS_H
 #define _FUN_KTLS_H
 
-struct net_device;
-struct funeth_priv;
-
-#ifdef CONFIG_TLS_DEVICE
 #include <net/tls.h>
 
+struct funeth_priv;
+
 struct fun_ktls_tx_ctx {
        __be64 tlsid;
        u32 next_seq;
 };
 
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
 int fun_ktls_init(struct net_device *netdev);
 void fun_ktls_cleanup(struct funeth_priv *fp);
 
index c58b10c..67dd02e 100644 (file)
@@ -253,7 +253,7 @@ static struct fun_irq *fun_alloc_qirq(struct funeth_priv *fp, unsigned int idx,
        int cpu, res;
 
        cpu = cpumask_local_spread(idx, node);
-       node = local_memory_node(cpu_to_node(cpu));
+       node = cpu_to_mem(cpu);
 
        irq = kzalloc_node(sizeof(*irq), GFP_KERNEL, node);
        if (!irq)
index 46684af..ff6e292 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/tcp.h>
 #include <uapi/linux/udp.h>
 #include "funeth.h"
+#include "funeth_ktls.h"
 #include "funeth_txrx.h"
 #include "funeth_trace.h"
 #include "fun_queue.h"
@@ -75,12 +76,10 @@ static __be16 tcp_hdr_doff_flags(const struct tcphdr *th)
        return *(__be16 *)&tcp_flag_word(th);
 }
 
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
-#include "funeth_ktls.h"
-
 static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
                                  unsigned int *tls_len)
 {
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
        const struct fun_ktls_tx_ctx *tls_ctx;
        u32 datalen, seq;
 
@@ -108,8 +107,10 @@ static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
                FUN_QSTAT_INC(q, tx_tls_drops);
 
        return skb;
-}
+#else
+       return NULL;
 #endif
+}
 
 /* Write as many descriptors as needed for the supplied skb starting at the
  * current producer location. The caller has made certain enough descriptors
index 7aed056..04c9f91 100644 (file)
@@ -239,7 +239,7 @@ static inline void fun_txq_wr_db(const struct funeth_txq *q)
 
 static inline int fun_irq_node(const struct fun_irq *p)
 {
-       return local_memory_node(cpu_to_node(cpumask_first(&p->affinity_mask)));
+       return cpu_to_mem(cpumask_first(&p->affinity_mask));
 }
 
 int fun_rxq_napi_poll(struct napi_struct *napi, int budget);
index e4e98aa..021bbf3 100644 (file)
@@ -439,7 +439,7 @@ static bool gve_rx_ctx_init(struct gve_rx_ctx *ctx, struct gve_rx_ring *rx)
                if (frag_size > rx->packet_buffer_size) {
                        packet_size_error = true;
                        netdev_warn(priv->dev,
-                                   "RX fragment error: packet_buffer_size=%d, frag_size=%d, droping packet.",
+                                   "RX fragment error: packet_buffer_size=%d, frag_size=%d, dropping packet.",
                                    rx->packet_buffer_size, be16_to_cpu(desc->len));
                }
                page_info = &rx->data.page_info[idx];
index d7a27c2..54faf0f 100644 (file)
@@ -887,8 +887,8 @@ static void hns_get_ethtool_stats(struct net_device *netdev,
        p[21] = net_stats->rx_compressed;
        p[22] = net_stats->tx_compressed;
 
-       p[23] = netdev->rx_dropped.counter;
-       p[24] = netdev->tx_dropped.counter;
+       p[23] = 0; /* was netdev->rx_dropped.counter */
+       p[24] = 0; /* was netdev->tx_dropped.counter */
 
        p[25] = priv->tx_timeout_count;
 
index 089f444..1f87a8a 100644 (file)
@@ -1225,7 +1225,7 @@ static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
                ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
                if (ret == -EOPNOTSUPP) {
                        dev_warn(&hdev->pdev->dev,
-                                "fw %08x does't support ets tc weight cmd\n",
+                                "fw %08x doesn't support ets tc weight cmd\n",
                                 hdev->fw_version);
                        ret = 0;
                }
index 79aef68..eb41380 100644 (file)
@@ -150,7 +150,7 @@ struct rfd_struct
 #define RFD_ERR_RNR  0x02     /* status: receiver out of resources */
 #define RFD_ERR_OVR  0x01     /* DMA Overrun! */
 
-#define RFD_ERR_FTS  0x0080    /* Frame to short */
+#define RFD_ERR_FTS  0x0080    /* Frame too short */
 #define RFD_ERR_NEOP 0x0040    /* No EOP flag (for bitstuffing only) */
 #define RFD_ERR_TRUN 0x0020    /* (82596 only/SF mode) indicates truncated frame */
 #define RFD_MATCHADD 0x0002     /* status: Destinationaddress !matches IA (only 82596) */
index 0f0efee..fd07c36 100644 (file)
@@ -146,11 +146,11 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
                        break;
        }
        if (!(mdic & E1000_MDIC_READY)) {
-               e_dbg("MDI Read did not complete\n");
+               e_dbg("MDI Read PHY Reg Address %d did not complete\n", offset);
                return -E1000_ERR_PHY;
        }
        if (mdic & E1000_MDIC_ERROR) {
-               e_dbg("MDI Error\n");
+               e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
                return -E1000_ERR_PHY;
        }
        if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
@@ -210,11 +210,11 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
                        break;
        }
        if (!(mdic & E1000_MDIC_READY)) {
-               e_dbg("MDI Write did not complete\n");
+               e_dbg("MDI Write PHY Reg Address %d did not complete\n", offset);
                return -E1000_ERR_PHY;
        }
        if (mdic & E1000_MDIC_ERROR) {
-               e_dbg("MDI Error\n");
+               e_dbg("MDI Write PHY Red Address %d Error\n", offset);
                return -E1000_ERR_PHY;
        }
        if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
index 90fff05..be7c6f3 100644 (file)
@@ -741,10 +741,8 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
                vsi = pf->vsi[vf->lan_vsi_idx];
                dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
                         vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
-               dev_info(&pf->pdev->dev, "       num MDD=%lld, invalid msg=%lld, valid msg=%lld\n",
-                        vf->num_mdd_events,
-                        vf->num_invalid_msgs,
-                        vf->num_valid_msgs);
+               dev_info(&pf->pdev->dev, "       num MDD=%lld\n",
+                        vf->num_mdd_events);
        } else {
                dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
        }
index fe6dca8..3a38bf8 100644 (file)
@@ -682,10 +682,11 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
        __le16 le_sum;
 
        ret_code = i40e_calc_nvm_checksum(hw, &checksum);
-       le_sum = cpu_to_le16(checksum);
-       if (!ret_code)
+       if (!ret_code) {
+               le_sum = cpu_to_le16(checksum);
                ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
                                             1, &le_sum, true);
+       }
 
        return ret_code;
 }
index dfdb6e7..2606e8f 100644 (file)
@@ -1917,19 +1917,17 @@ sriov_configure_out:
 /***********************virtual channel routines******************/
 
 /**
- * i40e_vc_send_msg_to_vf_ex
+ * i40e_vc_send_msg_to_vf
  * @vf: pointer to the VF info
  * @v_opcode: virtual channel opcode
  * @v_retval: virtual channel return value
  * @msg: pointer to the msg buffer
  * @msglen: msg length
- * @is_quiet: true for not printing unsuccessful return values, false otherwise
  *
  * send msg to VF
  **/
-static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
-                                    u32 v_retval, u8 *msg, u16 msglen,
-                                    bool is_quiet)
+static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+                                 u32 v_retval, u8 *msg, u16 msglen)
 {
        struct i40e_pf *pf;
        struct i40e_hw *hw;
@@ -1944,25 +1942,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
        hw = &pf->hw;
        abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
 
-       /* single place to detect unsuccessful return values */
-       if (v_retval && !is_quiet) {
-               vf->num_invalid_msgs++;
-               dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
-                        vf->vf_id, v_opcode, v_retval);
-               if (vf->num_invalid_msgs >
-                   I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
-                       dev_err(&pf->pdev->dev,
-                               "Number of invalid messages exceeded for VF %d\n",
-                               vf->vf_id);
-                       dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
-                       set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
-               }
-       } else {
-               vf->num_valid_msgs++;
-               /* reset the invalid counter, if a valid message is received. */
-               vf->num_invalid_msgs = 0;
-       }
-
        aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id,  v_opcode, v_retval,
                                        msg, msglen, NULL);
        if (aq_ret) {
@@ -1975,23 +1954,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
        return 0;
 }
 
-/**
- * i40e_vc_send_msg_to_vf
- * @vf: pointer to the VF info
- * @v_opcode: virtual channel opcode
- * @v_retval: virtual channel return value
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * send msg to VF
- **/
-static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
-                                 u32 v_retval, u8 *msg, u16 msglen)
-{
-       return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval,
-                                        msg, msglen, false);
-}
-
 /**
  * i40e_vc_send_resp_to_vf
  * @vf: pointer to the VF info
@@ -2822,7 +2784,6 @@ error_param:
  * i40e_check_vf_permission
  * @vf: pointer to the VF info
  * @al: MAC address list from virtchnl
- * @is_quiet: set true for printing msg without opcode info, false otherwise
  *
  * Check that the given list of MAC addresses is allowed. Will return -EPERM
  * if any address in the list is not valid. Checks the following conditions:
@@ -2837,8 +2798,7 @@ error_param:
  * addresses might not be accurate.
  **/
 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
-                                          struct virtchnl_ether_addr_list *al,
-                                          bool *is_quiet)
+                                          struct virtchnl_ether_addr_list *al)
 {
        struct i40e_pf *pf = vf->pf;
        struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
@@ -2846,7 +2806,6 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
        int mac2add_cnt = 0;
        int i;
 
-       *is_quiet = false;
        for (i = 0; i < al->num_elements; i++) {
                struct i40e_mac_filter *f;
                u8 *addr = al->list[i].addr;
@@ -2870,7 +2829,6 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
                    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
                        dev_err(&pf->pdev->dev,
                                "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
-                       *is_quiet = true;
                        return -EPERM;
                }
 
@@ -2921,7 +2879,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
            (struct virtchnl_ether_addr_list *)msg;
        struct i40e_pf *pf = vf->pf;
        struct i40e_vsi *vsi = NULL;
-       bool is_quiet = false;
        i40e_status ret = 0;
        int i;
 
@@ -2938,7 +2895,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
         */
        spin_lock_bh(&vsi->mac_filter_hash_lock);
 
-       ret = i40e_check_vf_permission(vf, al, &is_quiet);
+       ret = i40e_check_vf_permission(vf, al);
        if (ret) {
                spin_unlock_bh(&vsi->mac_filter_hash_lock);
                goto error_param;
@@ -2976,8 +2933,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
 
 error_param:
        /* send the response to the VF */
-       return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
-                                      ret, NULL, 0, is_quiet);
+       return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+                                     ret, NULL, 0);
 }
 
 /**
index 03c42fd..a554d0a 100644 (file)
@@ -10,8 +10,6 @@
 
 #define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
 
-#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED  10
-
 #define I40E_VLAN_PRIORITY_SHIFT       13
 #define I40E_VLAN_MASK                 0xFFF
 #define I40E_PRIORITY_MASK             0xE000
@@ -92,9 +90,6 @@ struct i40e_vf {
        u8 num_queue_pairs;     /* num of qps assigned to VF vsis */
        u8 num_req_queues;      /* num of requested qps */
        u64 num_mdd_events;     /* num of mdd events detected */
-       /* num of continuous malformed or invalid msgs detected */
-       u64 num_invalid_msgs;
-       u64 num_valid_msgs;     /* num of valid msgs detected */
 
        unsigned long vf_caps;  /* vf's adv. capabilities */
        unsigned long vf_states;        /* vf's runtime states */
index bed2ebf..49aed3e 100644 (file)
@@ -291,6 +291,7 @@ struct iavf_adapter {
 #define IAVF_FLAG_REINIT_ITR_NEEDED            BIT(16)
 #define IAVF_FLAG_QUEUES_DISABLED              BIT(17)
 #define IAVF_FLAG_SETUP_NETDEV_FEATURES                BIT(18)
+#define IAVF_FLAG_REINIT_MSIX_NEEDED           BIT(20)
 /* duplicates for common code */
 #define IAVF_FLAG_DCB_ENABLED                  0
        /* flags for admin queue service task */
index 45a1e88..190590d 100644 (file)
@@ -2234,7 +2234,7 @@ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
                        "Requested %d queues, but PF only gave us %d.\n",
                        num_req_queues,
                        adapter->vsi_res->num_queue_pairs);
-               adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
+               adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
                adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
                iavf_schedule_reset(adapter);
 
@@ -2705,6 +2705,13 @@ restart_watchdog:
                queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
 }
 
+/**
+ * iavf_disable_vf - disable VF
+ * @adapter: board private structure
+ *
+ * Set the communication failed flag and free all resources.
+ * NOTE: This function is expected to be called with crit_lock held.
+ **/
 static void iavf_disable_vf(struct iavf_adapter *adapter)
 {
        struct iavf_mac_filter *f, *ftmp;
@@ -2759,7 +2766,6 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
        memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
        iavf_shutdown_adminq(&adapter->hw);
        adapter->netdev->flags &= ~IFF_UP;
-       mutex_unlock(&adapter->crit_lock);
        adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
        iavf_change_state(adapter, __IAVF_DOWN);
        wake_up(&adapter->down_waitqueue);
@@ -2894,7 +2900,8 @@ continue_reset:
        }
        adapter->aq_required = 0;
 
-       if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
+       if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
+           (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
                err = iavf_reinit_interrupt_scheme(adapter);
                if (err)
                        goto reset_err;
@@ -2966,12 +2973,13 @@ continue_reset:
                if (err)
                        goto reset_err;
 
-               if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
+               if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
+                   (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
                        err = iavf_request_traffic_irqs(adapter, netdev->name);
                        if (err)
                                goto reset_err;
 
-                       adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+                       adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
                }
 
                iavf_configure(adapter);
@@ -2986,6 +2994,9 @@ continue_reset:
                iavf_change_state(adapter, __IAVF_DOWN);
                wake_up(&adapter->down_waitqueue);
        }
+
+       adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+
        mutex_unlock(&adapter->client_lock);
        mutex_unlock(&adapter->crit_lock);
 
@@ -4773,6 +4784,13 @@ static void iavf_remove(struct pci_dev *pdev)
        struct iavf_hw *hw = &adapter->hw;
        int err;
 
+       /* When a reboot/shutdown is in progress there is nothing to do,
+        * as the adapter is already in the __IAVF_REMOVE state, which was
+        * set during the iavf_shutdown() callback.
+        */
+       if (adapter->state == __IAVF_REMOVE)
+               return;
+
        set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
        /* Wait until port initialization is complete.
         * There are flows where register/unregister netdev may race.
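
With the mutex_unlock() removed from iavf_disable_vf() above, the lock/unlock pair now lives entirely in the caller, as the new kernel-doc NOTE states; a sketch of the expected call site:

	mutex_lock(&adapter->crit_lock);
	iavf_disable_vf(adapter);	/* no longer drops crit_lock itself */
	mutex_unlock(&adapter->crit_lock);
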
index c6f52f8..782450d 100644 (file)
@@ -1828,6 +1828,22 @@ int iavf_request_reset(struct iavf_adapter *adapter)
        return err;
 }
 
+/**
+ * iavf_netdev_features_vlan_strip_set - update vlan strip status
+ * @netdev: ptr to netdev being adjusted
+ * @enable: enable or disable vlan strip
+ *
+ * Helper function to change vlan strip status in netdev->features.
+ */
+static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
+                                               const bool enable)
+{
+       if (enable)
+               netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+       else
+               netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+}
+
 /**
  * iavf_virtchnl_completion
  * @adapter: adapter structure
@@ -2051,8 +2067,18 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                        }
                        break;
                case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+                       dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
+                       /* VLAN stripping could not be enabled by ethtool.
+                        * Disable it in netdev->features.
+                        */
+                       iavf_netdev_features_vlan_strip_set(netdev, false);
+                       break;
                case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
                        dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
+                       /* VLAN stripping could not be disabled by ethtool.
+                        * Enable it in netdev->features.
+                        */
+                       iavf_netdev_features_vlan_strip_set(netdev, true);
                        break;
                default:
                        dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
@@ -2306,6 +2332,20 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                spin_unlock_bh(&adapter->adv_rss_lock);
                }
                break;
+       case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+               /* PF enabled VLAN stripping on this VF.
+                * Update netdev->features if needed to be in sync with ethtool.
+                */
+               if (!v_retval)
+                       iavf_netdev_features_vlan_strip_set(netdev, true);
+               break;
+       case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+               /* PF disabled VLAN stripping on this VF.
+                * Update netdev->features if needed to be in sync with ethtool.
+                */
+               if (!v_retval)
+                       iavf_netdev_features_vlan_strip_set(netdev, false);
+               break;
        default:
                if (adapter->current_op && (v_opcode != adapter->current_op))
                        dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
index 44b8464..9183d48 100644 (file)
@@ -34,11 +34,13 @@ ice-y := ice_main.o \
         ice_repr.o     \
         ice_tc_lib.o
 ice-$(CONFIG_PCI_IOV) +=       \
+       ice_sriov.o             \
+       ice_virtchnl.o          \
        ice_virtchnl_allowlist.o \
        ice_virtchnl_fdir.o     \
-       ice_sriov.o             \
+       ice_vf_mbx.o            \
        ice_vf_vsi_vlan_ops.o   \
-       ice_virtchnl_pf.o
+       ice_vf_lib.o
 ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
 ice-$(CONFIG_TTY) += ice_gnss.o
 ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
index dc42ff9..e9aa1fb 100644 (file)
@@ -51,9 +51,7 @@
 #include <net/gre.h>
 #include <net/udp_tunnel.h>
 #include <net/vxlan.h>
-#if IS_ENABLED(CONFIG_DCB)
-#include <scsi/iscsi_proto.h>
-#endif /* CONFIG_DCB */
+#include <net/gtp.h>
 #include "ice_devids.h"
 #include "ice_type.h"
 #include "ice_txrx.h"
@@ -63,8 +61,8 @@
 #include "ice_flow.h"
 #include "ice_sched.h"
 #include "ice_idc_int.h"
-#include "ice_virtchnl_pf.h"
 #include "ice_sriov.h"
+#include "ice_vf_mbx.h"
 #include "ice_ptp.h"
 #include "ice_fdir.h"
 #include "ice_xsk.h"
@@ -487,6 +485,7 @@ enum ice_pf_flags {
        ICE_FLAG_VF_VLAN_PRUNING,
        ICE_FLAG_LINK_LENIENT_MODE_ENA,
        ICE_FLAG_PLUG_AUX_DEV,
+       ICE_FLAG_MTU_CHANGED,
        ICE_FLAG_GNSS,                  /* GNSS successfully initialized */
        ICE_PF_FLAGS_NBITS              /* must be last */
 };
@@ -833,6 +832,9 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf);
 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx);
 void ice_update_vsi_stats(struct ice_vsi *vsi);
 void ice_update_pf_stats(struct ice_pf *pf);
+void
+ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
+                            struct ice_q_stats stats, u64 *pkts, u64 *bytes);
 int ice_up(struct ice_vsi *vsi);
 int ice_down(struct ice_vsi *vsi);
 int ice_vsi_cfg(struct ice_vsi *vsi);
@@ -896,7 +898,16 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
  */
 static inline void ice_clear_rdma_cap(struct ice_pf *pf)
 {
-       ice_unplug_aux_dev(pf);
+       /* We can directly unplug the aux device here only if the flag bit
+        * ICE_FLAG_PLUG_AUX_DEV is not set, because ice_unplug_aux_dev()
+        * could race with ice_plug_aux_dev() called from
+        * ice_service_task(). In that case we only clear the bit now, and
+        * the aux device will be unplugged later, once ice_plug_aux_dev()
+        * called from ice_service_task() finishes (see ice_service_task()).
+        */
+       if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+               ice_unplug_aux_dev(pf);
+
        clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
 }
 #endif /* _ICE_H_ */
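
The ice_clear_rdma_cap() comment above is an instance of a common handoff: atomically consume the pending-work bit, and tear down directly only when no service-task work was queued. A generic sketch of the pattern (all names hypothetical):

	/* Either the service task saw the pending-plug bit first and will
	 * handle the unplug when it finishes, or we own the teardown now.
	 */
	if (!test_and_clear_bit(MYDRV_FLAG_PLUG_PENDING, priv->flags))	/* hypothetical */
		mydrv_unplug_aux_dev(priv);				/* hypothetical */
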
index 80ed76f..9669ad9 100644 (file)
@@ -3,6 +3,9 @@
 
 #ifndef _ICE_ARFS_H_
 #define _ICE_ARFS_H_
+
+#include "ice_fdir.h"
+
 enum ice_arfs_fltr_state {
        ICE_ARFS_INACTIVE,
        ICE_ARFS_ACTIVE,
index a309447..136d791 100644 (file)
@@ -5,7 +5,7 @@
 #include "ice_base.h"
 #include "ice_lib.h"
 #include "ice_dcb_lib.h"
-#include "ice_virtchnl_pf.h"
+#include "ice_sriov.h"
 
 static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
 {
index 1efe6b2..872ea7d 100644 (file)
@@ -6,12 +6,12 @@
 
 #include <linux/bitfield.h>
 
-#include "ice.h"
 #include "ice_type.h"
 #include "ice_nvm.h"
 #include "ice_flex_pipe.h"
-#include "ice_switch.h"
 #include <linux/avf/virtchnl.h>
+#include "ice_switch.h"
+#include "ice_fdir.h"
 
 #define ICE_SQ_SEND_DELAY_TIME_MS      10
 #define ICE_SQ_SEND_MAX_EXECUTE                3
index d73348f..6abf28a 100644 (file)
@@ -5,6 +5,7 @@
 #define _ICE_DCB_H_
 
 #include "ice_type.h"
+#include <scsi/iscsi_proto.h>
 
 #define ICE_DCBX_STATUS_NOT_STARTED    0
 #define ICE_DCBX_STATUS_IN_PROGRESS    1
index 3996258..24cda7e 100644 (file)
@@ -2311,7 +2311,7 @@ ice_set_link_ksettings(struct net_device *netdev,
        if (err)
                goto done;
 
-       curr_link_speed = pi->phy.link_info.link_speed;
+       curr_link_speed = pi->phy.curr_user_speed_req;
        adv_link_speed = ice_ksettings_find_adv_link_speed(ks);
 
        /* If speed didn't get set, set it to what it currently is.
index 38fe0a7..c73cdab 100644 (file)
@@ -4,6 +4,7 @@
 #include "ice_common.h"
 #include "ice_flex_pipe.h"
 #include "ice_flow.h"
+#include "ice.h"
 
 /* For supporting double VLAN mode, it is necessary to enable or disable certain
  * boost tcam entries. The metadata labels names that match the following
@@ -1804,16 +1805,43 @@ static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
        return bld;
 }
 
+static bool ice_is_gtp_u_profile(u16 prof_idx)
+{
+       return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
+               prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) ||
+              prof_idx == ICE_PROFID_IPV4_GTPU_TEID;
+}
+
+static bool ice_is_gtp_c_profile(u16 prof_idx)
+{
+       switch (prof_idx) {
+       case ICE_PROFID_IPV4_GTPC_TEID:
+       case ICE_PROFID_IPV4_GTPC_NO_TEID:
+       case ICE_PROFID_IPV6_GTPC_TEID:
+       case ICE_PROFID_IPV6_GTPC_NO_TEID:
+               return true;
+       default:
+               return false;
+       }
+}
+
 /**
  * ice_get_sw_prof_type - determine switch profile type
  * @hw: pointer to the HW structure
  * @fv: pointer to the switch field vector
+ * @prof_idx: profile index to check
  */
 static enum ice_prof_type
-ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
+ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx)
 {
        u16 i;
 
+       if (ice_is_gtp_c_profile(prof_idx))
+               return ICE_PROF_TUN_GTPC;
+
+       if (ice_is_gtp_u_profile(prof_idx))
+               return ICE_PROF_TUN_GTPU;
+
        for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
                /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
                if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
@@ -1860,7 +1888,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
 
                if (fv) {
                        /* Determine field vector type */
-                       prof_type = ice_get_sw_prof_type(hw, fv);
+                       prof_type = ice_get_sw_prof_type(hw, fv, offset);
 
                        if (req_profs & prof_type)
                                set_bit((u16)offset, bm);
@@ -1871,20 +1899,19 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
 /**
  * ice_get_sw_fv_list
  * @hw: pointer to the HW structure
- * @prot_ids: field vector to search for with a given protocol ID
- * @ids_cnt: lookup/protocol count
+ * @lkups: list of protocol types
  * @bm: bitmap of field vectors to consider
  * @fv_list: Head of a list
  *
  * Finds all the field vector entries from switch block that contain
- * a given protocol ID and returns a list of structures of type
+ * a given protocol ID and offset and returns a list of structures of type
  * "ice_sw_fv_list_entry". Every structure in the list has a field vector
  * definition and profile ID information
  * NOTE: The caller of the function is responsible for freeing the memory
  * allocated for every list entry.
  */
 int
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
                   unsigned long *bm, struct list_head *fv_list)
 {
        struct ice_sw_fv_list_entry *fvl;
@@ -1896,7 +1923,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
 
        memset(&state, 0, sizeof(state));
 
-       if (!ids_cnt || !hw->seg)
+       if (!lkups->n_val_words || !hw->seg)
                return -EINVAL;
 
        ice_seg = hw->seg;
@@ -1915,20 +1942,17 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
                if (!test_bit((u16)offset, bm))
                        continue;
 
-               for (i = 0; i < ids_cnt; i++) {
+               for (i = 0; i < lkups->n_val_words; i++) {
                        int j;
 
-                       /* This code assumes that if a switch field vector line
-                        * has a matching protocol, then this line will contain
-                        * the entries necessary to represent every field in
-                        * that protocol header.
-                        */
                        for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
-                               if (fv->ew[j].prot_id == prot_ids[i])
+                               if (fv->ew[j].prot_id ==
+                                   lkups->fv_words[i].prot_id &&
+                                   fv->ew[j].off == lkups->fv_words[i].off)
                                        break;
                        if (j >= hw->blk[ICE_BLK_SW].es.fvw)
                                break;
-                       if (i + 1 == ids_cnt) {
+                       if (i + 1 == lkups->n_val_words) {
                                fvl = devm_kzalloc(ice_hw_to_dev(hw),
                                                   sizeof(*fvl), GFP_KERNEL);
                                if (!fvl)
index 2fd5312..9c530c8 100644 (file)
@@ -87,7 +87,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
 void
 ice_init_prof_result_bm(struct ice_hw *hw);
 int
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
                   unsigned long *bm, struct list_head *fv_list);
 int
 ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
index 5735e95..974d14a 100644 (file)
@@ -417,6 +417,8 @@ enum ice_tunnel_type {
        TNL_VXLAN = 0,
        TNL_GENEVE,
        TNL_GRETAP,
+       TNL_GTPC,
+       TNL_GTPU,
        __TNL_TYPE_CNT,
        TNL_LAST = 0xFF,
        TNL_ALL = 0xFF,
@@ -673,7 +675,9 @@ enum ice_prof_type {
        ICE_PROF_NON_TUN = 0x1,
        ICE_PROF_TUN_UDP = 0x2,
        ICE_PROF_TUN_GRE = 0x4,
-       ICE_PROF_TUN_ALL = 0x6,
+       ICE_PROF_TUN_GTPU = 0x8,
+       ICE_PROF_TUN_GTPC = 0x10,
+       ICE_PROF_TUN_ALL = 0x1E,
        ICE_PROF_ALL = 0xFF,
 };
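
The new ICE_PROF_TUN_ALL value is simply the OR of the individual tunnel profile bits. A compile-time check of that arithmetic (an editorial sketch, not part of the patch):

    /* 0x2 | 0x4 | 0x8 | 0x10 == 0x1E: UDP, GRE, GTPU and GTPC tunnels */
    _Static_assert((ICE_PROF_TUN_UDP | ICE_PROF_TUN_GRE |
                    ICE_PROF_TUN_GTPU | ICE_PROF_TUN_GTPC) == ICE_PROF_TUN_ALL,
                   "ICE_PROF_TUN_ALL must cover every tunnel profile bit");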
 
index beed483..ef103e4 100644 (file)
@@ -3,6 +3,7 @@
 
 #include "ice_common.h"
 #include "ice_flow.h"
+#include <net/gre.h>
 
 /* Describe properties of a protocol header field */
 struct ice_flow_field_info {
index 84b6e44..b465d27 100644 (file)
@@ -4,6 +4,8 @@
 #ifndef _ICE_FLOW_H_
 #define _ICE_FLOW_H_
 
+#include "ice_flex_type.h"
+
 #define ICE_FLOW_ENTRY_HANDLE_INVAL    0
 #define ICE_FLOW_FLD_OFF_INVAL         0xffff
 
index 755e158..35579cf 100644 (file)
@@ -125,7 +125,7 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
         * writes.
         */
        kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
-       if (!kworker) {
+       if (IS_ERR(kworker)) {
                kfree(gnss);
                return NULL;
        }
@@ -253,7 +253,7 @@ static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
        int err;
 
        tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW);
-       if (!tty_driver) {
+       if (IS_ERR(tty_driver)) {
                dev_err(ice_pf_to_dev(pf), "Failed to allocate memory for GNSS TTY\n");
                return NULL;
        }
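
Both GNSS hunks above fix the same class of bug: kthread_create_worker() and tty_alloc_driver() report failure as an ERR_PTR()-encoded pointer, never as NULL, so the old NULL checks could never fire. A minimal sketch of the idiom (hypothetical caller, not from this patch):

    #include <linux/err.h>
    #include <linux/kthread.h>

    static struct kthread_worker *example_start_worker(void)
    {
            struct kthread_worker *kworker;

            kworker = kthread_create_worker(0, "example-worker");
            if (IS_ERR(kworker))    /* failure is ERR_PTR(-errno), not NULL */
                    return NULL;
            return kworker;
    }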
index b7796b8..4b0c867 100644 (file)
@@ -5,7 +5,6 @@
 #define _ICE_IDC_INT_H_
 
 #include <linux/net/intel/iidc.h>
-#include "ice.h"
 
 struct ice_pf;
 
index 113a2c5..b897926 100644 (file)
@@ -3960,9 +3960,9 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
         */
        if (status == -EIO) {
                if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
-                       dev_warn(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
-                                (ena ? "ON" : "OFF"), status,
-                                ice_aq_str(hw->adminq.sq_last_status));
+                       dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
+                               (ena ? "ON" : "OFF"), status,
+                               ice_aq_str(hw->adminq.sq_last_status));
        } else if (status) {
                dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
                        (ena ? "ON" : "OFF"), status,
index 289e5c9..0a2e695 100644 (file)
@@ -48,6 +48,21 @@ static DEFINE_IDA(ice_aux_ida);
 DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
 EXPORT_SYMBOL(ice_xdp_locking_key);
 
+/**
+ * ice_hw_to_dev - Get device pointer from the hardware structure
+ * @hw: pointer to the device HW structure
+ *
+ * Used to access the device pointer from compilation units which can't easily
+ * include the definition of struct ice_pf without leading to circular header
+ * dependencies.
+ */
+struct device *ice_hw_to_dev(struct ice_hw *hw)
+{
+       struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+
+       return &pf->pdev->dev;
+}
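
This helper is the container_of() pattern that the old ice_hw_to_dev() macro open-coded (see the ice_osdep.h hunk below); hiding it behind a function lets callers obtain the device pointer from a forward-declared struct ice_hw. The pattern in isolation (hypothetical types, a sketch only):

    #include <linux/kernel.h>       /* container_of() */

    struct inner { int x; };

    struct outer {
            int other_member;
            struct inner member;
    };

    static struct outer *outer_from_inner(struct inner *p)
    {
            /* recover the enclosing structure from a pointer to its member */
            return container_of(p, struct outer, member);
    }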
+
 static struct workqueue_struct *ice_wq;
 static const struct net_device_ops ice_netdev_safe_mode_ops;
 static const struct net_device_ops ice_netdev_ops;
@@ -619,7 +634,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
                clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
                clear_bit(ICE_PFR_REQ, pf->state);
                wake_up(&pf->reset_wait_queue);
-               ice_reset_all_vfs(pf, true);
+               ice_reset_all_vfs(pf);
        }
 }
 
@@ -670,7 +685,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
                        clear_bit(ICE_CORER_REQ, pf->state);
                        clear_bit(ICE_GLOBR_REQ, pf->state);
                        wake_up(&pf->reset_wait_queue);
-                       ice_reset_all_vfs(pf, true);
+                       ice_reset_all_vfs(pf);
                }
 
                return;
@@ -1808,9 +1823,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
                                 * reset, so print the event prior to reset.
                                 */
                                ice_print_vf_rx_mdd_event(vf);
-                               mutex_lock(&vf->cfg_lock);
-                               ice_reset_vf(vf, false);
-                               mutex_unlock(&vf->cfg_lock);
+                               ice_reset_vf(vf, ICE_VF_RESET_LOCK);
                        }
                }
        }
@@ -2265,9 +2278,30 @@ static void ice_service_task(struct work_struct *work)
                return;
        }
 
-       if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+       if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) {
+               /* Plug aux device per request */
                ice_plug_aux_dev(pf);
 
+               /* Mark plugging as done but check whether unplug was
+                * requested during ice_plug_aux_dev() call
+                * (e.g. from ice_clear_rdma_cap()) and if so then
+                * unplug the aux device.
+                */
+               if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+                       ice_unplug_aux_dev(pf);
+       }
+
+       if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
+               struct iidc_event *event;
+
+               event = kzalloc(sizeof(*event), GFP_KERNEL);
+               if (event) {
+                       set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
+                       ice_send_event_to_aux(pf, event);
+                       kfree(event);
+               }
+       }
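
Two deferral patterns land in the service task here. For aux-device plugging, the flag bit doubles as a cancellation token: ice_plug_aux_dev() runs while ICE_FLAG_PLUG_AUX_DEV is still set, and a concurrent unplug request (e.g. from ice_clear_rdma_cap()) signals itself by clearing the bit, which the test_and_clear_bit() afterwards detects. The MTU hunk pairs with the ice_change_mtu() rework further down: the IIDC_EVENT_AFTER_MTU_CHANGE notification now fires from process context here rather than from the MTU callback. The cancellation idiom, condensed (hypothetical names, a sketch only):

    if (test_bit(EXAMPLE_FLAG_PLUG, flags)) {
            do_plug();
            /* bit cleared underneath us? a concurrent unplug request: undo */
            if (!test_and_clear_bit(EXAMPLE_FLAG_PLUG, flags))
                    do_unplug();
    }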
+
        ice_clean_adminq_subtask(pf);
        ice_check_media_subtask(pf);
        ice_check_for_hang_subtask(pf);
@@ -3033,7 +3067,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
                struct iidc_event *event;
 
                ena_mask &= ~ICE_AUX_CRIT_ERR;
-               event = kzalloc(sizeof(*event), GFP_KERNEL);
+               event = kzalloc(sizeof(*event), GFP_ATOMIC);
                if (event) {
                        set_bit(IIDC_EVENT_CRIT_ERR, event->type);
                        /* report the entire OICR value to AUX driver */
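
ice_misc_intr() runs in hard-IRQ context, where a GFP_KERNEL allocation may sleep and is therefore illegal; GFP_ATOMIC is the correct flag there. As a sketch of the rule (hypothetical helper):

    #include <linux/slab.h>

    static struct iidc_event *example_alloc_event_in_irq(void)
    {
            /* atomic context: must not sleep, so GFP_KERNEL is off limits */
            return kzalloc(sizeof(struct iidc_event), GFP_ATOMIC);
    }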
@@ -3718,7 +3752,7 @@ static void ice_set_pf_caps(struct ice_pf *pf)
        if (func_caps->common_cap.sr_iov_1_1) {
                set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
                pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
-                                             ICE_MAX_VF_COUNT);
+                                             ICE_MAX_SRIOV_VFS);
        }
        clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
        if (func_caps->common_cap.rss_table_size)
@@ -4887,7 +4921,6 @@ static void ice_remove(struct pci_dev *pdev)
        ice_devlink_unregister_params(pf);
        set_bit(ICE_DOWN, pf->state);
 
-       mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
        ice_deinit_lag(pf);
        if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
                ice_ptp_release(pf);
@@ -4897,6 +4930,7 @@ static void ice_remove(struct pci_dev *pdev)
                ice_remove_arfs(pf);
        ice_setup_mc_magic_wake(pf);
        ice_vsi_release_all(pf);
+       mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
        ice_set_wake(pf);
        ice_free_irq_msix_misc(pf);
        ice_for_each_vsi(pf, i) {
@@ -6113,9 +6147,9 @@ int ice_up(struct ice_vsi *vsi)
  * This function fetches stats from the ring considering the atomic operations
  * that need to be performed to read u64 values on a 32-bit machine.
  */
-static void
-ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats,
-                            u64 *pkts, u64 *bytes)
+void
+ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
+                            struct ice_q_stats stats, u64 *pkts, u64 *bytes)
 {
        unsigned int start;
 
@@ -6145,8 +6179,9 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
                u64 pkts = 0, bytes = 0;
 
                ring = READ_ONCE(rings[i]);
-               if (ring)
-                       ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
+               if (!ring)
+                       continue;
+               ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
                vsi_stats->tx_packets += pkts;
                vsi_stats->tx_bytes += bytes;
                vsi->tx_restart += ring->tx_stats.restart_q;
@@ -7037,7 +7072,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
        struct ice_pf *pf = vsi->back;
-       struct iidc_event *event;
        u8 count = 0;
        int err = 0;
 
@@ -7072,14 +7106,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
                return -EBUSY;
        }
 
-       event = kzalloc(sizeof(*event), GFP_KERNEL);
-       if (!event)
-               return -ENOMEM;
-
-       set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
-       ice_send_event_to_aux(pf, event);
-       clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
-
        netdev->mtu = (unsigned int)new_mtu;
 
        /* if VSI is up, bring it down and then back up */
@@ -7087,21 +7113,18 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
                err = ice_down(vsi);
                if (err) {
                        netdev_err(netdev, "change MTU if_down err %d\n", err);
-                       goto event_after;
+                       return err;
                }
 
                err = ice_up(vsi);
                if (err) {
                        netdev_err(netdev, "change MTU if_up err %d\n", err);
-                       goto event_after;
+                       return err;
                }
        }
 
        netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
-event_after:
-       set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
-       ice_send_event_to_aux(pf, event);
-       kfree(event);
+       set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
 
        return err;
 }
index 380e8ae..82bc54f 100644 (file)
@@ -5,7 +5,14 @@
 #define _ICE_OSDEP_H_
 
 #include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
 #include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/pci_ids.h>
 #ifndef CONFIG_64BIT
 #include <linux/io-64-nonatomic-lo-hi.h>
 #endif
@@ -25,8 +32,8 @@ struct ice_dma_mem {
        size_t size;
 };
 
-#define ice_hw_to_dev(ptr)     \
-       (&(container_of((ptr), struct ice_pf, hw))->pdev->dev)
+struct ice_hw;
+struct device *ice_hw_to_dev(struct ice_hw *hw);
 
 #ifdef CONFIG_DYNAMIC_DEBUG
 #define ice_debug(hw, type, fmt, args...) \
index 695b6dd..3f64300 100644 (file)
@@ -29,6 +29,7 @@ enum ice_protocol_type {
        ICE_MAC_OFOS = 0,
        ICE_MAC_IL,
        ICE_ETYPE_OL,
+       ICE_ETYPE_IL,
        ICE_VLAN_OFOS,
        ICE_IPV4_OFOS,
        ICE_IPV4_IL,
@@ -40,6 +41,8 @@ enum ice_protocol_type {
        ICE_VXLAN,
        ICE_GENEVE,
        ICE_NVGRE,
+       ICE_GTP,
+       ICE_GTP_NO_PAY,
        ICE_VXLAN_GPE,
        ICE_SCTP_IL,
        ICE_PROTOCOL_LAST
@@ -51,6 +54,8 @@ enum ice_sw_tunnel_type {
        ICE_SW_TUN_VXLAN,
        ICE_SW_TUN_GENEVE,
        ICE_SW_TUN_NVGRE,
+       ICE_SW_TUN_GTPU,
+       ICE_SW_TUN_GTPC,
        ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
 };
 
@@ -92,6 +97,7 @@ enum ice_prot_id {
 #define ICE_MAC_OFOS_HW                1
 #define ICE_MAC_IL_HW          4
 #define ICE_ETYPE_OL_HW                9
+#define ICE_ETYPE_IL_HW                10
 #define ICE_VLAN_OF_HW         16
 #define ICE_VLAN_OL_HW         17
 #define ICE_IPV4_OFOS_HW       32
@@ -180,6 +186,20 @@ struct ice_udp_tnl_hdr {
        __be32 vni;     /* only use lower 24-bits */
 };
 
+struct ice_udp_gtp_hdr {
+       u8 flags;
+       u8 msg_type;
+       __be16 rsrvd_len;
+       __be32 teid;
+       __be16 rsrvd_seq_nbr;
+       u8 rsrvd_n_pdu_nbr;
+       u8 rsrvd_next_ext;
+       u8 rsvrd_ext_len;
+       u8 pdu_type;
+       u8 qfi;
+       u8 rsvrd;
+};
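
The layout mirrors a GTPv1 header: 8 mandatory bytes (flags, message type, length, TEID) followed by the optional sequence/extension fields and the PDU session container's pdu_type and qfi. Every member is naturally aligned, so the struct should pack to 16 bytes; a compile-time check of that assumption (an editorial sketch):

    _Static_assert(sizeof(struct ice_udp_gtp_hdr) == 16,
                   "GTP header expected to be 16 bytes with no padding");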
+
 struct ice_nvgre_hdr {
        __be16 flags;
        __be16 protocol;
@@ -196,6 +216,7 @@ union ice_prot_hdr {
        struct ice_sctp_hdr sctp_hdr;
        struct ice_udp_tnl_hdr tnl_hdr;
        struct ice_nvgre_hdr nvgre_hdr;
+       struct ice_udp_gtp_hdr gtp_hdr;
 };
 
 /* This is mapping table entry that maps every word within a given protocol
index 000c39d..a1cd332 100644 (file)
@@ -3,6 +3,7 @@
 
 #include "ice.h"
 #include "ice_lib.h"
+#include "ice_trace.h"
 
 #define E810_OUT_PROP_DELAY_NS 1
 
@@ -2063,11 +2064,15 @@ static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
                struct sk_buff *skb;
                int err;
 
+               ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
+
                err = ice_read_phy_tstamp(hw, tx->quad, phy_idx,
                                          &raw_tstamp);
                if (err)
                        continue;
 
+               ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
+
                /* Check if the timestamp is invalid or stale */
                if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
                    raw_tstamp == tx->tstamps[idx].cached_tstamp)
@@ -2093,6 +2098,8 @@ static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
                tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
                shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
 
+               ice_trace(tx_tstamp_complete, skb, idx);
+
                skb_tstamp_tx(skb, &shhwtstamps);
                dev_kfree_skb_any(skb);
        }
@@ -2131,6 +2138,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
                tx->tstamps[idx].start = jiffies;
                tx->tstamps[idx].skb = skb_get(skb);
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+               ice_trace(tx_tstamp_request, skb, idx);
        }
 
        spin_unlock(&tx->lock);
index 2adfaf2..848f2ad 100644 (file)
@@ -4,7 +4,7 @@
 #include "ice.h"
 #include "ice_eswitch.h"
 #include "ice_devlink.h"
-#include "ice_virtchnl_pf.h"
+#include "ice_sriov.h"
 #include "ice_tc_lib.h"
 
 /**
@@ -142,6 +142,59 @@ ice_repr_get_devlink_port(struct net_device *netdev)
        return &repr->vf->devlink_port;
 }
 
+/**
+ * ice_repr_sp_stats64 - get slow path stats for port representor
+ * @dev: network interface device structure
+ * @stats: netlink stats structure
+ *
+ * RX/TX stats are swapped here to be consistent with VF stats. In the slow
+ * path the port representor receives data when the corresponding VF is
+ * sending it (and vice versa), so TX and RX bytes/packets are effectively
+ * swapped on the port representor.
+ */
+static int
+ice_repr_sp_stats64(const struct net_device *dev,
+                   struct rtnl_link_stats64 *stats)
+{
+       struct ice_netdev_priv *np = netdev_priv(dev);
+       int vf_id = np->repr->vf->vf_id;
+       struct ice_tx_ring *tx_ring;
+       struct ice_rx_ring *rx_ring;
+       u64 pkts, bytes;
+
+       tx_ring = np->vsi->tx_rings[vf_id];
+       ice_fetch_u64_stats_per_ring(&tx_ring->syncp, tx_ring->stats,
+                                    &pkts, &bytes);
+       stats->rx_packets = pkts;
+       stats->rx_bytes = bytes;
+
+       rx_ring = np->vsi->rx_rings[vf_id];
+       ice_fetch_u64_stats_per_ring(&rx_ring->syncp, rx_ring->stats,
+                                    &pkts, &bytes);
+       stats->tx_packets = pkts;
+       stats->tx_bytes = bytes;
+       stats->tx_dropped = rx_ring->rx_stats.alloc_page_failed +
+                           rx_ring->rx_stats.alloc_buf_failed;
+
+       return 0;
+}
+
+static bool
+ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
+{
+       return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
+}
+
+static int
+ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
+                              void *sp)
+{
+       if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
+               return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);
+
+       return -EINVAL;
+}
+
 static int
 ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
                             struct flow_cls_offload *flower)
@@ -199,6 +252,8 @@ static const struct net_device_ops ice_repr_netdev_ops = {
        .ndo_start_xmit = ice_eswitch_port_start_xmit,
        .ndo_get_devlink_port = ice_repr_get_devlink_port,
        .ndo_setup_tc = ice_repr_setup_tc,
+       .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
+       .ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
 };
 
 /**
@@ -284,7 +339,7 @@ static int ice_repr_add(struct ice_vf *vf)
 
        devlink_port_type_eth_set(&vf->devlink_port, repr->netdev);
 
-       ice_vc_change_ops_to_repr(&vf->vc_ops);
+       ice_virtchnl_set_repr_ops(vf);
 
        return 0;
 
@@ -329,7 +384,7 @@ static void ice_repr_rem(struct ice_vf *vf)
        kfree(vf->repr);
        vf->repr = NULL;
 
-       ice_vc_set_dflt_vf_ops(&vf->vc_ops);
+       ice_virtchnl_set_dflt_ops(vf);
 }
 
 /**
index 0c77ff0..378a45b 100644 (file)
@@ -5,7 +5,6 @@
 #define _ICE_REPR_H_
 
 #include <net/dst_metadata.h>
-#include "ice.h"
 
 struct ice_repr {
        struct ice_vsi *src_vsi;
index 52c6bac..8915a9d 100644 (file)
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2018, Intel Corporation. */
 
-#include "ice_common.h"
-#include "ice_sriov.h"
+#include "ice.h"
+#include "ice_vf_lib_private.h"
+#include "ice_base.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+#include "ice_dcb_lib.h"
+#include "ice_flow.h"
+#include "ice_eswitch.h"
+#include "ice_virtchnl_allowlist.h"
+#include "ice_flex_pipe.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_vlan.h"
 
 /**
- * ice_aq_send_msg_to_vf
- * @hw: pointer to the hardware structure
- * @vfid: VF ID to send msg
- * @v_opcode: opcodes for VF-PF communication
- * @v_retval: return error code
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- * @cd: pointer to command details
+ * ice_free_vf_entries - Free all VF entries from the hash table
+ * @pf: pointer to the PF structure
  *
- * Send message to VF driver (0x0802) using mailbox
- * queue and asynchronously sending message via
- * ice_sq_send_cmd() function
+ * Iterate over the VF hash table, removing and releasing all VF entries.
+ * Called during VF teardown or as cleanup during failed VF initialization.
  */
-int
-ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
-                     u8 *msg, u16 msglen, struct ice_sq_cd *cd)
+static void ice_free_vf_entries(struct ice_pf *pf)
+{
+       struct ice_vfs *vfs = &pf->vfs;
+       struct hlist_node *tmp;
+       struct ice_vf *vf;
+       unsigned int bkt;
+
+       /* Remove all VFs from the hash table and release their main
+        * reference. Once all references to the VF are dropped, ice_put_vf()
+        * will call ice_release_vf which will remove the VF memory.
+        */
+       lockdep_assert_held(&vfs->table_lock);
+
+       hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
+               hash_del_rcu(&vf->entry);
+               ice_put_vf(vf);
+       }
+}
+
+/**
+ * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
+ * @vf: invalidate this VF's VSI after freeing it
+ */
+static void ice_vf_vsi_release(struct ice_vf *vf)
+{
+       ice_vsi_release(ice_get_vf_vsi(vf));
+       ice_vf_invalidate_vsi(vf);
+}
+
+/**
+ * ice_free_vf_res - Free a VF's resources
+ * @vf: pointer to the VF info
+ */
+static void ice_free_vf_res(struct ice_vf *vf)
+{
+       struct ice_pf *pf = vf->pf;
+       int i, last_vector_idx;
+
+       /* First, disable VF's configuration API to prevent OS from
+        * accessing the VF's VSI after it's freed or invalidated.
+        */
+       clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+       ice_vf_fdir_exit(vf);
+       /* free VF control VSI */
+       if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+               ice_vf_ctrl_vsi_release(vf);
+
+       /* free VSI and disconnect it from the parent uplink */
+       if (vf->lan_vsi_idx != ICE_NO_VSI) {
+               ice_vf_vsi_release(vf);
+               vf->num_mac = 0;
+       }
+
+       last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1;
+
+       /* clear VF MDD event information */
+       memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
+       memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
+
+       /* Disable interrupts so that VF starts in a known state */
+       for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
+               wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
+               ice_flush(&pf->hw);
+       }
+       /* reset some of the state variables keeping track of the resources */
+       clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+       clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+}
+
+/**
+ * ice_dis_vf_mappings
+ * @vf: pointer to the VF structure
+ */
+static void ice_dis_vf_mappings(struct ice_vf *vf)
+{
+       struct ice_pf *pf = vf->pf;
+       struct ice_vsi *vsi;
+       struct device *dev;
+       int first, last, v;
+       struct ice_hw *hw;
+
+       hw = &pf->hw;
+       vsi = ice_get_vf_vsi(vf);
+
+       dev = ice_pf_to_dev(pf);
+       wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
+       wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
+
+       first = vf->first_vector_idx;
+       last = first + pf->vfs.num_msix_per - 1;
+       for (v = first; v <= last; v++) {
+               u32 reg;
+
+               reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
+                       GLINT_VECT2FUNC_IS_PF_M) |
+                      ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+                       GLINT_VECT2FUNC_PF_NUM_M));
+               wr32(hw, GLINT_VECT2FUNC(v), reg);
+       }
+
+       if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
+               wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
+       else
+               dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
+
+       if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
+               wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
+       else
+               dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
+}
+
+/**
+ * ice_sriov_free_msix_res - Reset/free any used MSIX resources
+ * @pf: pointer to the PF structure
+ *
+ * Since no MSIX entries are taken from the pf->irq_tracker, just clear
+ * the pf->sriov_base_vector.
+ *
+ * Returns 0 on success, and -EINVAL on error.
+ */
+static int ice_sriov_free_msix_res(struct ice_pf *pf)
+{
+       struct ice_res_tracker *res;
+
+       if (!pf)
+               return -EINVAL;
+
+       res = pf->irq_tracker;
+       if (!res)
+               return -EINVAL;
+
+       /* give back irq_tracker resources used */
+       WARN_ON(pf->sriov_base_vector < res->num_entries);
+
+       pf->sriov_base_vector = 0;
+
+       return 0;
+}
+
+/**
+ * ice_free_vfs - Free all VFs
+ * @pf: pointer to the PF structure
+ */
+void ice_free_vfs(struct ice_pf *pf)
+{
+       struct device *dev = ice_pf_to_dev(pf);
+       struct ice_vfs *vfs = &pf->vfs;
+       struct ice_hw *hw = &pf->hw;
+       struct ice_vf *vf;
+       unsigned int bkt;
+
+       if (!ice_has_vfs(pf))
+               return;
+
+       while (test_and_set_bit(ICE_VF_DIS, pf->state))
+               usleep_range(1000, 2000);
+
+       /* Disable IOV before freeing resources. This lets any VF drivers
+        * running in the host get themselves cleaned up before we yank
+        * the carpet out from underneath their feet.
+        */
+       if (!pci_vfs_assigned(pf->pdev))
+               pci_disable_sriov(pf->pdev);
+       else
+               dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
+
+       mutex_lock(&vfs->table_lock);
+
+       ice_eswitch_release(pf);
+
+       ice_for_each_vf(pf, bkt, vf) {
+               mutex_lock(&vf->cfg_lock);
+
+               ice_dis_vf_qs(vf);
+
+               if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+                       /* disable VF qp mappings and set VF disable state */
+                       ice_dis_vf_mappings(vf);
+                       set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+                       ice_free_vf_res(vf);
+               }
+
+               if (!pci_vfs_assigned(pf->pdev)) {
+                       u32 reg_idx, bit_idx;
+
+                       reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+                       bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+                       wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+               }
+
+               /* clear malicious info since the VF is getting released */
+               if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
+                                       ICE_MAX_SRIOV_VFS, vf->vf_id))
+                       dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
+                               vf->vf_id);
+
+               mutex_unlock(&vf->cfg_lock);
+       }
+
+       if (ice_sriov_free_msix_res(pf))
+               dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
+
+       vfs->num_qps_per = 0;
+       ice_free_vf_entries(pf);
+
+       mutex_unlock(&vfs->table_lock);
+
+       clear_bit(ICE_VF_DIS, pf->state);
+       clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
+}
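
Throughout the refactored code, per-VF iteration goes through the new hash table via ice_for_each_vf() while holding vfs->table_lock, with per-VF work additionally serialized by vf->cfg_lock. Condensed from the function above (a usage sketch, assuming the helpers introduced in this series):

    mutex_lock(&pf->vfs.table_lock);
    ice_for_each_vf(pf, bkt, vf) {
            mutex_lock(&vf->cfg_lock);
            /* per-VF teardown or reconfiguration work goes here */
            mutex_unlock(&vf->cfg_lock);
    }
    mutex_unlock(&pf->vfs.table_lock);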
+
+/**
+ * ice_vf_vsi_setup - Set up a VF VSI
+ * @vf: VF to setup VSI for
+ *
+ * Returns a pointer to the newly allocated VSI on success, otherwise NULL.
+ */
+static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
+{
+       struct ice_port_info *pi = ice_vf_get_port_info(vf);
+       struct ice_pf *pf = vf->pf;
+       struct ice_vsi *vsi;
+
+       vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL);
+
+       if (!vsi) {
+               dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
+               ice_vf_invalidate_vsi(vf);
+               return NULL;
+       }
+
+       vf->lan_vsi_idx = vsi->idx;
+       vf->lan_vsi_num = vsi->vsi_num;
+
+       return vsi;
+}
+
+/**
+ * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF that the first MSIX vector index is being calculated for
+ *
+ * This returns the first MSIX vector index in PF space that is used by this VF.
+ * This index is used when accessing PF relative registers such as
+ * GLINT_VECT2FUNC and GLINT_DYN_CTL.
+ * This index is always the OICR index in the AVF driver, so any functionality
+ * using vf->first_vector_idx for queue configuration must increment by 1 to
+ * avoid clobbering the OICR index.
+ */
+static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
+{
+       return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per;
+}
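
For example, with hypothetical values pf->sriov_base_vector = 96 and pf->vfs.num_msix_per = 17, VF 2's first vector index is 96 + 2 * 17 = 130.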
+
+/**
+ * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
+ * @vf: VF to enable MSIX mappings for
+ *
+ * Some of the registers need to be indexed/configured using hardware global
+ * device values and other registers need 0-based values, which represent PF
+ * based values.
+ */
+static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
+{
+       int device_based_first_msix, device_based_last_msix;
+       int pf_based_first_msix, pf_based_last_msix, v;
+       struct ice_pf *pf = vf->pf;
+       int device_based_vf_id;
+       struct ice_hw *hw;
+       u32 reg;
+
+       hw = &pf->hw;
+       pf_based_first_msix = vf->first_vector_idx;
+       pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1;
+
+       device_based_first_msix = pf_based_first_msix +
+               pf->hw.func_caps.common_cap.msix_vector_first_id;
+       device_based_last_msix =
+               (device_based_first_msix + pf->vfs.num_msix_per) - 1;
+       device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+       reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
+               VPINT_ALLOC_FIRST_M) |
+              ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
+               VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
+       wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
+
+       reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
+                & VPINT_ALLOC_PCI_FIRST_M) |
+              ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
+               VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
+       wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
+
+       /* map the interrupts to its functions */
+       for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
+               reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
+                       GLINT_VECT2FUNC_VF_NUM_M) |
+                      ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+                       GLINT_VECT2FUNC_PF_NUM_M));
+               wr32(hw, GLINT_VECT2FUNC(v), reg);
+       }
+
+       /* Map mailbox interrupt to VF MSI-X vector 0 */
+       wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
+ * @vf: VF to enable the mappings for
+ * @max_txq: max Tx queues allowed on the VF's VSI
+ * @max_rxq: max Rx queues allowed on the VF's VSI
+ */
+static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
+{
+       struct device *dev = ice_pf_to_dev(vf->pf);
+       struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+       struct ice_hw *hw = &vf->pf->hw;
+       u32 reg;
+
+       /* set regardless of mapping mode */
+       wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
+
+       /* VF Tx queues allocation */
+       if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+               /* set the VF PF Tx queue range
+                * VFNUMQ value should be set to (number of queues - 1). A value
+                * of 0 means 1 queue and a value of 255 means 256 queues
+                */
+               reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
+                       VPLAN_TX_QBASE_VFFIRSTQ_M) |
+                      (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
+                       VPLAN_TX_QBASE_VFNUMQ_M));
+               wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
+       } else {
+               dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
+       }
+
+       /* set regardless of mapping mode */
+       wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
+
+       /* VF Rx queues allocation */
+       if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+               /* set the VF PF Rx queue range
+                * VFNUMQ value should be set to (number of queues - 1). A value
+                * of 0 means 1 queue and a value of 255 means 256 queues
+                */
+               reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
+                       VPLAN_RX_QBASE_VFFIRSTQ_M) |
+                      (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
+                       VPLAN_RX_QBASE_VFNUMQ_M));
+               wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
+       } else {
+               dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
+       }
+}
+
+/**
+ * ice_ena_vf_mappings - enable VF MSIX and queue mapping
+ * @vf: pointer to the VF structure
+ */
+static void ice_ena_vf_mappings(struct ice_vf *vf)
+{
+       struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+       ice_ena_vf_msix_mappings(vf);
+       ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
+}
+
+/**
+ * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
+ * @vf: VF to calculate the register index for
+ * @q_vector: a q_vector associated to the VF
+ */
+int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
+{
+       struct ice_pf *pf;
+
+       if (!vf || !q_vector)
+               return -EINVAL;
+
+       pf = vf->pf;
+
+       /* always add one to account for the OICR being the first MSIX */
+       return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
+               q_vector->v_idx + 1;
+}
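
Continuing the hypothetical numbers above (base 96, 17 vectors per VF), q_vector 0 of VF 2 registers at 96 + 17 * 2 + 0 + 1 = 131; the trailing + 1 skips the VF's OICR, which always occupies the first vector.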
+
+/**
+ * ice_get_max_valid_res_idx - Get the max valid resource index
+ * @res: pointer to the resource to find the max valid index for
+ *
+ * Start from the end of the ice_res_tracker and return as soon as we find the
+ * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
+ * valid for SR-IOV because it is the only consumer that manipulates the
+ * res->end and this is always called when res->end is set to res->num_entries.
+ */
+static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
+{
+       int i;
+
+       if (!res)
+               return -EINVAL;
+
+       for (i = res->num_entries - 1; i >= 0; i--)
+               if (res->list[i] & ICE_RES_VALID_BIT)
+                       return i;
+
+       return 0;
+}
+
+/**
+ * ice_sriov_set_msix_res - Set any used MSIX resources
+ * @pf: pointer to PF structure
+ * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
+ *
+ * This function allows SR-IOV resources to be taken from the end of the PF's
+ * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
+ * just set the pf->sriov_base_vector and return success.
+ *
+ * If there are not enough resources available, return an error. This should
+ * always be caught by ice_set_per_vf_res().
+ *
+ * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
+ * in the PF's space available for SR-IOV.
+ */
+static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
+{
+       u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
+       int vectors_used = pf->irq_tracker->num_entries;
+       int sriov_base_vector;
+
+       sriov_base_vector = total_vectors - num_msix_needed;
+
+       /* make sure we only grab irq_tracker entries from the list end and
+        * that we have enough available MSIX vectors
+        */
+       if (sriov_base_vector < vectors_used)
+               return -EINVAL;
+
+       pf->sriov_base_vector = sriov_base_vector;
+
+       return 0;
+}
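
In other words, SR-IOV vectors are carved from the top of the PF's MSI-X space: with a hypothetical 1024 total vectors and 136 needed (8 VFs at 17 vectors each), sriov_base_vector becomes 1024 - 136 = 888, which is accepted as long as the irq_tracker occupies fewer than 888 entries.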
+
+/**
+ * ice_set_per_vf_res - check if vectors and queues are available
+ * @pf: pointer to the PF structure
+ * @num_vfs: the number of SR-IOV VFs being configured
+ *
+ * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
+ * get more vectors and can enable more queues per VF. Note that this does not
+ * grab any vectors from the SW pool already allocated. Also note that all
+ * vector counts include one for each VF's miscellaneous interrupt vector
+ * (i.e. OICR).
+ *
+ * Minimum VFs - 2 vectors, 1 queue pair
+ * Small VFs - 5 vectors, 4 queue pairs
+ * Medium VFs - 17 vectors, 16 queue pairs
+ *
+ * Second, determine number of queue pairs per VF by starting with a pre-defined
+ * maximum each VF supports. If this is not possible, then we adjust based on
+ * queue pairs available on the device.
+ *
+ * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
+ * by each VF during VF initialization and reset.
+ */
+static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
+{
+       int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
+       u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
+       int msix_avail_per_vf, msix_avail_for_sriov;
+       struct device *dev = ice_pf_to_dev(pf);
+       int err;
+
+       lockdep_assert_held(&pf->vfs.table_lock);
+
+       if (!num_vfs)
+               return -EINVAL;
+
+       if (max_valid_res_idx < 0)
+               return -ENOSPC;
+
+       /* determine MSI-X resources per VF */
+       msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
+               pf->irq_tracker->num_entries;
+       msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
+       if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
+               num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
+       } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
+               num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
+       } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
+               num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
+       } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
+               num_msix_per_vf = ICE_MIN_INTR_PER_VF;
+       } else {
+               dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
+                       msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
+                       num_vfs);
+               return -ENOSPC;
+       }
+
+       num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
+                       ICE_MAX_RSS_QS_PER_VF);
+       avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
+       if (!avail_qs)
+               num_txq = 0;
+       else if (num_txq > avail_qs)
+               num_txq = rounddown_pow_of_two(avail_qs);
+
+       num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
+                       ICE_MAX_RSS_QS_PER_VF);
+       avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
+       if (!avail_qs)
+               num_rxq = 0;
+       else if (num_rxq > avail_qs)
+               num_rxq = rounddown_pow_of_two(avail_qs);
+
+       if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
+               dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
+                       ICE_MIN_QS_PER_VF, num_vfs);
+               return -ENOSPC;
+       }
+
+       err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
+       if (err) {
+               dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
+                       num_vfs, err);
+               return err;
+       }
+
+       /* only allow equal Tx/Rx queue count (i.e. queue pairs) */
+       pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
+       pf->vfs.num_msix_per = num_msix_per_vf;
+       dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
+                num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);
+
+       return 0;
+}
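
Putting the tiers together with hypothetical capacities: if 960 MSI-X vectors remain after the PF's own irq_tracker entries and 8 VFs are requested, 960 / 8 = 120 vectors fit per VF, so each VF receives the medium allotment of 17 vectors and, per the tier table in the comment above, up to 16 queue pairs, subject to the Tx/Rx queue availability checks that follow.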
+
+/**
+ * ice_init_vf_vsi_res - initialize/setup VF VSI resources
+ * @vf: VF to initialize/setup the VSI for
+ *
+ * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
+ * the VF VSI's broadcast filter. It is only used during initial VF creation.
+ */
+static int ice_init_vf_vsi_res(struct ice_vf *vf)
+{
+       struct ice_vsi_vlan_ops *vlan_ops;
+       struct ice_pf *pf = vf->pf;
+       u8 broadcast[ETH_ALEN];
+       struct ice_vsi *vsi;
+       struct device *dev;
+       int err;
+
+       vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
+
+       dev = ice_pf_to_dev(pf);
+       vsi = ice_vf_vsi_setup(vf);
+       if (!vsi)
+               return -ENOMEM;
+
+       err = ice_vsi_add_vlan_zero(vsi);
+       if (err) {
+               dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
+                        vf->vf_id);
+               goto release_vsi;
+       }
+
+       vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+       err = vlan_ops->ena_rx_filtering(vsi);
+       if (err) {
+               dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
+                        vf->vf_id);
+               goto release_vsi;
+       }
+
+       eth_broadcast_addr(broadcast);
+       err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+       if (err) {
+               dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
+                       vf->vf_id, err);
+               goto release_vsi;
+       }
+
+       err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
+       if (err) {
+               dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
+                        vf->vf_id);
+               goto release_vsi;
+       }
+
+       vf->num_mac = 1;
+
+       return 0;
+
+release_vsi:
+       ice_vf_vsi_release(vf);
+       return err;
+}
+
+/**
+ * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
+ * @pf: PF the VFs are associated with
+ */
+static int ice_start_vfs(struct ice_pf *pf)
+{
+       struct ice_hw *hw = &pf->hw;
+       unsigned int bkt, it_cnt;
+       struct ice_vf *vf;
+       int retval;
+
+       lockdep_assert_held(&pf->vfs.table_lock);
+
+       it_cnt = 0;
+       ice_for_each_vf(pf, bkt, vf) {
+               vf->vf_ops->clear_reset_trigger(vf);
+
+               retval = ice_init_vf_vsi_res(vf);
+               if (retval) {
+                       dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
+                               vf->vf_id, retval);
+                       goto teardown;
+               }
+
+               set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+               ice_ena_vf_mappings(vf);
+               wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+               it_cnt++;
+       }
+
+       ice_flush(hw);
+       return 0;
+
+teardown:
+       ice_for_each_vf(pf, bkt, vf) {
+               if (it_cnt == 0)
+                       break;
+
+               ice_dis_vf_mappings(vf);
+               ice_vf_vsi_release(vf);
+               it_cnt--;
+       }
+
+       return retval;
+}
+
+/**
+ * ice_sriov_free_vf - Free VF memory after all references are dropped
+ * @vf: pointer to VF to free
+ *
+ * Called by ice_put_vf through ice_release_vf once the last reference to a VF
+ * structure has been dropped.
+ */
+static void ice_sriov_free_vf(struct ice_vf *vf)
 {
-       struct ice_aqc_pf_vf_msg *cmd;
-       struct ice_aq_desc desc;
+       mutex_destroy(&vf->cfg_lock);
+
+       kfree_rcu(vf, rcu);
+}
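
kfree_rcu(vf, rcu) defers the free past an RCU grace period, pairing with the hash_add_rcu()/hash_del_rcu() calls so lockless readers never see a freed entry. It requires struct ice_vf to embed a struct rcu_head member named rcu, roughly:

    struct ice_vf {
            /* ... other members elided ... */
            struct rcu_head rcu;    /* consumed by kfree_rcu(vf, rcu) */
    };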
+
+/**
+ * ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers
+ * @vf: the VF to configure
+ */
+static void ice_sriov_clear_mbx_register(struct ice_vf *vf)
+{
+       struct ice_pf *pf = vf->pf;
+
+       wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0);
+       wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0);
+}
+
+/**
+ * ice_sriov_trigger_reset_register - trigger VF reset for SRIOV VF
+ * @vf: pointer to VF structure
+ * @is_vflr: true if reset occurred due to VFLR
+ *
+ * Trigger and cleanup after a VF reset for a SR-IOV VF.
+ */
+static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr)
+{
+       struct ice_pf *pf = vf->pf;
+       u32 reg, reg_idx, bit_idx;
+       unsigned int vf_abs_id, i;
+       struct device *dev;
+       struct ice_hw *hw;
+
+       dev = ice_pf_to_dev(pf);
+       hw = &pf->hw;
+       vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+       /* In the case of a VFLR, HW has already reset the VF and we just need
+        * to clean up. Otherwise we must first trigger the reset using the
+        * VFRTRIG register.
+        */
+       if (!is_vflr) {
+               reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+               reg |= VPGEN_VFRTRIG_VFSWR_M;
+               wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+       }
+
+       /* clear the VFLR bit in GLGEN_VFLRSTAT */
+       reg_idx = (vf_abs_id) / 32;
+       bit_idx = (vf_abs_id) % 32;
+       wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+       ice_flush(hw);
+
+       wr32(hw, PF_PCI_CIAA,
+            VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
+       for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
+               reg = rd32(hw, PF_PCI_CIAD);
+               /* no transactions pending so stop polling */
+               if ((reg & VF_TRANS_PENDING_M) == 0)
+                       break;
+
+               dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
+               udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
+       }
+}
+
+/**
+ * ice_sriov_poll_reset_status - poll SRIOV VF reset status
+ * @vf: pointer to VF structure
+ *
+ * Returns true when reset is successful, else returns false
+ */
+static bool ice_sriov_poll_reset_status(struct ice_vf *vf)
+{
+       struct ice_pf *pf = vf->pf;
+       unsigned int i;
+       u32 reg;
+
+       for (i = 0; i < 10; i++) {
+               /* VF reset requires driver to first reset the VF and then
+                * poll the status register to make sure that the reset
+                * completed successfully.
+                */
+               reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
+               if (reg & VPGEN_VFRSTAT_VFRD_M)
+                       return true;
+
+               /* only sleep if the reset is not done */
+               usleep_range(10, 20);
+       }
+       return false;
+}
+
+/**
+ * ice_sriov_clear_reset_trigger - enable VF to access hardware
+ * @vf: VF to enable hardware access for
+ */
+static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
+{
+       struct ice_hw *hw = &vf->pf->hw;
+       u32 reg;
+
+       reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+       reg &= ~VPGEN_VFRTRIG_VFSWR_M;
+       wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+       ice_flush(hw);
+}
+
+/**
+ * ice_sriov_vsi_rebuild - release and rebuild VF's VSI
+ * @vf: VF to release and setup the VSI for
+ *
+ * This is only called when a single VF is being reset (e.g. VFR, VFLR, host VF
+ * configuration change, etc.).
+ */
+static int ice_sriov_vsi_rebuild(struct ice_vf *vf)
+{
+       struct ice_pf *pf = vf->pf;
+
+       ice_vf_vsi_release(vf);
+       if (!ice_vf_vsi_setup(vf)) {
+               dev_err(ice_pf_to_dev(pf),
+                       "Failed to release and setup the VF%u's VSI\n",
+                       vf->vf_id);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/**
+ * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
+ * @vf: VF to perform tasks on
+ */
+static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
+{
+       ice_vf_rebuild_host_cfg(vf);
+       ice_vf_set_initialized(vf);
+       ice_ena_vf_mappings(vf);
+       wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+}
+
+static const struct ice_vf_ops ice_sriov_vf_ops = {
+       .reset_type = ICE_VF_RESET,
+       .free = ice_sriov_free_vf,
+       .clear_mbx_register = ice_sriov_clear_mbx_register,
+       .trigger_reset_register = ice_sriov_trigger_reset_register,
+       .poll_reset_status = ice_sriov_poll_reset_status,
+       .clear_reset_trigger = ice_sriov_clear_reset_trigger,
+       .vsi_rebuild = ice_sriov_vsi_rebuild,
+       .post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
+};
+
+/**
+ * ice_create_vf_entries - Allocate and insert VF entries
+ * @pf: pointer to the PF structure
+ * @num_vfs: the number of VFs to allocate
+ *
+ * Allocate new VF entries and insert them into the hash table. Set some
+ * basic default fields for initializing the new VFs.
+ *
+ * On success, the hash table will contain num_vfs entries when this function
+ * exits.
+ *
+ * Returns 0 on success or an integer error code on failure.
+ */
+static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
+{
+       struct ice_vfs *vfs = &pf->vfs;
+       struct ice_vf *vf;
+       u16 vf_id;
+       int err;
+
+       lockdep_assert_held(&vfs->table_lock);
+
+       for (vf_id = 0; vf_id < num_vfs; vf_id++) {
+               vf = kzalloc(sizeof(*vf), GFP_KERNEL);
+               if (!vf) {
+                       err = -ENOMEM;
+                       goto err_free_entries;
+               }
+               kref_init(&vf->refcnt);
+
+               vf->pf = pf;
+               vf->vf_id = vf_id;
+
+               /* set sriov vf ops for VFs created during SRIOV flow */
+               vf->vf_ops = &ice_sriov_vf_ops;
+
+               vf->vf_sw_id = pf->first_sw;
+               /* assign default capabilities */
+               vf->spoofchk = true;
+               vf->num_vf_qs = pf->vfs.num_qps_per;
+               ice_vc_set_default_allowlist(vf);
+
+               /* ctrl_vsi_idx will be set to a valid value only when VF
+                * creates its first fdir rule.
+                */
+               ice_vf_ctrl_invalidate_vsi(vf);
+               ice_vf_fdir_init(vf);
+
+               ice_virtchnl_set_dflt_ops(vf);
+
+               mutex_init(&vf->cfg_lock);
+
+               hash_add_rcu(vfs->table, &vf->entry, vf_id);
+       }
+
+       return 0;
+
+err_free_entries:
+       ice_free_vf_entries(pf);
+       return err;
+}
+
+/**
+ * ice_ena_vfs - enable VFs so they are ready to be used
+ * @pf: pointer to the PF structure
+ * @num_vfs: number of VFs to enable
+ */
+static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
+{
+       struct device *dev = ice_pf_to_dev(pf);
+       struct ice_hw *hw = &pf->hw;
+       int ret;
+
+       /* Disable global interrupt 0 so we don't try to handle the VFLR. */
+       wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
+            ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
+       set_bit(ICE_OICR_INTR_DIS, pf->state);
+       ice_flush(hw);
+
+       ret = pci_enable_sriov(pf->pdev, num_vfs);
+       if (ret)
+               goto err_unroll_intr;
+
+       mutex_lock(&pf->vfs.table_lock);
+
+       ret = ice_set_per_vf_res(pf, num_vfs);
+       if (ret) {
+               dev_err(dev, "Not enough resources for %d VFs, err %d. Try with fewer VFs\n",
+                       num_vfs, ret);
+               goto err_unroll_sriov;
+       }
+
+       ret = ice_create_vf_entries(pf, num_vfs);
+       if (ret) {
+               dev_err(dev, "Failed to allocate VF entries for %d VFs\n",
+                       num_vfs);
+               goto err_unroll_sriov;
+       }
+
+       ret = ice_start_vfs(pf);
+       if (ret) {
+               dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
+               ret = -EAGAIN;
+               goto err_unroll_vf_entries;
+       }
+
+       clear_bit(ICE_VF_DIS, pf->state);
+
+       ret = ice_eswitch_configure(pf);
+       if (ret) {
+               dev_err(dev, "Failed to configure eswitch, err %d\n", ret);
+               goto err_unroll_sriov;
+       }
+
+       /* rearm global interrupts */
+       if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
+               ice_irq_dynamic_ena(hw, NULL, NULL);
+
+       mutex_unlock(&pf->vfs.table_lock);
+
+       return 0;
+
+err_unroll_vf_entries:
+       ice_free_vf_entries(pf);
+err_unroll_sriov:
+       mutex_unlock(&pf->vfs.table_lock);
+       pci_disable_sriov(pf->pdev);
+err_unroll_intr:
+       /* rearm interrupts here */
+       ice_irq_dynamic_ena(hw, NULL, NULL);
+       clear_bit(ICE_OICR_INTR_DIS, pf->state);
+       return ret;
+}
+
+/**
+ * ice_pci_sriov_ena - Enable or change number of VFs
+ * @pf: pointer to the PF structure
+ * @num_vfs: number of VFs to allocate
+ *
+ * Returns 0 on success and negative on failure
+ */
+static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
+{
+       int pre_existing_vfs = pci_num_vf(pf->pdev);
+       struct device *dev = ice_pf_to_dev(pf);
+       int err;
+
+       if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+               ice_free_vfs(pf);
+       else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+               return 0;
+
+       if (num_vfs > pf->vfs.num_supported) {
+               dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
+                       num_vfs, pf->vfs.num_supported);
+               return -EOPNOTSUPP;
+       }
+
+       dev_info(dev, "Enabling %d VFs\n", num_vfs);
+       err = ice_ena_vfs(pf, num_vfs);
+       if (err) {
+               dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
+               return err;
+       }
+
+       set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
+       return 0;
+}
 
-       ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
+/**
+ * ice_check_sriov_allowed - check if SR-IOV is allowed in the current state
+ * @pf: PF to enable SR-IOV on
+ */
+static int ice_check_sriov_allowed(struct ice_pf *pf)
+{
+       struct device *dev = ice_pf_to_dev(pf);
 
-       cmd = &desc.params.virt;
-       cmd->id = cpu_to_le32(vfid);
+       if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
+               dev_err(dev, "This device is not capable of SR-IOV\n");
+               return -EOPNOTSUPP;
+       }
 
-       desc.cookie_high = cpu_to_le32(v_opcode);
-       desc.cookie_low = cpu_to_le32(v_retval);
+       if (ice_is_safe_mode(pf)) {
+               dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
+               return -EOPNOTSUPP;
+       }
 
-       if (msglen)
-               desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+       if (!ice_pf_state_is_nominal(pf)) {
+               dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
+               return -EBUSY;
+       }
 
-       return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+       return 0;
 }
 
 /**
- * ice_conv_link_speed_to_virtchnl
- * @adv_link_support: determines the format of the returned link speed
- * @link_speed: variable containing the link_speed to be converted
+ * ice_sriov_configure - Enable or change number of VFs via sysfs
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate or 0 to free VFs
  *
- * Convert link speed supported by HW to link speed supported by virtchnl.
- * If adv_link_support is true, then return link speed in Mbps. Else return
- * link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller
- * needs to cast back to an enum virtchnl_link_speed in the case where
- * adv_link_support is false, but when adv_link_support is true the caller can
- * expect the speed in Mbps.
+ * This function is called when the user updates the number of VFs in sysfs. On
+ * success return whatever num_vfs was set to by the caller. Return negative on
+ * failure.
  */
-u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
 {
-       u32 speed;
+       struct ice_pf *pf = pci_get_drvdata(pdev);
+       struct device *dev = ice_pf_to_dev(pf);
+       int err;
 
-       if (adv_link_support)
-               switch (link_speed) {
-               case ICE_AQ_LINK_SPEED_10MB:
-                       speed = ICE_LINK_SPEED_10MBPS;
-                       break;
-               case ICE_AQ_LINK_SPEED_100MB:
-                       speed = ICE_LINK_SPEED_100MBPS;
-                       break;
-               case ICE_AQ_LINK_SPEED_1000MB:
-                       speed = ICE_LINK_SPEED_1000MBPS;
-                       break;
-               case ICE_AQ_LINK_SPEED_2500MB:
-                       speed = ICE_LINK_SPEED_2500MBPS;
-                       break;
-               case ICE_AQ_LINK_SPEED_5GB:
-                       speed = ICE_LINK_SPEED_5000MBPS;
-                       break;
-               case ICE_AQ_LINK_SPEED_10GB:
-                       speed = ICE_LINK_SPEED_10000MBPS;
-                       break;
-               case ICE_AQ_LINK_SPEED_20GB:
-                       speed = ICE_LINK_SPEED_20000MBPS;
-                       break;
-               case ICE_AQ_LINK_SPEED_25GB:
-                       speed = ICE_LINK_SPEED_25000MBPS;
-                       break;
-               case ICE_AQ_LINK_SPEED_40GB:
-                       speed = ICE_LINK_SPEED_40000MBPS;
-                       break;
-               case ICE_AQ_LINK_SPEED_50GB:
-                       speed = ICE_LINK_SPEED_50000MBPS;
-                       break;
-               case ICE_AQ_LINK_SPEED_100GB:
-                       speed = ICE_LINK_SPEED_100000MBPS;
-                       break;
-               default:
-                       speed = ICE_LINK_SPEED_UNKNOWN;
-                       break;
-               }
-       else
-               /* Virtchnl speeds are not defined for every speed supported in
-                * the hardware. To maintain compatibility with older AVF
-                * drivers, while reporting the speed the new speed values are
-                * resolved to the closest known virtchnl speeds
-                */
-               switch (link_speed) {
-               case ICE_AQ_LINK_SPEED_10MB:
-               case ICE_AQ_LINK_SPEED_100MB:
-                       speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
-                       break;
-               case ICE_AQ_LINK_SPEED_1000MB:
-               case ICE_AQ_LINK_SPEED_2500MB:
-               case ICE_AQ_LINK_SPEED_5GB:
-                       speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
-                       break;
-               case ICE_AQ_LINK_SPEED_10GB:
-                       speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
-                       break;
-               case ICE_AQ_LINK_SPEED_20GB:
-                       speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
-                       break;
-               case ICE_AQ_LINK_SPEED_25GB:
-                       speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
-                       break;
-               case ICE_AQ_LINK_SPEED_40GB:
-               case ICE_AQ_LINK_SPEED_50GB:
-               case ICE_AQ_LINK_SPEED_100GB:
-                       speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
-                       break;
-               default:
-                       speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
-                       break;
+       err = ice_check_sriov_allowed(pf);
+       if (err)
+               return err;
+
+       if (!num_vfs) {
+               if (!pci_vfs_assigned(pdev)) {
+                       ice_mbx_deinit_snapshot(&pf->hw);
+                       ice_free_vfs(pf);
+                       if (pf->lag)
+                               ice_enable_lag(pf->lag);
+                       return 0;
                }
 
-       return speed;
+               dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
+               return -EBUSY;
+       }
+
+       err = ice_mbx_init_snapshot(&pf->hw, num_vfs);
+       if (err)
+               return err;
+
+       err = ice_pci_sriov_ena(pf, num_vfs);
+       if (err) {
+               ice_mbx_deinit_snapshot(&pf->hw);
+               return err;
+       }
+
+       if (pf->lag)
+               ice_disable_lag(pf->lag);
+       return num_vfs;
 }
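
For context: ice_sriov_configure() is the driver's PCI .sriov_configure
callback, so writes to the sriov_numvfs sysfs attribute land here. A minimal
sketch of that hookup, assuming the registration lives in the PF driver's
struct pci_driver (surrounding initializers elided):

	static struct pci_driver ice_driver = {
		.name            = KBUILD_MODNAME,
		/* .probe, .remove and other callbacks elided */
		.sriov_configure = ice_sriov_configure,
	};

With this in place, "echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs"
enables four VFs and "echo 0" frees them again, unless some VFs are still
assigned to VMs, in which case -EBUSY is returned as above.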
 
-/* The mailbox overflow detection algorithm helps to check if there
- * is a possibility of a malicious VF transmitting too many MBX messages to the
- * PF.
- * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
- * driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
- * The struct ice_mbx_snapshot helps to track and traverse a static window of
- * messages within the mailbox queue while looking for a malicious VF.
- *
- * 2. When the caller starts processing its mailbox queue in response to an
- * interrupt, the structure ice_mbx_snapshot is expected to be cleared before
- * the algorithm can be run for the first time for that interrupt. This can be
- * done via ice_mbx_reset_snapshot().
- *
- * 3. For every message read by the caller from the MBX Queue, the caller must
- * call the detection algorithm's entry function ice_mbx_vf_state_handler().
- * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
- * filled as it is required to be passed to the algorithm.
- *
- * 4. Every time a message is read from the MBX queue, a VFId is received which
- * is passed to the state handler. The boolean output is_malvf of the state
- * handler ice_mbx_vf_state_handler() serves as an indicator to the caller
- * whether this VF is malicious or not.
- *
- * 5. When a VF is identified to be malicious, the caller can send a message
- * to the system administrator. The caller can invoke ice_mbx_report_malvf()
- * to help determine if a malicious VF is to be reported or not. This function
- * requires the caller to maintain a global bitmap to track all malicious VFs
- * and pass that to ice_mbx_report_malvf() along with the VFID which was identified
- * to be malicious by ice_mbx_vf_state_handler().
+/**
+ * ice_process_vflr_event - Free VF resources via IRQ calls
+ * @pf: pointer to the PF structure
  *
- * 6. The global bitmap maintained by PF can be cleared completely if PF is in
- * reset or the bit corresponding to a VF can be cleared if that VF is in reset.
- * When a VF is shut down and brought back up, we assume that the new VF
- * brought up is not malicious and hence report it if found malicious.
+ * Called from the VFLR IRQ handler to free up VF resources and state
+ * variables.
+ */
+void ice_process_vflr_event(struct ice_pf *pf)
+{
+       struct ice_hw *hw = &pf->hw;
+       struct ice_vf *vf;
+       unsigned int bkt;
+       u32 reg;
+
+       if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
+           !ice_has_vfs(pf))
+               return;
+
+       mutex_lock(&pf->vfs.table_lock);
+       ice_for_each_vf(pf, bkt, vf) {
+               u32 reg_idx, bit_idx;
+
+               reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+               bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+               /* read GLGEN_VFLRSTAT register to find out which VFs were reset (VFLR) */
+               reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
+               if (reg & BIT(bit_idx))
+                       /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
+                       ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
+       }
+       mutex_unlock(&pf->vfs.table_lock);
+}
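
The reg_idx/bit_idx arithmetic above spreads the absolute VF index across the
32-bit GLGEN_VFLRSTAT registers. A worked example with hypothetical values
(vf_base_id = 64 and vf_id = 5 are made up, not real hardware numbers):

	u32 abs_vf  = 64 + 5;		/* absolute VF index 69 */
	u32 reg_idx = abs_vf / 32;	/* GLGEN_VFLRSTAT(2) */
	u32 bit_idx = abs_vf % 32;	/* bit 5 within that register */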
+
+/**
+ * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
+ * @pf: PF used to index all VFs
+ * @pfq: queue index relative to the PF's function space
  *
- * 7. The function ice_mbx_reset_snapshot() is called to reset the information
- * in ice_mbx_snapshot for every new mailbox interrupt handled.
+ * Return NULL if no VF owns the pfq; otherwise return a pointer to the VF
+ * that owns it.
  *
- * 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated
- * when driver is unloaded.
+ * If this function returns non-NULL, it acquires a reference count of the VF
+ * structure. The caller is responsible for calling ice_put_vf() to drop this
+ * reference.
+ */
+static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
+{
+       struct ice_vf *vf;
+       unsigned int bkt;
+
+       rcu_read_lock();
+       ice_for_each_vf_rcu(pf, bkt, vf) {
+               struct ice_vsi *vsi;
+               u16 rxq_idx;
+
+               vsi = ice_get_vf_vsi(vf);
+
+               ice_for_each_rxq(vsi, rxq_idx)
+                       if (vsi->rxq_map[rxq_idx] == pfq) {
+                               struct ice_vf *found;
+
+                               if (kref_get_unless_zero(&vf->refcnt))
+                                       found = vf;
+                               else
+                                       found = NULL;
+                               rcu_read_unlock();
+                               return found;
+                       }
+       }
+       rcu_read_unlock();
+
+       return NULL;
+}
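
The kref_get_unless_zero() step above is the standard RCU lookup-plus-refcount
pattern: the RCU read lock only keeps the ice_vf memory from being freed, it
does not guarantee the VF is still live, so the reference must be taken inside
the RCU section and may legitimately fail. A generic sketch of the pattern
(obj, lookup() and release_obj() are placeholders, not ice symbols):

	rcu_read_lock();
	obj = lookup(table, key);
	if (obj && !kref_get_unless_zero(&obj->refcnt))
		obj = NULL;		/* raced with the final kref_put() */
	rcu_read_unlock();

	if (obj) {
		use(obj);
		kref_put(&obj->refcnt, release_obj);
	}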
+
+/**
+ * ice_globalq_to_pfq - convert from global queue index to PF space queue index
+ * @pf: PF used for conversion
+ * @globalq: global queue index used to convert to PF space queue index
  */
-#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
-/* Using the highest value for an unsigned 16-bit value 0xFFFF to indicate that
- * the max messages check must be ignored in the algorithm
+static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
+{
+       return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
+}
+
+/**
+ * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
+ * @pf: PF that the LAN overflow event happened on
+ * @event: structure holding the event information for the LAN overflow event
+ *
+ * Determine if the LAN overflow event was caused by a VF queue. If it was not
+ * caused by a VF, do nothing. If a VF caused this LAN overflow event, trigger
+ * a reset on the offending VF.
  */
-#define ICE_IGNORE_MAX_MSG_CNT 0xFFFF
+void
+ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+       u32 gldcb_rtctq, queue;
+       struct ice_vf *vf;
+
+       gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
+       dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
+
+       /* event returns device global Rx queue number */
+       queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
+               GLDCB_RTCTQ_RXQNUM_S;
+
+       vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
+       if (!vf)
+               return;
+
+       ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
+       ice_put_vf(vf);
+}
 
 /**
- * ice_mbx_traverse - Pass through mailbox snapshot
- * @hw: pointer to the HW struct
- * @new_state: new algorithm state
+ * ice_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ena: flag to enable or disable feature
  *
- * Traversing the mailbox static snapshot without checking
- * for malicious VFs.
+ * Enable or disable VF spoof checking
  */
-static void
-ice_mbx_traverse(struct ice_hw *hw,
-                enum ice_mbx_snapshot_state *new_state)
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
 {
-       struct ice_mbx_snap_buffer_data *snap_buf;
-       u32 num_iterations;
+       struct ice_netdev_priv *np = netdev_priv(netdev);
+       struct ice_pf *pf = np->vsi->back;
+       struct ice_vsi *vf_vsi;
+       struct device *dev;
+       struct ice_vf *vf;
+       int ret;
 
-       snap_buf = &hw->mbx_snapshot.mbx_buf;
+       dev = ice_pf_to_dev(pf);
 
-       /* As mailbox buffer is circular, applying a mask
-        * on the incremented iteration count.
-        */
-       num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);
-
-       /* Checking either of the below conditions to exit snapshot traversal:
-        * Condition-1: If the number of iterations in the mailbox is equal to
-        * the mailbox head which would indicate that we have reached the end
-        * of the static snapshot.
-        * Condition-2: If the maximum messages serviced in the mailbox for a
-        * given interrupt is the highest possible value then there is no need
-        * to check if the number of messages processed is equal to it. If not
-        * check if the number of messages processed is greater than or equal
-        * to the maximum number of mailbox entries serviced in current work item.
-        */
-       if (num_iterations == snap_buf->head ||
-           (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
-            ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
-               *new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+       vf = ice_get_vf_by_id(pf, vf_id);
+       if (!vf)
+               return -EINVAL;
+
+       ret = ice_check_vf_ready_for_cfg(vf);
+       if (ret)
+               goto out_put_vf;
+
+       vf_vsi = ice_get_vf_vsi(vf);
+       if (!vf_vsi) {
+               netdev_err(netdev, "VSI %d for VF %d is null\n",
+                          vf->lan_vsi_idx, vf->vf_id);
+               ret = -EINVAL;
+               goto out_put_vf;
+       }
+
+       if (vf_vsi->type != ICE_VSI_VF) {
+               netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
+                          vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
+               ret = -ENODEV;
+               goto out_put_vf;
+       }
+
+       if (ena == vf->spoofchk) {
+               dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
+               ret = 0;
+               goto out_put_vf;
+       }
+
+       ret = ice_vsi_apply_spoofchk(vf_vsi, ena);
+       if (ret)
+               dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d\n error %d\n",
+                       ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret);
+       else
+               vf->spoofchk = ena;
+
+out_put_vf:
+       ice_put_vf(vf);
+       return ret;
 }
 
 /**
- * ice_mbx_detect_malvf - Detect malicious VF in snapshot
- * @hw: pointer to the HW struct
- * @vf_id: relative virtual function ID
- * @new_state: new algorithm state
- * @is_malvf: boolean output to indicate if VF is malicious
+ * ice_get_vf_cfg
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ivi: VF configuration structure
  *
- * This function tracks the number of asynchronous messages
- * sent per VF and marks the VF as malicious if it exceeds
- * the permissible number of messages to send.
+ * Return the VF configuration.
  */
-static int
-ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
-                    enum ice_mbx_snapshot_state *new_state,
-                    bool *is_malvf)
+int
+ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
 {
-       struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+       struct ice_pf *pf = ice_netdev_to_pf(netdev);
+       struct ice_vf *vf;
+       int ret;
+
+       vf = ice_get_vf_by_id(pf, vf_id);
+       if (!vf)
+               return -EINVAL;
 
-       if (vf_id >= snap->mbx_vf.vfcntr_len)
-               return -EIO;
+       ret = ice_check_vf_ready_for_cfg(vf);
+       if (ret)
+               goto out_put_vf;
 
-       /* increment the message count in the VF array */
-       snap->mbx_vf.vf_cntr[vf_id]++;
+       ivi->vf = vf_id;
+       ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);
 
-       if (snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD)
-               *is_malvf = true;
+       /* VF configuration for VLAN and applicable QoS */
+       ivi->vlan = ice_vf_get_port_vlan_id(vf);
+       ivi->qos = ice_vf_get_port_vlan_prio(vf);
+       if (ice_vf_is_port_vlan_ena(vf))
+               ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf));
 
-       /* continue to iterate through the mailbox snapshot */
-       ice_mbx_traverse(hw, new_state);
+       ivi->trusted = vf->trusted;
+       ivi->spoofchk = vf->spoofchk;
+       if (!vf->link_forced)
+               ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+       else if (vf->link_up)
+               ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+       else
+               ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+       ivi->max_tx_rate = vf->max_tx_rate;
+       ivi->min_tx_rate = vf->min_tx_rate;
 
-       return 0;
+out_put_vf:
+       ice_put_vf(vf);
+       return ret;
 }
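
ice_get_vf_cfg() and the ice_set_vf_*() helpers below are the PF-side handlers
for "ip link ... vf N ..." requests; they are meant to be plugged into the PF
netdev's net_device_ops. A sketch of the expected wiring, showing only the
VF-related fields (the full ops table lives elsewhere in the driver):

	static const struct net_device_ops ice_netdev_ops = {
		/* ... regular netdev callbacks elided ... */
		.ndo_get_vf_config	= ice_get_vf_cfg,
		.ndo_set_vf_spoofchk	= ice_set_vf_spoofchk,
		.ndo_set_vf_mac		= ice_set_vf_mac,
		.ndo_set_vf_trust	= ice_set_vf_trust,
		.ndo_set_vf_link_state	= ice_set_vf_link_state,
		.ndo_set_vf_rate	= ice_set_vf_bw,
		.ndo_set_vf_vlan	= ice_set_vf_port_vlan,
		.ndo_get_vf_stats	= ice_get_vf_stats,
	};

For example, "ip link set <pf> vf 0 spoofchk on" ends up in
ice_set_vf_spoofchk(), while "ip link show <pf>" reports the ifla_vf_info
that ice_get_vf_cfg() fills in.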
 
 /**
- * ice_mbx_reset_snapshot - Reset mailbox snapshot structure
- * @snap: pointer to mailbox snapshot structure in the ice_hw struct
+ * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
+ * @pf: PF used to reference the switch's rules
+ * @umac: unicast MAC to compare against existing switch rules
  *
- * Reset the mailbox snapshot structure and clear VF counter array.
+ * Return true on the first/any match, else return false
  */
-static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
+static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
 {
-       u32 vfcntr_len;
+       struct ice_sw_recipe *mac_recipe_list =
+               &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
+       struct ice_fltr_mgmt_list_entry *list_itr;
+       struct list_head *rule_head;
+       struct mutex *rule_lock; /* protect MAC filter list access */
 
-       if (!snap || !snap->mbx_vf.vf_cntr)
-               return;
+       rule_head = &mac_recipe_list->filt_rules;
+       rule_lock = &mac_recipe_list->filt_rule_lock;
 
-       /* Clear VF counters. */
-       vfcntr_len = snap->mbx_vf.vfcntr_len;
-       if (vfcntr_len)
-               memset(snap->mbx_vf.vf_cntr, 0,
-                      (vfcntr_len * sizeof(*snap->mbx_vf.vf_cntr)));
-
-       /* Reset mailbox snapshot for a new capture. */
-       memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
-       snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
-}
-
-/**
- * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
- * @hw: pointer to the HW struct
- * @mbx_data: pointer to structure containing mailbox data
- * @vf_id: relative virtual function (VF) ID
- * @is_malvf: boolean output to indicate if VF is malicious
- *
- * The function serves as an entry point for the malicious VF
- * detection algorithm by handling the different states and state
- * transitions of the algorithm:
- * New snapshot: This state is entered when creating a new static
- * snapshot. The data from any previous mailbox snapshot is
- * cleared and a new capture of the mailbox head and tail is
- * logged. This will be the new static snapshot to detect
- * asynchronous messages sent by VFs. On capturing the snapshot
- * and depending on whether the number of pending messages in that
- * snapshot exceed the watermark value, the state machine enters
- * traverse or detect states.
- * Traverse: If pending message count is below watermark then iterate
- * through the snapshot without any action on VF.
- * Detect: If pending message count exceeds watermark traverse
- * the static snapshot and look for a malicious VF.
+       mutex_lock(rule_lock);
+       list_for_each_entry(list_itr, rule_head, list_entry) {
+               u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
+
+               if (ether_addr_equal(existing_mac, umac)) {
+                       mutex_unlock(rule_lock);
+                       return true;
+               }
+       }
+
+       mutex_unlock(rule_lock);
+
+       return false;
+}
+
+/**
+ * ice_set_vf_mac
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @mac: MAC address
+ *
+ * Program the VF MAC address.
  */
-int
-ice_mbx_vf_state_handler(struct ice_hw *hw,
-                        struct ice_mbx_data *mbx_data, u16 vf_id,
-                        bool *is_malvf)
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 {
-       struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
-       struct ice_mbx_snap_buffer_data *snap_buf;
-       struct ice_ctl_q_info *cq = &hw->mailboxq;
-       enum ice_mbx_snapshot_state new_state;
-       int status = 0;
+       struct ice_pf *pf = ice_netdev_to_pf(netdev);
+       struct ice_vf *vf;
+       int ret;
 
-       if (!is_malvf || !mbx_data)
+       if (is_multicast_ether_addr(mac)) {
+               netdev_err(netdev, "%pM not a valid unicast address\n", mac);
                return -EINVAL;
+       }
 
-       /* When entering the mailbox state machine assume that the VF
-        * is not malicious until detected.
-        */
-       *is_malvf = false;
-
-        /* Checking if max messages allowed to be processed while servicing current
-         * interrupt is not less than the defined AVF message threshold.
-         */
-       if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
+       vf = ice_get_vf_by_id(pf, vf_id);
+       if (!vf)
                return -EINVAL;
 
-       /* The watermark value should not be lesser than the threshold limit
-        * set for the number of asynchronous messages a VF can send to mailbox
-        * nor should it be greater than the maximum number of messages in the
-        * mailbox serviced in current interrupt.
+       /* nothing left to do, unicast MAC already set */
+       if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
+           ether_addr_equal(vf->hw_lan_addr.addr, mac)) {
+               ret = 0;
+               goto out_put_vf;
+       }
+
+       ret = ice_check_vf_ready_for_cfg(vf);
+       if (ret)
+               goto out_put_vf;
+
+       if (ice_unicast_mac_exists(pf, mac)) {
+               netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
+                          mac, vf_id, mac);
+               ret = -EINVAL;
+               goto out_put_vf;
+       }
+
+       mutex_lock(&vf->cfg_lock);
+
+       /* VF is notified of its new MAC via the PF's response to the
+        * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
         */
-       if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
-           mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
+       ether_addr_copy(vf->dev_lan_addr.addr, mac);
+       ether_addr_copy(vf->hw_lan_addr.addr, mac);
+       if (is_zero_ether_addr(mac)) {
+               /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
+               vf->pf_set_mac = false;
+               netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
+                           vf->vf_id);
+       } else {
+               /* PF will add MAC rule for the VF */
+               vf->pf_set_mac = true;
+               netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
+                           mac, vf_id);
+       }
+
+       ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+       mutex_unlock(&vf->cfg_lock);
+
+out_put_vf:
+       ice_put_vf(vf);
+       return ret;
+}
+
+/**
+ * ice_set_vf_trust
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @trusted: Boolean value to enable/disable trusted VF
+ *
+ * Enable or disable a given VF as trusted
+ */
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
+{
+       struct ice_pf *pf = ice_netdev_to_pf(netdev);
+       struct ice_vf *vf;
+       int ret;
+
+       if (ice_is_eswitch_mode_switchdev(pf)) {
+               dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
+               return -EOPNOTSUPP;
+       }
+
+       vf = ice_get_vf_by_id(pf, vf_id);
+       if (!vf)
                return -EINVAL;
 
-       new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
-       snap_buf = &snap->mbx_buf;
+       ret = ice_check_vf_ready_for_cfg(vf);
+       if (ret)
+               goto out_put_vf;
+
+       /* Check if already trusted */
+       if (trusted == vf->trusted) {
+               ret = 0;
+               goto out_put_vf;
+       }
 
-       switch (snap_buf->state) {
-       case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
-               /* Clear any previously held data in mailbox snapshot structure. */
-               ice_mbx_reset_snapshot(snap);
+       mutex_lock(&vf->cfg_lock);
 
-               /* Collect the pending ARQ count, number of messages processed and
-                * the maximum number of messages allowed to be processed from the
-                * Mailbox for current interrupt.
-                */
-               snap_buf->num_pending_arq = mbx_data->num_pending_arq;
-               snap_buf->num_msg_proc = mbx_data->num_msg_proc;
-               snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;
+       vf->trusted = trusted;
+       ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+       dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
+                vf_id, trusted ? "" : "un");
 
-               /* Capture a new static snapshot of the mailbox by logging the
-                * head and tail of snapshot and set num_iterations to the tail
-                * value to mark the start of the iteration through the snapshot.
-                */
-               snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
-                                                 mbx_data->num_pending_arq);
-               snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
-               snap_buf->num_iterations = snap_buf->tail;
-
-               /* Pending ARQ messages returned by ice_clean_rq_elem
-                * is the difference between the head and tail of the
-                * mailbox queue. Comparing this value against the watermark
-                * helps to check if we potentially have malicious VFs.
-                */
-               if (snap_buf->num_pending_arq >=
-                   mbx_data->async_watermark_val) {
-                       new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
-                       status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
-               } else {
-                       new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
-                       ice_mbx_traverse(hw, &new_state);
-               }
-               break;
+       mutex_unlock(&vf->cfg_lock);
 
-       case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
-               new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
-               ice_mbx_traverse(hw, &new_state);
-               break;
+out_put_vf:
+       ice_put_vf(vf);
+       return ret;
+}
 
-       case ICE_MAL_VF_DETECT_STATE_DETECT:
-               new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
-               status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
-               break;
+/**
+ * ice_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @link_state: required link state
+ *
+ * Set VF's link state, irrespective of physical link state status
+ */
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
+{
+       struct ice_pf *pf = ice_netdev_to_pf(netdev);
+       struct ice_vf *vf;
+       int ret;
+
+       vf = ice_get_vf_by_id(pf, vf_id);
+       if (!vf)
+               return -EINVAL;
+
+       ret = ice_check_vf_ready_for_cfg(vf);
+       if (ret)
+               goto out_put_vf;
 
+       switch (link_state) {
+       case IFLA_VF_LINK_STATE_AUTO:
+               vf->link_forced = false;
+               break;
+       case IFLA_VF_LINK_STATE_ENABLE:
+               vf->link_forced = true;
+               vf->link_up = true;
+               break;
+       case IFLA_VF_LINK_STATE_DISABLE:
+               vf->link_forced = true;
+               vf->link_up = false;
+               break;
        default:
-               new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
-               status = -EIO;
+               ret = -EINVAL;
+               goto out_put_vf;
        }
 
-       snap_buf->state = new_state;
+       ice_vc_notify_vf_link_state(vf);
+
+out_put_vf:
+       ice_put_vf(vf);
+       return ret;
+}
+
+/**
+ * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
+ * @pf: PF associated with VFs
+ */
+static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
+{
+       struct ice_vf *vf;
+       unsigned int bkt;
+       int rate = 0;
+
+       rcu_read_lock();
+       ice_for_each_vf_rcu(pf, bkt, vf)
+               rate += vf->min_tx_rate;
+       rcu_read_unlock();
 
-       return status;
+       return rate;
 }
 
 /**
- * ice_mbx_report_malvf - Track and note malicious VF
- * @hw: pointer to the HW struct
- * @all_malvfs: all malicious VFs tracked by PF
- * @bitmap_len: length of bitmap in bits
- * @vf_id: relative virtual function ID of the malicious VF
- * @report_malvf: boolean to indicate if malicious VF must be reported
+ * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
+ * @vf: VF trying to configure min_tx_rate
+ * @min_tx_rate: min Tx rate in Mbps
  *
- * This function will update a bitmap that keeps track of the malicious
- * VFs attached to the PF. A malicious VF must be reported only once if
- * discovered between VF resets or loading so the function checks
- * the input vf_id against the bitmap to verify if the VF has been
- * detected in any previous mailbox iterations.
+ * Check if the min_tx_rate being passed in will cause oversubscription of the
+ * total min_tx_rate, based on the current link speed and the min_tx_rate
+ * already configured on the other VFs.
+ *
+ * Return true if the passed min_tx_rate would cause oversubscription, else
+ * return false
+ */
+static bool
+ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
+{
+       int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf));
+       int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
+
+       /* this VF's previous rate is being overwritten */
+       all_vfs_min_tx_rate -= vf->min_tx_rate;
+
+       if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
+               dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
+                       min_tx_rate, vf->vf_id,
+                       all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
+                       link_speed_mbps);
+               return true;
+       }
+
+       return false;
+}
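
A worked example of the check, with made-up numbers: on a 10000 Mbps link
where three other VFs already reserve 3000 Mbps each and this VF previously
reserved nothing, a new request for 2000 Mbps is rejected:

	int link_speed_mbps     = 10000;
	int all_vfs_min_tx_rate = 3 * 3000;	/* 9000 Mbps committed */
	int min_tx_rate         = 2000;		/* new request */

	/* 9000 + 2000 - 10000 = 1000 Mbps oversubscribed -> return true */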
+
+/**
+ * ice_set_vf_bw - set min/max VF bandwidth
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @min_tx_rate: Minimum Tx rate in Mbps
+ * @max_tx_rate: Maximum Tx rate in Mbps
  */
 int
-ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
-                    u16 bitmap_len, u16 vf_id, bool *report_malvf)
+ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+             int max_tx_rate)
 {
-       if (!all_malvfs || !report_malvf)
+       struct ice_pf *pf = ice_netdev_to_pf(netdev);
+       struct ice_vsi *vsi;
+       struct device *dev;
+       struct ice_vf *vf;
+       int ret;
+
+       dev = ice_pf_to_dev(pf);
+
+       vf = ice_get_vf_by_id(pf, vf_id);
+       if (!vf)
                return -EINVAL;
 
-       *report_malvf = false;
+       ret = ice_check_vf_ready_for_cfg(vf);
+       if (ret)
+               goto out_put_vf;
+
+       vsi = ice_get_vf_vsi(vf);
+
+       /* when max_tx_rate is zero that means no max Tx rate limiting, so only
+        * check if max_tx_rate is non-zero
+        */
+       if (max_tx_rate && min_tx_rate > max_tx_rate) {
+               dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n",
+                       min_tx_rate, max_tx_rate);
+               ret = -EINVAL;
+               goto out_put_vf;
+       }
+
+       if (min_tx_rate && ice_is_dcb_active(pf)) {
+               dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
+               ret = -EOPNOTSUPP;
+               goto out_put_vf;
+       }
+
+       if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) {
+               ret = -EINVAL;
+               goto out_put_vf;
+       }
+
+       if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
+               ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
+               if (ret) {
+                       dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
+                               vf->vf_id);
+                       goto out_put_vf;
+               }
+
+               vf->min_tx_rate = min_tx_rate;
+       }
+
+       if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
+               ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
+               if (ret) {
+                       dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
+                               vf->vf_id);
+                       goto out_put_vf;
+               }
+
+               vf->max_tx_rate = max_tx_rate;
+       }
+
+out_put_vf:
+       ice_put_vf(vf);
+       return ret;
+}
+
+/**
+ * ice_get_vf_stats - populate some stats for the VF
+ * @netdev: the netdev of the PF
+ * @vf_id: the host OS identifier (0-255)
+ * @vf_stats: pointer to the OS memory to be initialized
+ */
+int ice_get_vf_stats(struct net_device *netdev, int vf_id,
+                    struct ifla_vf_stats *vf_stats)
+{
+       struct ice_pf *pf = ice_netdev_to_pf(netdev);
+       struct ice_eth_stats *stats;
+       struct ice_vsi *vsi;
+       struct ice_vf *vf;
+       int ret;
 
-       if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
+       vf = ice_get_vf_by_id(pf, vf_id);
+       if (!vf)
                return -EINVAL;
 
-       if (vf_id >= bitmap_len)
-               return -EIO;
+       ret = ice_check_vf_ready_for_cfg(vf);
+       if (ret)
+               goto out_put_vf;
 
-       /* If the vf_id is found in the bitmap set bit and boolean to true */
-       if (!test_and_set_bit(vf_id, all_malvfs))
-               *report_malvf = true;
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               ret = -EINVAL;
+               goto out_put_vf;
+       }
 
-       return 0;
+       ice_update_eth_stats(vsi);
+       stats = &vsi->eth_stats;
+
+       memset(vf_stats, 0, sizeof(*vf_stats));
+
+       vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
+               stats->rx_multicast;
+       vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
+               stats->tx_multicast;
+       vf_stats->rx_bytes   = stats->rx_bytes;
+       vf_stats->tx_bytes   = stats->tx_bytes;
+       vf_stats->broadcast  = stats->rx_broadcast;
+       vf_stats->multicast  = stats->rx_multicast;
+       vf_stats->rx_dropped = stats->rx_discards;
+       vf_stats->tx_dropped = stats->tx_discards;
+
+out_put_vf:
+       ice_put_vf(vf);
+       return ret;
+}
+
+/**
+ * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported
+ * @hw: hardware structure used to check the VLAN mode
+ * @vlan_proto: VLAN TPID being checked
+ *
+ * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q
+ * and ETH_P_8021AD are supported. If the device is configured in Single VLAN
+ * Mode (SVM), then only ETH_P_8021Q is supported.
+ */
+static bool
+ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto)
+{
+       bool is_supported = false;
+
+       switch (vlan_proto) {
+       case ETH_P_8021Q:
+               is_supported = true;
+               break;
+       case ETH_P_8021AD:
+               if (ice_is_dvm_ena(hw))
+                       is_supported = true;
+               break;
+       }
+
+       return is_supported;
 }
 
 /**
- * ice_mbx_clear_malvf - Clear VF bitmap and counter for VF ID
- * @snap: pointer to the mailbox snapshot structure
- * @all_malvfs: all malicious VFs tracked by PF
- * @bitmap_len: length of bitmap in bits
- * @vf_id: relative virtual function ID of the malicious VF
+ * ice_set_vf_port_vlan
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @vlan_id: VLAN ID being set
+ * @qos: priority setting
+ * @vlan_proto: VLAN protocol
  *
- * In case of a VF reset, this function can be called to clear
- * the bit corresponding to the VF ID in the bitmap tracking all
- * malicious VFs attached to the PF. The function also clears the
- * VF counter array at the index of the VF ID. This is to ensure
- * that the new VF loaded is not considered malicious before going
- * through the overflow detection algorithm.
+ * Program the VF port VLAN ID and/or QoS.
  */
 int
-ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
-                   u16 bitmap_len, u16 vf_id)
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+                    __be16 vlan_proto)
 {
-       if (!snap || !all_malvfs)
+       struct ice_pf *pf = ice_netdev_to_pf(netdev);
+       u16 local_vlan_proto = ntohs(vlan_proto);
+       struct device *dev;
+       struct ice_vf *vf;
+       int ret;
+
+       dev = ice_pf_to_dev(pf);
+
+       if (vlan_id >= VLAN_N_VID || qos > 7) {
+               dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
+                       vf_id, vlan_id, qos);
                return -EINVAL;
+       }
+
+       if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
+               dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n",
+                       local_vlan_proto);
+               return -EPROTONOSUPPORT;
+       }
 
-       if (bitmap_len < snap->mbx_vf.vfcntr_len)
+       vf = ice_get_vf_by_id(pf, vf_id);
+       if (!vf)
                return -EINVAL;
 
-       /* Ensure VF ID value is not larger than bitmap or VF counter length */
-       if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
-               return -EIO;
+       ret = ice_check_vf_ready_for_cfg(vf);
+       if (ret)
+               goto out_put_vf;
 
-       /* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
-       clear_bit(vf_id, all_malvfs);
+       if (ice_vf_get_port_vlan_prio(vf) == qos &&
+           ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto &&
+           ice_vf_get_port_vlan_id(vf) == vlan_id) {
+               /* duplicate request, so just return success */
+               dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n",
+                       vlan_id, qos, local_vlan_proto);
+               ret = 0;
+               goto out_put_vf;
+       }
 
-       /* Clear the VF counter in the mailbox snapshot structure for that VF ID.
-        * This is to ensure that if a VF is unloaded and a new one brought back
-        * up with the same VF ID for a snapshot currently in traversal or detect
-        * state the counter for that VF ID does not increment on top of existing
-        * values in the mailbox overflow detection algorithm.
-        */
-       snap->mbx_vf.vf_cntr[vf_id] = 0;
+       mutex_lock(&vf->cfg_lock);
 
-       return 0;
+       vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos);
+       if (ice_vf_is_port_vlan_ena(vf))
+               dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n",
+                        vlan_id, qos, local_vlan_proto, vf_id);
+       else
+               dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
+
+       ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+       mutex_unlock(&vf->cfg_lock);
+
+out_put_vf:
+       ice_put_vf(vf);
+       return ret;
 }
 
 /**
- * ice_mbx_init_snapshot - Initialize mailbox snapshot structure
- * @hw: pointer to the hardware structure
- * @vf_count: number of VFs allocated on a PF
- *
- * Clear the mailbox snapshot structure and allocate memory
- * for the VF counter array based on the number of VFs allocated
- * on that PF.
+ * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
+ * @vf: pointer to the VF structure
+ */
+void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
+{
+       struct ice_pf *pf = vf->pf;
+       struct device *dev;
+
+       dev = ice_pf_to_dev(pf);
+
+       dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
+                vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
+                vf->dev_lan_addr.addr,
+                test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
+                         ? "on" : "off");
+}
+
+/**
+ * ice_print_vfs_mdd_events - print VFs' malicious driver detect events
+ * @pf: pointer to the PF structure
  *
- * Assumption: This function will assume ice_get_caps() has already been
- * called to ensure that the vf_count can be compared against the number
- * of VFs supported as defined in the functional capabilities of the device.
+ * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
  */
-int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
+void ice_print_vfs_mdd_events(struct ice_pf *pf)
 {
-       struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+       struct device *dev = ice_pf_to_dev(pf);
+       struct ice_hw *hw = &pf->hw;
+       struct ice_vf *vf;
+       unsigned int bkt;
 
-       /* Ensure that the number of VFs allocated is non-zero and
-        * is not greater than the number of supported VFs defined in
-        * the functional capabilities of the PF.
-        */
-       if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
-               return -EINVAL;
+       /* check that there are pending MDD events to print */
+       if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
+               return;
 
-       snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count,
-                                           sizeof(*snap->mbx_vf.vf_cntr),
-                                           GFP_KERNEL);
-       if (!snap->mbx_vf.vf_cntr)
-               return -ENOMEM;
+       /* VF MDD event logs are rate limited to one second intervals */
+       if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
+               return;
 
-       /* Setting the VF counter length to the number of allocated
-        * VFs for given PF's functional capabilities.
-        */
-       snap->mbx_vf.vfcntr_len = vf_count;
+       pf->vfs.last_printed_mdd_jiffies = jiffies;
 
-       /* Clear mbx_buf in the mailbox snaphot structure and setting the
-        * mailbox snapshot state to a new capture.
-        */
-       memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
-       snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+       mutex_lock(&pf->vfs.table_lock);
+       ice_for_each_vf(pf, bkt, vf) {
+               /* only print Rx MDD event message if there are new events */
+               if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
+                       vf->mdd_rx_events.last_printed =
+                                                       vf->mdd_rx_events.count;
+                       ice_print_vf_rx_mdd_event(vf);
+               }
 
-       return 0;
+               /* only print Tx MDD event message if there are new events */
+               if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
+                       vf->mdd_tx_events.last_printed =
+                                                       vf->mdd_tx_events.count;
+
+                       dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
+                                vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
+                                vf->dev_lan_addr.addr);
+               }
+       }
+       mutex_unlock(&pf->vfs.table_lock);
 }
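
The one-second throttle above relies on the usual jiffies idiom:
time_is_after_jiffies(x) is true while x is still in the future, so any event
arriving within HZ jiffies of the last report is skipped. A condensed sketch
of the idiom (last_print is a placeholder, not an ice field name):

	static unsigned long last_print;	/* jiffies of the last report */

	if (time_is_after_jiffies(last_print + HZ))
		return;				/* < 1s since last report */
	last_print = jiffies;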
 
 /**
- * ice_mbx_deinit_snapshot - Free mailbox snapshot structure
- * @hw: pointer to the hardware structure
+ * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
+ * @pdev: pointer to a pci_dev structure
  *
- * Clear the mailbox snapshot structure and free the VF counter array.
+ * Called when recovering from a PF FLR to restore interrupt capability to
+ * the VFs.
+ */
+void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
+{
+       u16 vf_id;
+       int pos;
+
+       if (!pci_num_vf(pdev))
+               return;
+
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+       if (pos) {
+               struct pci_dev *vfdev;
+
+               pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
+                                    &vf_id);
+               vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
+               while (vfdev) {
+                       if (vfdev->is_virtfn && vfdev->physfn == pdev)
+                               pci_restore_msi_state(vfdev);
+                       vfdev = pci_get_device(pdev->vendor, vf_id,
+                                              vfdev);
+               }
+       }
+}
+
+/**
+ * ice_is_malicious_vf - helper function to detect a malicious VF
+ * @pf: ptr to struct ice_pf
+ * @event: pointer to the AQ event
+ * @num_msg_proc: the number of messages processed so far
+ * @num_msg_pending: the number of messages pending in the admin queue
  */
-void ice_mbx_deinit_snapshot(struct ice_hw *hw)
+bool
+ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
+                   u16 num_msg_proc, u16 num_msg_pending)
 {
-       struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+       s16 vf_id = le16_to_cpu(event->desc.retval);
+       struct device *dev = ice_pf_to_dev(pf);
+       struct ice_mbx_data mbxdata;
+       bool malvf = false;
+       struct ice_vf *vf;
+       int status;
+
+       vf = ice_get_vf_by_id(pf, vf_id);
+       if (!vf)
+               return false;
+
+       if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
+               goto out_put_vf;
 
-       /* Free VF counter array and reset VF counter length */
-       devm_kfree(ice_hw_to_dev(hw), snap->mbx_vf.vf_cntr);
-       snap->mbx_vf.vfcntr_len = 0;
+       mbxdata.num_msg_proc = num_msg_proc;
+       mbxdata.num_pending_arq = num_msg_pending;
+       mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
+#define ICE_MBX_OVERFLOW_WATERMARK 64
+       mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
+
+       /* check to see if we have a malicious VF */
+       status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
+       if (status)
+               goto out_put_vf;
+
+       if (malvf) {
+               bool report_vf = false;
+
+               /* if the VF is malicious and we haven't let the user
+                * know about it, then let them know now
+                */
+               status = ice_mbx_report_malvf(&pf->hw, pf->vfs.malvfs,
+                                             ICE_MAX_SRIOV_VFS, vf_id,
+                                             &report_vf);
+               if (status)
+                       dev_dbg(dev, "Error reporting malicious VF\n");
+
+               if (report_vf) {
+                       struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
+
+                       if (pf_vsi)
+                               dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
+                                        &vf->dev_lan_addr.addr[0],
+                                        pf_vsi->netdev->dev_addr);
+               }
+       }
 
-       /* Clear mbx_buf in the mailbox snaphot structure */
-       memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+out_put_vf:
+       ice_put_vf(vf);
+       return malvf;
 }
index 68686a3..955ab81 100644 (file)
 
 #ifndef _ICE_SRIOV_H_
 #define _ICE_SRIOV_H_
+#include "ice_virtchnl_fdir.h"
+#include "ice_vf_lib.h"
+#include "ice_virtchnl.h"
 
-#include "ice_type.h"
-#include "ice_controlq.h"
+/* Static VF transaction/status register def */
+#define VF_DEVICE_STATUS               0xAA
+#define VF_TRANS_PENDING_M             0x20
 
-/* Defining the mailbox message threshold as 63 asynchronous
- * pending messages. Normal VF functionality does not require
- * sending more than 63 asynchronous pending message.
- */
-#define ICE_ASYNC_VF_MSG_THRESHOLD     63
+/* wait defines for polling PF_PCI_CIAD register status */
+#define ICE_PCI_CIAD_WAIT_COUNT                100
+#define ICE_PCI_CIAD_WAIT_DELAY_US     1
+
+/* VF resource constraints */
+#define ICE_MIN_QS_PER_VF              1
+#define ICE_NONQ_VECS_VF               1
+#define ICE_NUM_VF_MSIX_MED            17
+#define ICE_NUM_VF_MSIX_SMALL          5
+#define ICE_NUM_VF_MSIX_MULTIQ_MIN     3
+#define ICE_MIN_INTR_PER_VF            (ICE_MIN_QS_PER_VF + 1)
+#define ICE_MAX_VF_RESET_TRIES         40
+#define ICE_MAX_VF_RESET_SLEEP_MS      20
 
 #ifdef CONFIG_PCI_IOV
+void ice_process_vflr_event(struct ice_pf *pf);
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
 int
-ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
-                     u8 *msg, u16 msglen, struct ice_sq_cd *cd);
+ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
+
+void ice_free_vfs(struct ice_pf *pf);
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
+void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
+bool
+ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
+                   u16 num_msg_proc, u16 num_msg_pending);
 
-u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
 int
-ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
-                        u16 vf_id, bool *is_mal_vf);
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+                    __be16 vlan_proto);
+
 int
-ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
-                   u16 bitmap_len, u16 vf_id);
-int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
-void ice_mbx_deinit_snapshot(struct ice_hw *hw);
+ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+             int max_tx_rate);
+
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
+
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
+
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
+
+int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
+
 int
-ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
-                    u16 bitmap_len, u16 vf_id, bool *report_malvf);
+ice_get_vf_stats(struct net_device *netdev, int vf_id,
+                struct ifla_vf_stats *vf_stats);
+void
+ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
+void ice_print_vfs_mdd_events(struct ice_pf *pf);
+void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
+bool
+ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
 #else /* CONFIG_PCI_IOV */
+static inline void ice_process_vflr_event(struct ice_pf *pf) { }
+static inline void ice_free_vfs(struct ice_pf *pf) { }
+static inline
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { }
+static inline
+void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
+static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
+static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
+static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
+
+static inline bool
+ice_is_malicious_vf(struct ice_pf __always_unused *pf,
+                   struct ice_rq_event_info __always_unused *event,
+                   u16 __always_unused num_msg_proc,
+                   u16 __always_unused num_msg_pending)
+{
+       return false;
+}
+
 static inline int
-ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
-                     u16 __always_unused vfid, u32 __always_unused v_opcode,
-                     u32 __always_unused v_retval, u8 __always_unused *msg,
-                     u16 __always_unused msglen,
-                     struct ice_sq_cd __always_unused *cd)
+ice_sriov_configure(struct pci_dev __always_unused *pdev,
+                   int __always_unused num_vfs)
 {
-       return 0;
+       return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_mac(struct net_device __always_unused *netdev,
+              int __always_unused vf_id, u8 __always_unused *mac)
+{
+       return -EOPNOTSUPP;
 }
 
-static inline u32
-ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support,
-                               u16 __always_unused link_speed)
+static inline int
+ice_get_vf_cfg(struct net_device __always_unused *netdev,
+              int __always_unused vf_id,
+              struct ifla_vf_info __always_unused *ivi)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_trust(struct net_device __always_unused *netdev,
+                int __always_unused vf_id, bool __always_unused trusted)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_port_vlan(struct net_device __always_unused *netdev,
+                    int __always_unused vf_id, u16 __always_unused vid,
+                    u8 __always_unused qos, __be16 __always_unused v_proto)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_spoofchk(struct net_device __always_unused *netdev,
+                   int __always_unused vf_id, bool __always_unused ena)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_link_state(struct net_device __always_unused *netdev,
+                     int __always_unused vf_id, int __always_unused link_state)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_bw(struct net_device __always_unused *netdev,
+             int __always_unused vf_id, int __always_unused min_tx_rate,
+             int __always_unused max_tx_rate)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
+                   struct ice_q_vector __always_unused *q_vector)
 {
        return 0;
 }
 
+static inline int
+ice_get_vf_stats(struct net_device __always_unused *netdev,
+                int __always_unused vf_id,
+                struct ifla_vf_stats __always_unused *vf_stats)
+{
+       return -EOPNOTSUPP;
+}
 #endif /* CONFIG_PCI_IOV */
 #endif /* _ICE_SRIOV_H_ */
index 9c40a8d..25b8f6f 100644 (file)
@@ -41,6 +41,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
        { ICE_IPV4_OFOS,        14 },
        { ICE_NVGRE,            34 },
        { ICE_MAC_IL,           42 },
+       { ICE_ETYPE_IL,         54 },
        { ICE_IPV4_IL,          56 },
        { ICE_TCP_IL,           76 },
        { ICE_PROTOCOL_LAST,    0 },
@@ -65,7 +66,8 @@ static const u8 dummy_gre_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
-       0x08, 0x00,
+
+       0x08, 0x00,             /* ICE_ETYPE_IL 54 */
 
        0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
        0x00, 0x00, 0x00, 0x00,
@@ -86,6 +88,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
        { ICE_IPV4_OFOS,        14 },
        { ICE_NVGRE,            34 },
        { ICE_MAC_IL,           42 },
+       { ICE_ETYPE_IL,         54 },
        { ICE_IPV4_IL,          56 },
        { ICE_UDP_ILOS,         76 },
        { ICE_PROTOCOL_LAST,    0 },
@@ -110,7 +113,8 @@ static const u8 dummy_gre_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
-       0x08, 0x00,
+
+       0x08, 0x00,             /* ICE_ETYPE_IL 54 */
 
        0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
        0x00, 0x00, 0x00, 0x00,
@@ -131,6 +135,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
        { ICE_GENEVE,           42 },
        { ICE_VXLAN_GPE,        42 },
        { ICE_MAC_IL,           50 },
+       { ICE_ETYPE_IL,         62 },
        { ICE_IPV4_IL,          64 },
        { ICE_TCP_IL,           84 },
        { ICE_PROTOCOL_LAST,    0 },
@@ -158,7 +163,8 @@ static const u8 dummy_udp_tun_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
-       0x08, 0x00,
+
+       0x08, 0x00,             /* ICE_ETYPE_IL 62 */
 
        0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
        0x00, 0x01, 0x00, 0x00,
@@ -182,6 +188,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
        { ICE_GENEVE,           42 },
        { ICE_VXLAN_GPE,        42 },
        { ICE_MAC_IL,           50 },
+       { ICE_ETYPE_IL,         62 },
        { ICE_IPV4_IL,          64 },
        { ICE_UDP_ILOS,         84 },
        { ICE_PROTOCOL_LAST,    0 },
@@ -209,7 +216,8 @@ static const u8 dummy_udp_tun_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
-       0x08, 0x00,
+
+       0x08, 0x00,             /* ICE_ETYPE_IL 62 */
 
        0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
        0x00, 0x01, 0x00, 0x00,
@@ -221,6 +229,224 @@ static const u8 dummy_udp_tun_udp_packet[] = {
        0x00, 0x08, 0x00, 0x00,
 };
 
+static const struct ice_dummy_pkt_offsets
+dummy_gre_ipv6_tcp_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_ETYPE_OL,         12 },
+       { ICE_IPV4_OFOS,        14 },
+       { ICE_NVGRE,            34 },
+       { ICE_MAC_IL,           42 },
+       { ICE_ETYPE_IL,         54 },
+       { ICE_IPV6_IL,          56 },
+       { ICE_TCP_IL,           96 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_gre_ipv6_tcp_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x08, 0x00,             /* ICE_ETYPE_OL 12 */
+
+       0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x2F, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x86, 0xdd,             /* ICE_ETYPE_IL 54 */
+
+       0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
+       0x00, 0x08, 0x06, 0x40,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x50, 0x02, 0x20, 0x00,
+       0x00, 0x00, 0x00, 0x00
+};
+
+static const struct ice_dummy_pkt_offsets
+dummy_gre_ipv6_udp_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_ETYPE_OL,         12 },
+       { ICE_IPV4_OFOS,        14 },
+       { ICE_NVGRE,            34 },
+       { ICE_MAC_IL,           42 },
+       { ICE_ETYPE_IL,         54 },
+       { ICE_IPV6_IL,          56 },
+       { ICE_UDP_ILOS,         96 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_gre_ipv6_udp_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x08, 0x00,             /* ICE_ETYPE_OL 12 */
+
+       0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x2F, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x86, 0xdd,             /* ICE_ETYPE_IL 54 */
+
+       0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
+       0x00, 0x08, 0x11, 0x40,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */
+       0x00, 0x08, 0x00, 0x00,
+};
+
+static const struct ice_dummy_pkt_offsets
+dummy_udp_tun_ipv6_tcp_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_ETYPE_OL,         12 },
+       { ICE_IPV4_OFOS,        14 },
+       { ICE_UDP_OF,           34 },
+       { ICE_VXLAN,            42 },
+       { ICE_GENEVE,           42 },
+       { ICE_VXLAN_GPE,        42 },
+       { ICE_MAC_IL,           50 },
+       { ICE_ETYPE_IL,         62 },
+       { ICE_IPV6_IL,          64 },
+       { ICE_TCP_IL,           104 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_udp_tun_ipv6_tcp_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x08, 0x00,             /* ICE_ETYPE_OL 12 */
+
+       0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */
+       0x00, 0x01, 0x00, 0x00,
+       0x40, 0x11, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
+       0x00, 0x5a, 0x00, 0x00,
+
+       0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x86, 0xdd,             /* ICE_ETYPE_IL 62 */
+
+       0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
+       0x00, 0x08, 0x06, 0x40,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x50, 0x02, 0x20, 0x00,
+       0x00, 0x00, 0x00, 0x00
+};
+
+static const struct ice_dummy_pkt_offsets
+dummy_udp_tun_ipv6_udp_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_ETYPE_OL,         12 },
+       { ICE_IPV4_OFOS,        14 },
+       { ICE_UDP_OF,           34 },
+       { ICE_VXLAN,            42 },
+       { ICE_GENEVE,           42 },
+       { ICE_VXLAN_GPE,        42 },
+       { ICE_MAC_IL,           50 },
+       { ICE_ETYPE_IL,         62 },
+       { ICE_IPV6_IL,          64 },
+       { ICE_UDP_ILOS,         104 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_udp_tun_ipv6_udp_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x08, 0x00,             /* ICE_ETYPE_OL 12 */
+
+       0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */
+       0x00, 0x01, 0x00, 0x00,
+       0x00, 0x11, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
+       0x00, 0x4e, 0x00, 0x00,
+
+       0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x86, 0xdd,             /* ICE_ETYPE_IL 62 */
+
+       0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
+       0x00, 0x08, 0x11, 0x40,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */
+       0x00, 0x08, 0x00, 0x00,
+};
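
The hard-coded length fields in these templates stay consistent with the
layout: the outer IPv4 total length and the outer UDP length each count
their own header plus everything after it. A standalone sketch
(illustration only, not driver code) re-derives the values used in
dummy_udp_tun_ipv6_udp_packet:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t inner = 14 + 40 + 8;		/* inner MAC+EtherType, IPv6, UDP */
	uint16_t outer_udp = 8 + 8 + inner;	/* UDP hdr + VXLAN + inner frame */
	uint16_t outer_ip = 20 + outer_udp;	/* IPv4 hdr + the rest */

	assert(outer_udp == 0x4e);	/* the 0x00, 0x4e at ICE_UDP_OF 34 */
	assert(outer_ip == 0x62);	/* the 0x00, 0x62 at ICE_IPV4_OFOS 14 */
	return 0;
}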
+
 /* offset info for MAC + IPv4 + UDP dummy packet */
 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
@@ -266,106 +492,511 @@ static const u8 dummy_vlan_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
 
-       0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+       0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+
+       0x08, 0x00,             /* ICE_ETYPE_OL 16 */
+
+       0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
+       0x00, 0x01, 0x00, 0x00,
+       0x00, 0x11, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
+       0x00, 0x08, 0x00, 0x00,
+
+       0x00, 0x00,     /* 2 bytes for 4 byte alignment */
+};
+
+/* offset info for MAC + IPv4 + TCP dummy packet */
+static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_ETYPE_OL,         12 },
+       { ICE_IPV4_OFOS,        14 },
+       { ICE_TCP_IL,           34 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
+/* Dummy packet for MAC + IPv4 + TCP */
+static const u8 dummy_tcp_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x08, 0x00,             /* ICE_ETYPE_OL 12 */
+
+       0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
+       0x00, 0x01, 0x00, 0x00,
+       0x00, 0x06, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x50, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00,     /* 2 bytes for 4 byte alignment */
+};
+
+/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
+static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_VLAN_OFOS,        12 },
+       { ICE_ETYPE_OL,         16 },
+       { ICE_IPV4_OFOS,        18 },
+       { ICE_TCP_IL,           38 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
+/* C-tag (802.1Q), IPv4:TCP dummy packet */
+static const u8 dummy_vlan_tcp_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+
+       0x08, 0x00,             /* ICE_ETYPE_OL 16 */
+
+       0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
+       0x00, 0x01, 0x00, 0x00,
+       0x00, 0x06, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x50, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00,     /* 2 bytes for 4 byte alignment */
+};
+
+static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_ETYPE_OL,         12 },
+       { ICE_IPV6_OFOS,        14 },
+       { ICE_TCP_IL,           54 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_tcp_ipv6_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x86, 0xDD,             /* ICE_ETYPE_OL 12 */
+
+       0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+       0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x50, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* C-tag (802.1Q): IPv6 + TCP */
+static const struct ice_dummy_pkt_offsets
+dummy_vlan_tcp_ipv6_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_VLAN_OFOS,        12 },
+       { ICE_ETYPE_OL,         16 },
+       { ICE_IPV6_OFOS,        18 },
+       { ICE_TCP_IL,           58 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
+/* C-tag (802.1Q), IPv6 + TCP dummy packet */
+static const u8 dummy_vlan_tcp_ipv6_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+
+       0x86, 0xDD,             /* ICE_ETYPE_OL 16 */
+
+       0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
+       0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x50, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* IPv6 + UDP */
+static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_ETYPE_OL,         12 },
+       { ICE_IPV6_OFOS,        14 },
+       { ICE_UDP_ILOS,         54 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
+/* IPv6 + UDP dummy packet */
+static const u8 dummy_udp_ipv6_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x86, 0xDD,             /* ICE_ETYPE_OL 12 */
+
+       0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+       0x00, 0x10, 0x11, 0x00, /* Next header UDP */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
+       0x00, 0x10, 0x00, 0x00,
+
+       0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* C-tag (802.1Q): IPv6 + UDP */
+static const struct ice_dummy_pkt_offsets
+dummy_vlan_udp_ipv6_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_VLAN_OFOS,        12 },
+       { ICE_ETYPE_OL,         16 },
+       { ICE_IPV6_OFOS,        18 },
+       { ICE_UDP_ILOS,         58 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
+/* C-tag (802.1Q), IPv6 + UDP dummy packet */
+static const u8 dummy_vlan_udp_ipv6_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x81, 0x00, 0x00, 0x00,/* ICE_VLAN_OFOS 12 */
+
+       0x86, 0xDD,             /* ICE_ETYPE_OL 16 */
+
+       0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
+       0x00, 0x08, 0x11, 0x00, /* Next header UDP */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
+       0x00, 0x08, 0x00, 0x00,
+
+       0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
+static const
+struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_IPV4_OFOS,        14 },
+       { ICE_UDP_OF,           34 },
+       { ICE_GTP,              42 },
+       { ICE_IPV4_IL,          62 },
+       { ICE_TCP_IL,           82 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x08, 0x00,
+
+       0x45, 0x00, 0x00, 0x58, /* IP 14 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x11, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+       0x00, 0x44, 0x00, 0x00,
+
+       0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x85,
+
+       0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x45, 0x00, 0x00, 0x28, /* IP 62 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x06, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x00, 0x00, /* TCP 82 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x50, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
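
The GTP-U offsets assume a GTPv1 header with the E flag set: 8 mandatory
bytes, 4 optional bytes (sequence number, N-PDU number, next extension
type) and an 8-byte PDU session container extension header, 20 bytes in
all. A standalone sketch (illustration only, not driver code):

#include <assert.h>

int main(void)
{
	int udp_of = 14 + 20;		/* MAC + outer IPv4 */
	int gtp = udp_of + 8;		/* outer UDP header */
	int ipv4_il = gtp + 8 + 4 + 8;	/* GTP base + optional + PDU ext */
	int tcp_il = ipv4_il + 20;

	assert(udp_of == 34 && gtp == 42 && ipv4_il == 62 && tcp_il == 82);
	return 0;
}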
+
+/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
+static const
+struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_IPV4_OFOS,        14 },
+       { ICE_UDP_OF,           34 },
+       { ICE_GTP,              42 },
+       { ICE_IPV4_IL,          62 },
+       { ICE_UDP_ILOS,         82 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x08, 0x00,
+
+       0x45, 0x00, 0x00, 0x4c, /* IP 14 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x11, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+       0x00, 0x38, 0x00, 0x00,
+
+       0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x85,
+
+       0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x45, 0x00, 0x00, 0x1c, /* IP 62 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x11, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x00, 0x00, /* UDP 82 */
+       0x00, 0x08, 0x00, 0x00,
+
+       0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
+static const
+struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_IPV4_OFOS,        14 },
+       { ICE_UDP_OF,           34 },
+       { ICE_GTP,              42 },
+       { ICE_IPV6_IL,          62 },
+       { ICE_TCP_IL,           102 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x08, 0x00,
+
+       0x45, 0x00, 0x00, 0x6c, /* IP 14 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x11, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+       0x00, 0x58, 0x00, 0x00,
+
+       0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x85,
 
-       0x08, 0x00,             /* ICE_ETYPE_OL 16 */
+       0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
+       0x00, 0x00, 0x00, 0x00,
 
-       0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
-       0x00, 0x01, 0x00, 0x00,
-       0x00, 0x11, 0x00, 0x00,
+       0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
+       0x00, 0x14, 0x06, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
 
-       0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
-       0x00, 0x08, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, /* TCP 102 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x50, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
 
-       0x00, 0x00,     /* 2 bytes for 4 byte alignment */
+       0x00, 0x00, /* 2 bytes for 4 byte alignment */
 };
 
-/* offset info for MAC + IPv4 + TCP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
+static const
+struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
-       { ICE_ETYPE_OL,         12 },
        { ICE_IPV4_OFOS,        14 },
-       { ICE_TCP_IL,           34 },
+       { ICE_UDP_OF,           34 },
+       { ICE_GTP,              42 },
+       { ICE_IPV6_IL,          62 },
+       { ICE_UDP_ILOS,         102 },
        { ICE_PROTOCOL_LAST,    0 },
 };
 
-/* Dummy packet for MAC + IPv4 + TCP */
-static const u8 dummy_tcp_packet[] = {
-       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
+       0x08, 0x00,
 
-       0x08, 0x00,             /* ICE_ETYPE_OL 12 */
+       0x45, 0x00, 0x00, 0x60, /* IP 14 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x11, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
 
-       0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
-       0x00, 0x01, 0x00, 0x00,
-       0x00, 0x06, 0x00, 0x00,
+       0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+       0x00, 0x4c, 0x00, 0x00,
+
+       0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
        0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x85,
+
+       0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
        0x00, 0x00, 0x00, 0x00,
 
-       0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
+       0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
+       0x00, 0x08, 0x11, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
-       0x50, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
 
-       0x00, 0x00,     /* 2 bytes for 4 byte alignment */
+       0x00, 0x00, 0x00, 0x00, /* UDP 102 */
+       0x00, 0x08, 0x00, 0x00,
+
+       0x00, 0x00, /* 2 bytes for 4 byte alignment */
 };
 
-/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
+static const
+struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
-       { ICE_VLAN_OFOS,        12 },
-       { ICE_ETYPE_OL,         16 },
-       { ICE_IPV4_OFOS,        18 },
-       { ICE_TCP_IL,           38 },
+       { ICE_IPV6_OFOS,        14 },
+       { ICE_UDP_OF,           54 },
+       { ICE_GTP,              62 },
+       { ICE_IPV4_IL,          82 },
+       { ICE_TCP_IL,           102 },
        { ICE_PROTOCOL_LAST,    0 },
 };
 
-/* C-tag (801.1Q), IPv4:TCP dummy packet */
-static const u8 dummy_vlan_tcp_packet[] = {
-       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
+       0x86, 0xdd,
 
-       0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+       0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+       0x00, 0x44, 0x11, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
 
-       0x08, 0x00,             /* ICE_ETYPE_OL 16 */
+       0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+       0x00, 0x44, 0x00, 0x00,
 
-       0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
-       0x00, 0x01, 0x00, 0x00,
+       0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x85,
+
+       0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x45, 0x00, 0x00, 0x28, /* IP 82 */
+       0x00, 0x00, 0x00, 0x00,
        0x00, 0x06, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
 
-       0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
+       0x00, 0x00, 0x00, 0x00, /* TCP 102 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
 
-       0x00, 0x00,     /* 2 bytes for 4 byte alignment */
+       0x00, 0x00, /* 2 bytes for 4 byte alignment */
 };
 
-static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
+static const
+struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
-       { ICE_ETYPE_OL,         12 },
        { ICE_IPV6_OFOS,        14 },
-       { ICE_TCP_IL,           54 },
+       { ICE_UDP_OF,           54 },
+       { ICE_GTP,              62 },
+       { ICE_IPV4_IL,          82 },
+       { ICE_UDP_ILOS,         102 },
        { ICE_PROTOCOL_LAST,    0 },
 };
 
-static const u8 dummy_tcp_ipv6_packet[] = {
-       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
+       0x86, 0xdd,
 
-       0x86, 0xDD,             /* ICE_ETYPE_OL 12 */
-
-       0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
-       0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+       0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+       0x00, 0x38, 0x11, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
@@ -375,38 +1006,68 @@ static const u8 dummy_tcp_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
 
-       0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
+       0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+       0x00, 0x38, 0x00, 0x00,
+
+       0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
        0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x85,
+
+       0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x45, 0x00, 0x00, 0x1c, /* IP 82 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x11, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
-       0x50, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
 
+       0x00, 0x00, 0x00, 0x00, /* UDP 102 */
+       0x00, 0x08, 0x00, 0x00,
+
        0x00, 0x00, /* 2 bytes for 4 byte alignment */
 };
 
-/* C-tag (802.1Q): IPv6 + TCP */
-static const struct ice_dummy_pkt_offsets
-dummy_vlan_tcp_ipv6_packet_offsets[] = {
+static const
+struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
-       { ICE_VLAN_OFOS,        12 },
-       { ICE_ETYPE_OL,         16 },
-       { ICE_IPV6_OFOS,        18 },
-       { ICE_TCP_IL,           58 },
+       { ICE_IPV6_OFOS,        14 },
+       { ICE_UDP_OF,           54 },
+       { ICE_GTP,              62 },
+       { ICE_IPV6_IL,          82 },
+       { ICE_TCP_IL,           122 },
        { ICE_PROTOCOL_LAST,    0 },
 };
 
-/* C-tag (802.1Q), IPv6 + TCP dummy packet */
-static const u8 dummy_vlan_tcp_ipv6_packet[] = {
-       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
+       0x86, 0xdd,
 
-       0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+       0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+       0x00, 0x58, 0x11, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
 
-       0x86, 0xDD,             /* ICE_ETYPE_OL 16 */
+       0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+       0x00, 0x58, 0x00, 0x00,
 
-       0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
-       0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+       0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x85,
+
+       0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
+       0x00, 0x14, 0x06, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
@@ -416,7 +1077,7 @@ static const u8 dummy_vlan_tcp_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
 
-       0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
+       0x00, 0x00, 0x00, 0x00, /* TCP 122 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00,
@@ -425,25 +1086,25 @@ static const u8 dummy_vlan_tcp_ipv6_packet[] = {
        0x00, 0x00, /* 2 bytes for 4 byte alignment */
 };
 
-/* IPv6 + UDP */
-static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
+static const
+struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
-       { ICE_ETYPE_OL,         12 },
        { ICE_IPV6_OFOS,        14 },
-       { ICE_UDP_ILOS,         54 },
+       { ICE_UDP_OF,           54 },
+       { ICE_GTP,              62 },
+       { ICE_IPV6_IL,          82 },
+       { ICE_UDP_ILOS,         122 },
        { ICE_PROTOCOL_LAST,    0 },
 };
 
-/* IPv6 + UDP dummy packet */
-static const u8 dummy_udp_ipv6_packet[] = {
-       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
+       0x86, 0xdd,
 
-       0x86, 0xDD,             /* ICE_ETYPE_OL 12 */
-
-       0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
-       0x00, 0x10, 0x11, 0x00, /* Next header UDP */
+       0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+       0x00, 0x4c, 0x11, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
@@ -453,38 +1114,89 @@ static const u8 dummy_udp_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
 
-       0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
-       0x00, 0x10, 0x00, 0x00,
+       0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+       0x00, 0x4c, 0x00, 0x00,
 
-       0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
+       0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x85,
+
+       0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
        0x00, 0x00, 0x00, 0x00,
 
+       0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
+       0x00, 0x08, 0x11, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00, 0x00, 0x00, /* UDP 122 */
+       0x00, 0x08, 0x00, 0x00,
+
        0x00, 0x00, /* 2 bytes for 4 byte alignment */
 };
 
-/* C-tag (802.1Q): IPv6 + UDP */
-static const struct ice_dummy_pkt_offsets
-dummy_vlan_udp_ipv6_packet_offsets[] = {
+static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
+       0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x08, 0x00,
+
+       0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
+       0x00, 0x00, 0x40, 0x00,
+       0x40, 0x11, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+
+       0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x85,
+
+       0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
+       0x00, 0x00, 0x40, 0x00,
+       0x40, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00,     /* 2 bytes for 4 byte alignment */
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
-       { ICE_VLAN_OFOS,        12 },
-       { ICE_ETYPE_OL,         16 },
-       { ICE_IPV6_OFOS,        18 },
-       { ICE_UDP_ILOS,         58 },
+       { ICE_IPV4_OFOS,        14 },
+       { ICE_UDP_OF,           34 },
+       { ICE_GTP_NO_PAY,       42 },
        { ICE_PROTOCOL_LAST,    0 },
 };
 
-/* C-tag (802.1Q), IPv6 + UDP dummy packet */
-static const u8 dummy_vlan_udp_ipv6_packet[] = {
+static const
+struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
+       { ICE_MAC_OFOS,         0 },
+       { ICE_IPV6_OFOS,        14 },
+       { ICE_UDP_OF,           54 },
+       { ICE_GTP_NO_PAY,       62 },
+       { ICE_PROTOCOL_LAST,    0 },
+};
+
+static const u8 dummy_ipv6_gtp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
+       0x86, 0xdd,
 
-       0x81, 0x00, 0x00, 0x00,/* ICE_VLAN_OFOS 12 */
-
-       0x86, 0xDD,             /* ICE_ETYPE_OL 16 */
-
-       0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
-       0x00, 0x08, 0x11, 0x00, /* Next header UDP */
+       0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+       0x00, 0x6c, 0x11, 0x00, /* Next header UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
@@ -494,10 +1206,13 @@ static const u8 dummy_vlan_udp_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
 
-       0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
-       0x00, 0x08, 0x00, 0x00,
+       0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
+       0x00, 0x00, 0x00, 0x00,
 
-       0x00, 0x00, /* 2 bytes for 4 byte alignment */
+       0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
+       0x00, 0x00, 0x00, 0x00,
+
+       0x00, 0x00,     /* 2 bytes for 4 byte alignment */
 };
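
The leading GTP flag bytes differ between the templates: 0x34 marks a
GTPv1 header with the extension flag set (hence the 0x85 next-extension
type, the PDU session container, and message type 0xff for G-PDU), while
the no-payload template above uses 0x30, i.e. no optional fields. A
standalone decoder (illustration only, not driver code):

#include <stdint.h>
#include <stdio.h>

static void decode(uint8_t flags)
{
	printf("version=%u pt=%u ext=%u seq=%u npdu=%u\n",
	       flags >> 5,		/* 1 for GTPv1 */
	       (flags >> 4) & 1,	/* protocol type, 1 = GTP */
	       (flags >> 2) & 1,	/* E: extension header present */
	       (flags >> 1) & 1,	/* S: sequence number present */
	       flags & 1);		/* PN: N-PDU number present */
}

int main(void)
{
	decode(0x34);	/* templates carrying a PDU session extension */
	decode(0x30);	/* the no-payload template: no optional fields */
	return 0;
}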
 
 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
@@ -3818,6 +4533,7 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
        { ICE_MAC_OFOS,         { 0, 2, 4, 6, 8, 10, 12 } },
        { ICE_MAC_IL,           { 0, 2, 4, 6, 8, 10, 12 } },
        { ICE_ETYPE_OL,         { 0 } },
+       { ICE_ETYPE_IL,         { 0 } },
        { ICE_VLAN_OFOS,        { 2, 0 } },
        { ICE_IPV4_OFOS,        { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
        { ICE_IPV4_IL,          { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
@@ -3830,13 +4546,16 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
        { ICE_UDP_ILOS,         { 0, 2 } },
        { ICE_VXLAN,            { 8, 10, 12, 14 } },
        { ICE_GENEVE,           { 8, 10, 12, 14 } },
-       { ICE_NVGRE,            { 0, 2, 4, 6 } },
+       { ICE_NVGRE,            { 0, 2, 4, 6 } },
+       { ICE_GTP,              { 8, 10, 12, 14, 16, 18, 20, 22 } },
+       { ICE_GTP_NO_PAY,       { 8, 10, 12, 14 } },
 };
 
 static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
        { ICE_MAC_OFOS,         ICE_MAC_OFOS_HW },
        { ICE_MAC_IL,           ICE_MAC_IL_HW },
        { ICE_ETYPE_OL,         ICE_ETYPE_OL_HW },
+       { ICE_ETYPE_IL,         ICE_ETYPE_IL_HW },
        { ICE_VLAN_OFOS,        ICE_VLAN_OL_HW },
        { ICE_IPV4_OFOS,        ICE_IPV4_OFOS_HW },
        { ICE_IPV4_IL,          ICE_IPV4_IL_HW },
@@ -3847,7 +4566,9 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
        { ICE_UDP_ILOS,         ICE_UDP_ILOS_HW },
        { ICE_VXLAN,            ICE_UDP_OF_HW },
        { ICE_GENEVE,           ICE_UDP_OF_HW },
-       { ICE_NVGRE,            ICE_GRE_OF_HW },
+       { ICE_NVGRE,            ICE_GRE_OF_HW },
+       { ICE_GTP,              ICE_UDP_OF_HW },
+       { ICE_GTP_NO_PAY,       ICE_UDP_ILOS_HW },
 };
 
 /**
@@ -4506,41 +5227,6 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
        return status;
 }
 
-/**
- * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
- * @hw: pointer to hardware structure
- * @lkups: lookup elements or match criteria for the advanced recipe, one
- *        structure per protocol header
- * @lkups_cnt: number of protocols
- * @bm: bitmap of field vectors to consider
- * @fv_list: pointer to a list that holds the returned field vectors
- */
-static int
-ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
-          unsigned long *bm, struct list_head *fv_list)
-{
-       u8 *prot_ids;
-       int status;
-       u16 i;
-
-       prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);
-       if (!prot_ids)
-               return -ENOMEM;
-
-       for (i = 0; i < lkups_cnt; i++)
-               if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
-                       status = -EIO;
-                       goto free_mem;
-               }
-
-       /* Find field vectors that include all specified protocol types */
-       status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
-
-free_mem:
-       kfree(prot_ids);
-       return status;
-}
-
 /**
  * ice_tun_type_match_word - determine if tun type needs a match mask
  * @tun_type: tunnel type
@@ -4552,6 +5238,8 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
        case ICE_SW_TUN_GENEVE:
        case ICE_SW_TUN_VXLAN:
        case ICE_SW_TUN_NVGRE:
+       case ICE_SW_TUN_GTPU:
+       case ICE_SW_TUN_GTPC:
                *mask = ICE_TUN_FLAG_MASK;
                return true;
 
@@ -4617,6 +5305,12 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
        case ICE_SW_TUN_NVGRE:
                prof_type = ICE_PROF_TUN_GRE;
                break;
+       case ICE_SW_TUN_GTPU:
+               prof_type = ICE_PROF_TUN_GTPU;
+               break;
+       case ICE_SW_TUN_GTPC:
+               prof_type = ICE_PROF_TUN_GTPC;
+               break;
        case ICE_SW_TUN_AND_NON_TUN:
        default:
                prof_type = ICE_PROF_ALL;
@@ -4689,11 +5383,11 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 
        /* Get bitmap of field vectors (profiles) that are compatible with the
         * rule request; only these will be searched in the subsequent call to
-        * ice_get_fv.
+        * ice_get_sw_fv_list.
         */
        ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
 
-       status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
+       status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list);
        if (status)
                goto err_unroll;
 
@@ -4817,34 +5511,126 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
                      const u8 **pkt, u16 *pkt_len,
                      const struct ice_dummy_pkt_offsets **offsets)
 {
-       bool tcp = false, udp = false, ipv6 = false, vlan = false;
+       bool inner_tcp = false, inner_udp = false, outer_ipv6 = false;
+       bool vlan = false, inner_ipv6 = false, gtp_no_pay = false;
        u16 i;
 
        for (i = 0; i < lkups_cnt; i++) {
                if (lkups[i].type == ICE_UDP_ILOS)
-                       udp = true;
+                       inner_udp = true;
                else if (lkups[i].type == ICE_TCP_IL)
-                       tcp = true;
+                       inner_tcp = true;
                else if (lkups[i].type == ICE_IPV6_OFOS)
-                       ipv6 = true;
+                       outer_ipv6 = true;
                else if (lkups[i].type == ICE_VLAN_OFOS)
                        vlan = true;
                else if (lkups[i].type == ICE_ETYPE_OL &&
                         lkups[i].h_u.ethertype.ethtype_id ==
                                cpu_to_be16(ICE_IPV6_ETHER_ID) &&
                         lkups[i].m_u.ethertype.ethtype_id ==
-                                       cpu_to_be16(0xFFFF))
-                       ipv6 = true;
+                               cpu_to_be16(0xFFFF))
+                       outer_ipv6 = true;
+               else if (lkups[i].type == ICE_ETYPE_IL &&
+                        lkups[i].h_u.ethertype.ethtype_id ==
+                               cpu_to_be16(ICE_IPV6_ETHER_ID) &&
+                        lkups[i].m_u.ethertype.ethtype_id ==
+                               cpu_to_be16(0xFFFF))
+                       inner_ipv6 = true;
+               else if (lkups[i].type == ICE_IPV6_IL)
+                       inner_ipv6 = true;
+               else if (lkups[i].type == ICE_GTP_NO_PAY)
+                       gtp_no_pay = true;
+       }
+
+       if (tun_type == ICE_SW_TUN_GTPU) {
+               if (outer_ipv6) {
+                       if (gtp_no_pay) {
+                               *pkt = dummy_ipv6_gtp_packet;
+                               *pkt_len = sizeof(dummy_ipv6_gtp_packet);
+                               *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
+                       } else if (inner_ipv6) {
+                               if (inner_udp) {
+                                       *pkt = dummy_ipv6_gtpu_ipv6_udp_packet;
+                                       *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_udp_packet);
+                                       *offsets = dummy_ipv6_gtpu_ipv6_udp_packet_offsets;
+                               } else {
+                                       *pkt = dummy_ipv6_gtpu_ipv6_tcp_packet;
+                                       *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet);
+                                       *offsets = dummy_ipv6_gtpu_ipv6_tcp_packet_offsets;
+                               }
+                       } else {
+                               if (inner_udp) {
+                                       *pkt = dummy_ipv6_gtpu_ipv4_udp_packet;
+                                       *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_udp_packet);
+                                       *offsets = dummy_ipv6_gtpu_ipv4_udp_packet_offsets;
+                               } else {
+                                       *pkt = dummy_ipv6_gtpu_ipv4_tcp_packet;
+                                       *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet);
+                                       *offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets;
+                               }
+                       }
+               } else {
+                       if (gtp_no_pay) {
+                               *pkt = dummy_ipv4_gtpu_ipv4_packet;
+                               *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
+                               *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
+                       } else if (inner_ipv6) {
+                               if (inner_udp) {
+                                       *pkt = dummy_ipv4_gtpu_ipv6_udp_packet;
+                                       *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet);
+                                       *offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets;
+                               } else {
+                                       *pkt = dummy_ipv4_gtpu_ipv6_tcp_packet;
+                                       *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet);
+                                       *offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets;
+                               }
+                       } else {
+                               if (inner_udp) {
+                                       *pkt = dummy_ipv4_gtpu_ipv4_udp_packet;
+                                       *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet);
+                                       *offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets;
+                               } else {
+                                       *pkt = dummy_ipv4_gtpu_ipv4_tcp_packet;
+                                       *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet);
+                                       *offsets = dummy_ipv4_gtpu_ipv4_tcp_packet_offsets;
+                               }
+                       }
+               }
+               return;
+       }
+
+       if (tun_type == ICE_SW_TUN_GTPC) {
+               if (outer_ipv6) {
+                       *pkt = dummy_ipv6_gtp_packet;
+                       *pkt_len = sizeof(dummy_ipv6_gtp_packet);
+                       *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
+               } else {
+                       *pkt = dummy_ipv4_gtpu_ipv4_packet;
+                       *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
+                       *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
+               }
+               return;
        }
 
        if (tun_type == ICE_SW_TUN_NVGRE) {
-               if (tcp) {
+               if (inner_tcp && inner_ipv6) {
+                       *pkt = dummy_gre_ipv6_tcp_packet;
+                       *pkt_len = sizeof(dummy_gre_ipv6_tcp_packet);
+                       *offsets = dummy_gre_ipv6_tcp_packet_offsets;
+                       return;
+               }
+               if (inner_tcp) {
                        *pkt = dummy_gre_tcp_packet;
                        *pkt_len = sizeof(dummy_gre_tcp_packet);
                        *offsets = dummy_gre_tcp_packet_offsets;
                        return;
                }
-
+               if (inner_ipv6) {
+                       *pkt = dummy_gre_ipv6_udp_packet;
+                       *pkt_len = sizeof(dummy_gre_ipv6_udp_packet);
+                       *offsets = dummy_gre_ipv6_udp_packet_offsets;
+                       return;
+               }
                *pkt = dummy_gre_udp_packet;
                *pkt_len = sizeof(dummy_gre_udp_packet);
                *offsets = dummy_gre_udp_packet_offsets;
@@ -4853,20 +5639,31 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
 
        if (tun_type == ICE_SW_TUN_VXLAN ||
            tun_type == ICE_SW_TUN_GENEVE) {
-               if (tcp) {
+               if (inner_tcp && inner_ipv6) {
+                       *pkt = dummy_udp_tun_ipv6_tcp_packet;
+                       *pkt_len = sizeof(dummy_udp_tun_ipv6_tcp_packet);
+                       *offsets = dummy_udp_tun_ipv6_tcp_packet_offsets;
+                       return;
+               }
+               if (inner_tcp) {
                        *pkt = dummy_udp_tun_tcp_packet;
                        *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
                        *offsets = dummy_udp_tun_tcp_packet_offsets;
                        return;
                }
-
+               if (inner_ipv6) {
+                       *pkt = dummy_udp_tun_ipv6_udp_packet;
+                       *pkt_len = sizeof(dummy_udp_tun_ipv6_udp_packet);
+                       *offsets = dummy_udp_tun_ipv6_udp_packet_offsets;
+                       return;
+               }
                *pkt = dummy_udp_tun_udp_packet;
                *pkt_len = sizeof(dummy_udp_tun_udp_packet);
                *offsets = dummy_udp_tun_udp_packet_offsets;
                return;
        }
 
-       if (udp && !ipv6) {
+       if (inner_udp && !outer_ipv6) {
                if (vlan) {
                        *pkt = dummy_vlan_udp_packet;
                        *pkt_len = sizeof(dummy_vlan_udp_packet);
@@ -4877,7 +5674,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
                *pkt_len = sizeof(dummy_udp_packet);
                *offsets = dummy_udp_packet_offsets;
                return;
-       } else if (udp && ipv6) {
+       } else if (inner_udp && outer_ipv6) {
                if (vlan) {
                        *pkt = dummy_vlan_udp_ipv6_packet;
                        *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
@@ -4888,7 +5685,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
                *pkt_len = sizeof(dummy_udp_ipv6_packet);
                *offsets = dummy_udp_ipv6_packet_offsets;
                return;
-       } else if ((tcp && ipv6) || ipv6) {
+       } else if ((inner_tcp && outer_ipv6) || outer_ipv6) {
                if (vlan) {
                        *pkt = dummy_vlan_tcp_ipv6_packet;
                        *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
@@ -4965,6 +5762,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
                        len = sizeof(struct ice_ether_hdr);
                        break;
                case ICE_ETYPE_OL:
+               case ICE_ETYPE_IL:
                        len = sizeof(struct ice_ethtype_hdr);
                        break;
                case ICE_VLAN_OFOS:
@@ -4993,6 +5791,10 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
                case ICE_GENEVE:
                        len = sizeof(struct ice_udp_tnl_hdr);
                        break;
+               case ICE_GTP_NO_PAY:
+               case ICE_GTP:
+                       len = sizeof(struct ice_udp_gtp_hdr);
+                       break;
                default:
                        return -EINVAL;
                }
index 7b42c51..ed3d1d0 100644
 #define ICE_VSI_INVAL_ID 0xffff
 #define ICE_INVAL_Q_HANDLE 0xFFFF
 
+/* Switch Profile IDs for Profile related switch rules */
+#define ICE_PROFID_IPV4_GTPC_TEID                      41
+#define ICE_PROFID_IPV4_GTPC_NO_TEID                   42
+#define ICE_PROFID_IPV4_GTPU_TEID                      43
+#define ICE_PROFID_IPV6_GTPC_TEID                      44
+#define ICE_PROFID_IPV6_GTPC_NO_TEID                   45
+#define ICE_PROFID_IPV6_GTPU_TEID                      46
+#define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER            70
+
 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
        (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
 
index 65cf32e..3acd9f9 100644
@@ -24,6 +24,12 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
        if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
                lkups_cnt++;
 
+       if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
+               lkups_cnt++;
+
+       if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
+               lkups_cnt++;
+
        if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
                     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
                     ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
@@ -33,9 +39,7 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
        if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
                lkups_cnt++;
 
-       /* currently inner etype filter isn't supported */
-       if ((flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) &&
-           fltr->tunnel_type == TNL_LAST)
+       if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
                lkups_cnt++;
 
        /* are MAC fields specified? */
@@ -64,6 +68,11 @@ static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
        return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
 }
 
+static enum ice_protocol_type ice_proto_type_from_etype(bool inner)
+{
+       return inner ? ICE_ETYPE_IL : ICE_ETYPE_OL;
+}
+
 static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
 {
        return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
@@ -96,6 +105,11 @@ ice_proto_type_from_tunnel(enum ice_tunnel_type type)
                return ICE_GENEVE;
        case TNL_GRETAP:
                return ICE_NVGRE;
+       case TNL_GTPU:
+               /* NO_PAY profiles will not work with GTP-U */
+               return ICE_GTP;
+       case TNL_GTPC:
+               return ICE_GTP_NO_PAY;
        default:
                return 0;
        }
@@ -111,6 +125,10 @@ ice_sw_type_from_tunnel(enum ice_tunnel_type type)
                return ICE_SW_TUN_GENEVE;
        case TNL_GRETAP:
                return ICE_SW_TUN_NVGRE;
+       case TNL_GTPU:
+               return ICE_SW_TUN_GTPU;
+       case TNL_GTPC:
+               return ICE_SW_TUN_GTPC;
        default:
                return ICE_NON_TUN;
        }
@@ -137,7 +155,15 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
                        break;
                case TNL_GRETAP:
                        list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
-                       memcpy(&list[i].m_u.nvgre_hdr.tni_flow, "\xff\xff\xff\xff", 4);
+                       memcpy(&list[i].m_u.nvgre_hdr.tni_flow,
+                              "\xff\xff\xff\xff", 4);
+                       i++;
+                       break;
+               case TNL_GTPC:
+               case TNL_GTPU:
+                       list[i].h_u.gtp_hdr.teid = fltr->tenant_id;
+                       memcpy(&list[i].m_u.gtp_hdr.teid,
+                              "\xff\xff\xff\xff", 4);
                        i++;
                        break;
                default:
@@ -145,6 +171,33 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
                }
        }
 
+       if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) {
+               list[i].type = ice_proto_type_from_mac(false);
+               ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
+                               hdr->l2_key.dst_mac);
+               ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
+                               hdr->l2_mask.dst_mac);
+               i++;
+       }
+
+       if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
+           (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
+               list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
+
+               if (fltr->gtp_pdu_info_masks.pdu_type) {
+                       list[i].h_u.gtp_hdr.pdu_type =
+                               fltr->gtp_pdu_info_keys.pdu_type << 4;
+                       memcpy(&list[i].m_u.gtp_hdr.pdu_type, "\xf0", 1);
+               }
+
+               if (fltr->gtp_pdu_info_masks.qfi) {
+                       list[i].h_u.gtp_hdr.qfi = fltr->gtp_pdu_info_keys.qfi;
+                       memcpy(&list[i].m_u.gtp_hdr.qfi, "\x3f", 1);
+               }
+
+               i++;
+       }
+
        if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
                     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
                list[i].type = ice_proto_type_from_ipv4(false);
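
The shift and mask constants above reflect the PDU session container
byte layout: the PDU type occupies the upper nibble of its byte (hence
the << 4 and the "\xf0" mask) and the QFI the low six bits (hence
"\x3f"). A standalone check (illustration only, not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t pdu_type = 1;			/* key from the tc filter */
	uint8_t hdr_byte = (uint8_t)(pdu_type << 4);
	uint8_t qfi = 0x09;			/* 6-bit QoS flow identifier */

	assert(((hdr_byte & 0xf0) >> 4) == pdu_type);
	assert((qfi & 0x3f) == qfi);
	return 0;
}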
@@ -224,8 +277,10 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
 
                headers = &tc_fltr->inner_headers;
                inner = true;
-       } else if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
-               list[i].type = ICE_ETYPE_OL;
+       }
+
+       if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
+               list[i].type = ice_proto_type_from_etype(inner);
                list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
                list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
                i++;
@@ -344,6 +399,12 @@ static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
        if (netif_is_gretap(tunnel_dev) ||
            netif_is_ip6gretap(tunnel_dev))
                return TNL_GRETAP;
+
+       /* Assume GTP-U by default for a GTP netdev.
+        * GTP-C may be selected later, based on enc_dst_port.
+        */
+       if (netif_is_gtp(tunnel_dev))
+               return TNL_GTPU;
        return TNL_LAST;
 }
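
A hedged sketch of the netif_is_gtp() test used above, assuming it
follows the same rtnl_link_ops "kind" pattern as netif_is_gretap() and
friends (check include/linux/netdevice.h for the authoritative
definition):

/* Assumption: mirrors the usual rtnl_link_ops "kind" comparison. */
static inline bool netif_is_gtp_sketch(const struct net_device *dev)
{
	return dev->rtnl_link_ops &&
	       !strcmp(dev->rtnl_link_ops->kind, "gtp");
}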
 
@@ -743,6 +804,40 @@ ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
        return NULL;
 }
 
+/**
+ * ice_parse_gtp_type - Sets GTP tunnel type to GTP-U or GTP-C
+ * @match: Flow match structure
+ * @fltr: Pointer to filter structure
+ *
+ * GTP-C/GTP-U is selected based on destination port number (enc_dst_port).
+ * Before calling this function, fltr->tunnel_type should be set to TNL_GTPU,
+ * therefore making GTP-U the default choice (when destination port number is
+ * not specified).
+ */
+static int
+ice_parse_gtp_type(struct flow_match_ports match,
+                  struct ice_tc_flower_fltr *fltr)
+{
+       u16 dst_port;
+
+       if (match.key->dst) {
+               dst_port = be16_to_cpu(match.key->dst);
+
+               switch (dst_port) {
+               case 2152:
+                       break;
+               case 2123:
+                       fltr->tunnel_type = TNL_GTPC;
+                       break;
+               default:
+                       NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported GTP port number");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
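
The two literal port numbers are the IANA assignments for GTP: 2152 for
GTP-U (user plane) and 2123 for GTP-C (control plane). A standalone
mirror of the selection (the enum and helper names here are illustrative,
not the driver's):

#include <stdint.h>

enum gtp_tun { TUN_GTPU, TUN_GTPC, TUN_REJECT };

static enum gtp_tun classify_gtp_port(uint16_t dst_port)
{
	switch (dst_port) {
	case 2152:
		return TUN_GTPU;	/* GTP-U: user plane */
	case 2123:
		return TUN_GTPC;	/* GTP-C: control plane */
	default:
		return TUN_REJECT;	/* the driver returns -EINVAL */
	}
}

int main(void)
{
	return classify_gtp_port(2152) == TUN_GTPU ? 0 : 1;
}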
+
 static int
 ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
                      struct ice_tc_flower_fltr *fltr)
@@ -798,8 +893,28 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
                struct flow_match_ports match;
 
                flow_rule_match_enc_ports(rule, &match);
-               if (ice_tc_set_port(match, fltr, headers, true))
-                       return -EINVAL;
+
+               if (fltr->tunnel_type != TNL_GTPU) {
+                       if (ice_tc_set_port(match, fltr, headers, true))
+                               return -EINVAL;
+               } else {
+                       if (ice_parse_gtp_type(match, fltr))
+                               return -EINVAL;
+               }
+       }
+
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+               struct flow_match_enc_opts match;
+
+               flow_rule_match_enc_opts(rule, &match);
+
+               memcpy(&fltr->gtp_pdu_info_keys, &match.key->data[0],
+                      sizeof(struct gtp_pdu_session_info));
+
+               memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
+                      sizeof(struct gtp_pdu_session_info));
+
+               fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
        }
 
        return 0;
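
The two memcpy() calls interpret the enc_opts blob as a
struct gtp_pdu_session_info. Assuming the include/net/gtp.h definition
this series relies on, the layout is just two bytes (shown for reference
only):

struct gtp_pdu_session_info {
	u8 pdu_type;	/* matched via the 0xf0 upper-nibble mask */
	u8 qfi;		/* matched via the 0x3f six-bit mask */
};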
@@ -837,6 +952,7 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
              BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+             BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
              BIT(FLOW_DISSECTOR_KEY_PORTS))) {
                NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
@@ -1059,12 +1175,24 @@ ice_handle_tclass_action(struct ice_vsi *vsi,
         * this code won't do anything
         * 2. For non-tunnel, if user didn't specify MAC address, add implicit
         * dest MAC to be lower netdev's active unicast MAC address
+        * 3. For tunnels, TC-filter through the flower classifier currently
+        * has no provision for the user to specify the outer DMAC, so the
+        * driver implicitly adds the outer dest MAC as the lower netdev's
+        * active unicast MAC address.
         */
-       if (!(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) {
-               ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
-                               main_vsi->netdev->dev_addr);
-               eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
+       if (fltr->tunnel_type != TNL_LAST &&
+           !(fltr->flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC))
+               fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DST_MAC;
+
+       if (fltr->tunnel_type == TNL_LAST &&
+           !(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC))
                fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
+
+       if (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+                          ICE_TC_FLWR_FIELD_ENC_DST_MAC)) {
+               ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
+                               vsi->netdev->dev_addr);
+               memset(fltr->outer_headers.l2_mask.dst_mac, 0xff, ETH_ALEN);
        }
 
        /* validate specified dest MAC address, make sure either it belongs to
index 3190494..e25e958 100644
@@ -22,6 +22,7 @@
 #define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT      BIT(15)
 #define ICE_TC_FLWR_FIELD_ENC_DST_MAC          BIT(16)
 #define ICE_TC_FLWR_FIELD_ETH_TYPE_ID          BIT(17)
+#define ICE_TC_FLWR_FIELD_ENC_OPTS             BIT(18)
 
 #define ICE_TC_FLOWER_MASK_32   0xFFFFFFFF
 
@@ -119,6 +120,8 @@ struct ice_tc_flower_fltr {
        struct ice_tc_flower_lyr_2_4_hdrs inner_headers;
        struct ice_vsi *src_vsi;
        __be32 tenant_id;
+       struct gtp_pdu_session_info gtp_pdu_info_keys;
+       struct gtp_pdu_session_info gtp_pdu_info_masks;
        u32 flags;
        u8 tunnel_type;
        struct ice_tc_flower_action     action;
index cf68524..ae98d5a 100644 (file)
@@ -216,6 +216,30 @@ DEFINE_EVENT(ice_xmit_template, name, \
 DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring);
 DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring_drop);
 
+DECLARE_EVENT_CLASS(ice_tx_tstamp_template,
+                   TP_PROTO(struct sk_buff *skb, int idx),
+
+                   TP_ARGS(skb, idx),
+
+                   TP_STRUCT__entry(__field(void *, skb)
+                                    __field(int, idx)),
+
+                   TP_fast_assign(__entry->skb = skb;
+                                  __entry->idx = idx;),
+
+                   TP_printk("skb %pK idx %d",
+                             __entry->skb, __entry->idx)
+);
+#define DEFINE_TX_TSTAMP_OP_EVENT(name) \
+DEFINE_EVENT(ice_tx_tstamp_template, name, \
+            TP_PROTO(struct sk_buff *skb, int idx), \
+            TP_ARGS(skb, idx))
+
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_request);
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_req);
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_done);
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_complete);
+
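+/* Each DEFINE_EVENT above generates a trace_<name>() hook. A sketch of a
+ * call from the Tx timestamp path (illustrative only; "idx" stands for the
+ * timestamp tracker index passed by the caller):
+ *
+ *      trace_ice_tx_tstamp_request(skb, idx);
+ */
+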
 /* End tracepoints */
 
 #endif /* _ICE_TRACE_H_ */
index 853f57a..f9bf008 100644 (file)
@@ -223,8 +223,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
        struct ice_tx_buf *tx_buf;
 
        /* get the bql data ready */
-       if (!ice_ring_is_xdp(tx_ring))
-               netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
+       netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
 
        tx_buf = &tx_ring->tx_buf[i];
        tx_desc = ICE_TX_DESC(tx_ring, i);
@@ -313,10 +312,6 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
        tx_ring->next_to_clean = i;
 
        ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
-
-       if (ice_ring_is_xdp(tx_ring))
-               return !!budget;
-
        netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);
 
 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
index 28fcab2..f2a518a 100644 (file)
@@ -9,6 +9,7 @@
 #define ICE_CHNL_MAX_TC                16
 
 #include "ice_hw_autogen.h"
+#include "ice_devids.h"
 #include "ice_osdep.h"
 #include "ice_controlq.h"
 #include "ice_lan_tx_rx.h"
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
new file mode 100644 (file)
index 0000000..6578059
--- /dev/null
@@ -0,0 +1,1029 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "ice_vf_lib_private.h"
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+#include "ice_virtchnl_allowlist.h"
+
+/* Public functions which may be accessed by all driver files */
+
+/**
+ * ice_get_vf_by_id - Get pointer to VF by ID
+ * @pf: the PF private structure
+ * @vf_id: the VF ID to locate
+ *
+ * Locate and return a pointer to the VF structure associated with a given ID.
+ * Returns NULL if the ID does not have a valid VF structure associated with
+ * it.
+ *
+ * This function takes a reference to the VF, which must be released by
+ * calling ice_put_vf() once the caller is finished accessing the VF structure
+ * returned.
+ */
+struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
+{
+       struct ice_vf *vf;
+
+       rcu_read_lock();
+       hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
+               if (vf->vf_id == vf_id) {
+                       struct ice_vf *found;
+
+                       if (kref_get_unless_zero(&vf->refcnt))
+                               found = vf;
+                       else
+                               found = NULL;
+
+                       rcu_read_unlock();
+                       return found;
+               }
+       }
+       rcu_read_unlock();
+
+       return NULL;
+}
+
+/**
+ * ice_release_vf - Release VF associated with a refcount
+ * @ref: the kref decremented to zero
+ *
+ * Callback function for kref_put to release a VF once its reference count has
+ * hit zero.
+ */
+static void ice_release_vf(struct kref *ref)
+{
+       struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);
+
+       vf->vf_ops->free(vf);
+}
+
+/**
+ * ice_put_vf - Release a reference to a VF
+ * @vf: the VF structure to decrease reference count on
+ *
+ * Decrease the reference count for a VF, and free the entry if it is no
+ * longer in use.
+ *
+ * This must be called after ice_get_vf_by_id() once the reference to the VF
+ * structure is no longer used. Otherwise, the VF structure will never be
+ * freed.
+ */
+void ice_put_vf(struct ice_vf *vf)
+{
+       kref_put(&vf->refcnt, ice_release_vf);
+}
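+
+/* A sketch of the intended lookup/release pairing (illustrative; the error
+ * code and the work done under the reference are placeholders):
+ *
+ *      struct ice_vf *vf;
+ *
+ *      vf = ice_get_vf_by_id(pf, vf_id);
+ *      if (!vf)
+ *              return -ENOENT;
+ *
+ *      ... read or configure the VF here ...
+ *
+ *      ice_put_vf(vf);
+ */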
+
+/**
+ * ice_has_vfs - Return true if the PF has any associated VFs
+ * @pf: the PF private structure
+ *
+ * Return whether or not the PF has any allocated VFs.
+ *
+ * Note that a false return only guarantees that there were no VFs at the
+ * point of calling it. It does not guarantee that no more VFs will be added.
+ */
+bool ice_has_vfs(struct ice_pf *pf)
+{
+       /* A simple check that the hash table is not empty does not require
+        * the mutex or rcu_read_lock.
+        */
+       return !hash_empty(pf->vfs.table);
+}
+
+/**
+ * ice_get_num_vfs - Get number of allocated VFs
+ * @pf: the PF private structure
+ *
+ * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
+ * to be contiguous. Do not assume that a VF ID is always less than the
+ * output of this function.
+ */
+u16 ice_get_num_vfs(struct ice_pf *pf)
+{
+       struct ice_vf *vf;
+       unsigned int bkt;
+       u16 num_vfs = 0;
+
+       rcu_read_lock();
+       ice_for_each_vf_rcu(pf, bkt, vf)
+               num_vfs++;
+       rcu_read_unlock();
+
+       return num_vfs;
+}
+
+/**
+ * ice_get_vf_vsi - get VF's VSI based on the stored index
+ * @vf: VF used to get VSI
+ */
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+{
+       if (vf->lan_vsi_idx == ICE_NO_VSI)
+               return NULL;
+
+       return vf->pf->vsi[vf->lan_vsi_idx];
+}
+
+/**
+ * ice_is_vf_disabled
+ * @vf: pointer to the VF info
+ *
+ * If the PF has been disabled, there is no need to reset the VF until the PF
+ * is active again. Similarly, if the VF has been disabled, this means
+ * something else is resetting the VF, so we shouldn't continue.
+ *
+ * Returns true if the caller should consider the VF as disabled, whether
+ * because that single VF is explicitly disabled or because the PF is
+ * currently disabled.
+ */
+bool ice_is_vf_disabled(struct ice_vf *vf)
+{
+       struct ice_pf *pf = vf->pf;
+
+       return (test_bit(ICE_VF_DIS, pf->state) ||
+               test_bit(ICE_VF_STATE_DIS, vf->vf_states));
+}
+
+/**
+ * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
+ * @vf: the VF being reset
+ *
+ * The max poll time is ~800 ms, which is roughly the maximum time it takes
+ * for a VF to be reset and/or a VF driver to be removed.
+ */
+static void ice_wait_on_vf_reset(struct ice_vf *vf)
+{
+       int i;
+
+       for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
+               if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
+                       break;
+               msleep(ICE_MAX_VF_RESET_SLEEP_MS);
+       }
+}
+
+/**
+ * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
+ * @vf: VF to check if it's ready to be configured/queried
+ *
+ * The purpose of this function is to make sure the VF is not in reset, not
+ * disabled, and initialized so it can be configured and/or queried by a host
+ * administrator.
+ */
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+{
+       ice_wait_on_vf_reset(vf);
+
+       if (ice_is_vf_disabled(vf))
+               return -EINVAL;
+
+       if (ice_check_vf_init(vf))
+               return -EBUSY;
+
+       return 0;
+}
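+
+/* Sketch of a typical call from an NDO-style host path (illustrative;
+ * out_put_vf is a hypothetical label that drops the VF reference):
+ *
+ *      ret = ice_check_vf_ready_for_cfg(vf);
+ *      if (ret)
+ *              goto out_put_vf;
+ */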
+
+/**
+ * ice_trigger_vf_reset - Reset a VF on HW
+ * @vf: pointer to the VF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ * @is_pfr: true if the reset was triggered due to a previous PFR
+ *
+ * Trigger hardware to start a reset for a particular VF. Expects the caller
+ * to wait the proper amount of time to allow hardware to reset the VF before
+ * it cleans up and restores VF functionality.
+ */
+static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
+{
+       /* Inform VF that it is no longer active, as a warning */
+       clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+
+       /* Disable VF's configuration API during reset. The flag is re-enabled
+        * when it's safe again to access VF's VSI.
+        */
+       clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+       /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
+        * needs to clear them in the case of VFR/VFLR. If this is done for
+        * PFR, it can mess up VF resets because the VF driver may already
+        * have started cleanup by the time we get here.
+        */
+       if (!is_pfr)
+               vf->vf_ops->clear_mbx_register(vf);
+
+       vf->vf_ops->trigger_reset_register(vf, is_vflr);
+}
+
+static void ice_vf_clear_counters(struct ice_vf *vf)
+{
+       struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+       vf->num_mac = 0;
+       vsi->num_vlan = 0;
+       memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
+       memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
+}
+
+/**
+ * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
+ * @vf: VF to perform pre VSI rebuild tasks
+ *
+ * These are lightweight per-VF tasks that don't need to be amortized, since
+ * they are most likely called in a loop over all VF(s) in the
+ * reset_all_vfs() case.
+ */
+static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
+{
+       ice_vf_clear_counters(vf);
+       vf->vf_ops->clear_reset_trigger(vf);
+}
+
+/**
+ * ice_vf_rebuild_vsi - rebuild the VF's VSI
+ * @vf: VF to rebuild the VSI for
+ *
+ * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
+ * host, PFR, CORER, etc.).
+ */
+static int ice_vf_rebuild_vsi(struct ice_vf *vf)
+{
+       struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+       struct ice_pf *pf = vf->pf;
+
+       if (ice_vsi_rebuild(vsi, true)) {
+               dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
+                       vf->vf_id);
+               return -EIO;
+       }
+       /* vsi->idx will remain the same in this case so don't update
+        * vf->lan_vsi_idx
+        */
+       vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
+       vf->lan_vsi_num = vsi->vsi_num;
+
+       return 0;
+}
+
+/**
+ * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
+ * @pf: PF structure for accessing VF(s)
+ *
+ * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
+ * else return true
+ */
+bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
+{
+       bool is_vf_promisc = false;
+       struct ice_vf *vf;
+       unsigned int bkt;
+
+       rcu_read_lock();
+       ice_for_each_vf_rcu(pf, bkt, vf) {
+               /* found a VF that has promiscuous mode configured */
+               if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+                   test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
+                       is_vf_promisc = true;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       return is_vf_promisc;
+}
+
+/**
+ * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
+ * @vf: the VF to configure
+ * @vsi: the VF's VSI
+ * @promisc_m: the promiscuous mode to enable
+ */
+int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+       struct ice_hw *hw = &vsi->back->hw;
+       int status;
+
+       if (ice_vf_is_port_vlan_ena(vf))
+               status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
+                                                 ice_vf_get_port_vlan_id(vf));
+       else if (ice_vsi_has_non_zero_vlans(vsi))
+               status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
+       else
+               status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
+
+       if (status && status != -EEXIST) {
+               dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
+                       vf->vf_id, status);
+               return status;
+       }
+
+       return 0;
+}
+
+/**
+ * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
+ * @vf: the VF to configure
+ * @vsi: the VF's VSI
+ * @promisc_m: the promiscuous mode to disable
+ */
+int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+       struct ice_hw *hw = &vsi->back->hw;
+       int status;
+
+       if (ice_vf_is_port_vlan_ena(vf))
+               status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+                                                   ice_vf_get_port_vlan_id(vf));
+       else if (ice_vsi_has_non_zero_vlans(vsi))
+               status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
+       else
+               status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
+
+       if (status && status != -ENOENT) {
+               dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
+                       vf->vf_id, status);
+               return status;
+       }
+
+       return 0;
+}
+
+/**
+ * ice_reset_all_vfs - reset all allocated VFs in one go
+ * @pf: pointer to the PF structure
+ *
+ * First, tell the hardware to reset each VF, then do all the waiting in one
+ * chunk, and finally finish restoring each VF after the wait. This is useful
+ * during PF routines which need to reset all VFs, which would otherwise have
+ * to perform these resets in a serialized fashion.
+ */
+void ice_reset_all_vfs(struct ice_pf *pf)
+{
+       struct device *dev = ice_pf_to_dev(pf);
+       struct ice_hw *hw = &pf->hw;
+       struct ice_vf *vf;
+       unsigned int bkt;
+
+       /* If we don't have any VFs, then there is nothing to reset */
+       if (!ice_has_vfs(pf))
+               return;
+
+       mutex_lock(&pf->vfs.table_lock);
+
+       /* clear all malicious info if the VFs are getting reset */
+       ice_for_each_vf(pf, bkt, vf)
+               if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
+                                       ICE_MAX_SRIOV_VFS, vf->vf_id))
+                       dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
+                               vf->vf_id);
+
+       /* If VFs have been disabled, there is no need to reset */
+       if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
+               mutex_unlock(&pf->vfs.table_lock);
+               return;
+       }
+
+       /* Begin reset on all VFs at once */
+       ice_for_each_vf(pf, bkt, vf)
+               ice_trigger_vf_reset(vf, true, true);
+
+       /* HW requires some time to make sure it can flush the FIFO for a VF
+        * when it resets it. Now that we've triggered all of the VFs, iterate
+        * the table again and wait for each VF to complete.
+        */
+       ice_for_each_vf(pf, bkt, vf) {
+               if (!vf->vf_ops->poll_reset_status(vf)) {
+                       /* Display a warning if at least one VF didn't manage
+                        * to reset in time, but continue on with the
+                        * operation.
+                        */
+                       dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
+                       break;
+               }
+       }
+
+       /* free VF resources to begin resetting the VSI state */
+       ice_for_each_vf(pf, bkt, vf) {
+               mutex_lock(&vf->cfg_lock);
+
+               vf->driver_caps = 0;
+               ice_vc_set_default_allowlist(vf);
+
+               ice_vf_fdir_exit(vf);
+               ice_vf_fdir_init(vf);
+               /* clean VF control VSI when resetting VFs since it should be
+                * set up only when the VF creates its first FDIR rule.
+                */
+               if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+                       ice_vf_ctrl_invalidate_vsi(vf);
+
+               ice_vf_pre_vsi_rebuild(vf);
+               ice_vf_rebuild_vsi(vf);
+               vf->vf_ops->post_vsi_rebuild(vf);
+
+               mutex_unlock(&vf->cfg_lock);
+       }
+
+       if (ice_is_eswitch_mode_switchdev(pf))
+               if (ice_eswitch_rebuild(pf))
+                       dev_warn(dev, "eswitch rebuild failed\n");
+
+       ice_flush(hw);
+       clear_bit(ICE_VF_DIS, pf->state);
+
+       mutex_unlock(&pf->vfs.table_lock);
+}
+
+/**
+ * ice_notify_vf_reset - Notify VF of a reset event
+ * @vf: pointer to the VF structure
+ */
+static void ice_notify_vf_reset(struct ice_vf *vf)
+{
+       struct ice_hw *hw = &vf->pf->hw;
+       struct virtchnl_pf_event pfe;
+
+       /* Bail out if the VF is disabled, or is neither initialized nor
+        * active - otherwise proceed with the notification
+        */
+       if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+            !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
+           test_bit(ICE_VF_STATE_DIS, vf->vf_states))
+               return;
+
+       pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+       pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+       ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+                             VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
+                             NULL);
+}
+
+/**
+ * ice_reset_vf - Reset a particular VF
+ * @vf: pointer to the VF structure
+ * @flags: flags controlling behavior of the reset
+ *
+ * Flags:
+ *   ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
+ *   ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
+ *   ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
+ *
+ * Returns 0 if the VF is currently in reset, if the resets are disabled, or
+ * if the VF resets successfully. Returns an error code if the VF fails to
+ * rebuild.
+ */
+int ice_reset_vf(struct ice_vf *vf, u32 flags)
+{
+       struct ice_pf *pf = vf->pf;
+       struct ice_vsi *vsi;
+       struct device *dev;
+       struct ice_hw *hw;
+       u8 promisc_m;
+       int err = 0;
+       bool rsd;
+
+       dev = ice_pf_to_dev(pf);
+       hw = &pf->hw;
+
+       if (flags & ICE_VF_RESET_NOTIFY)
+               ice_notify_vf_reset(vf);
+
+       if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
+               dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
+                       vf->vf_id);
+               return 0;
+       }
+
+       if (ice_is_vf_disabled(vf)) {
+               dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
+                       vf->vf_id);
+               return 0;
+       }
+
+       if (flags & ICE_VF_RESET_LOCK)
+               mutex_lock(&vf->cfg_lock);
+       else
+               lockdep_assert_held(&vf->cfg_lock);
+
+       /* Set VF disable bit state here, before triggering reset */
+       set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+       ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);
+
+       vsi = ice_get_vf_vsi(vf);
+
+       ice_dis_vf_qs(vf);
+
+       /* Call Disable LAN Tx queue AQ whether or not queues are
+        * enabled. This is needed for successful completion of VFR.
+        */
+       ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
+                       NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);
+
+       /* poll VPGEN_VFRSTAT reg to make sure
+        * that reset is complete
+        */
+       rsd = vf->vf_ops->poll_reset_status(vf);
+
+       /* Display a warning if the VF didn't manage to reset in time, but
+        * continue on with the operation.
+        */
+       if (!rsd)
+               dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
+
+       vf->driver_caps = 0;
+       ice_vc_set_default_allowlist(vf);
+
+       /* disable promiscuous modes in case they were enabled;
+        * ignore any error if the disabling process fails
+        */
+       if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+           test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
+               if (ice_vf_is_port_vlan_ena(vf) || vsi->num_vlan)
+                       promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
+               else
+                       promisc_m = ICE_UCAST_PROMISC_BITS;
+
+               if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m))
+                       dev_err(dev, "disabling promiscuous mode failed\n");
+       }
+
+       ice_eswitch_del_vf_mac_rule(vf);
+
+       ice_vf_fdir_exit(vf);
+       ice_vf_fdir_init(vf);
+       /* clean VF control VSI when resetting the VF since it should be set
+        * up only when the VF creates its first FDIR rule.
+        */
+       if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+               ice_vf_ctrl_vsi_release(vf);
+
+       ice_vf_pre_vsi_rebuild(vf);
+
+       if (vf->vf_ops->vsi_rebuild(vf)) {
+               dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
+                       vf->vf_id);
+               err = -EFAULT;
+               goto out_unlock;
+       }
+
+       vf->vf_ops->post_vsi_rebuild(vf);
+       vsi = ice_get_vf_vsi(vf);
+       ice_eswitch_update_repr(vsi);
+       ice_eswitch_replay_vf_mac_rule(vf);
+
+       /* if the VF has been reset, allow it to come up again */
+       if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
+                               ICE_MAX_SRIOV_VFS, vf->vf_id))
+               dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
+                       vf->vf_id);
+
+out_unlock:
+       if (flags & ICE_VF_RESET_LOCK)
+               mutex_unlock(&vf->cfg_lock);
+
+       return err;
+}
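+
+/* Example (sketch): a host configuration path that does not already hold the
+ * VF's cfg_lock would typically combine the notify and lock flags:
+ *
+ *      err = ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
+ */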
+
+/**
+ * ice_set_vf_state_qs_dis - Set VF queues state to disabled
+ * @vf: pointer to the VF structure
+ */
+void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+{
+       /* Clear Rx/Tx enabled queues flag */
+       bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
+       bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+       clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+}
+
+/* Private functions only accessed from other virtualization files */
+
+/**
+ * ice_dis_vf_qs - Disable the VF queues
+ * @vf: pointer to the VF structure
+ */
+void ice_dis_vf_qs(struct ice_vf *vf)
+{
+       struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+       ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
+       ice_vsi_stop_all_rx_rings(vsi);
+       ice_set_vf_state_qs_dis(vf);
+}
+
+/**
+ * ice_check_vf_init - helper to check if VF init complete
+ * @vf: the pointer to the VF to check
+ */
+int ice_check_vf_init(struct ice_vf *vf)
+{
+       struct ice_pf *pf = vf->pf;
+
+       if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+               dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
+                       vf->vf_id);
+               return -EBUSY;
+       }
+       return 0;
+}
+
+/**
+ * ice_vf_get_port_info - Get the VF's port info structure
+ * @vf: VF used to get the port info structure for
+ */
+struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
+{
+       return vf->pf->hw.port_info;
+}
+
+static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
+{
+       struct ice_vsi_ctx *ctx;
+       int err;
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       ctx->info.sec_flags = vsi->info.sec_flags;
+       ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+
+       if (enable)
+               ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+       else
+               ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+
+       err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
+       if (err)
+               dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
+                       enable ? "ON" : "OFF", vsi->vsi_num, err);
+       else
+               vsi->info.sec_flags = ctx->info.sec_flags;
+
+       kfree(ctx);
+
+       return err;
+}
+
+/**
+ * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
+ * @vsi: VSI to enable Tx spoof checking for
+ */
+static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
+{
+       struct ice_vsi_vlan_ops *vlan_ops;
+       int err;
+
+       vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+       err = vlan_ops->ena_tx_filtering(vsi);
+       if (err)
+               return err;
+
+       return ice_cfg_mac_antispoof(vsi, true);
+}
+
+/**
+ * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
+ * @vsi: VSI to disable Tx spoof checking for
+ */
+static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
+{
+       struct ice_vsi_vlan_ops *vlan_ops;
+       int err;
+
+       vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+       err = vlan_ops->dis_tx_filtering(vsi);
+       if (err)
+               return err;
+
+       return ice_cfg_mac_antispoof(vsi, false);
+}
+
+/**
+ * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
+ * @vsi: VSI associated to the VF
+ * @enable: whether to enable or disable the spoof checking
+ */
+int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
+{
+       int err;
+
+       if (enable)
+               err = ice_vsi_ena_spoofchk(vsi);
+       else
+               err = ice_vsi_dis_spoofchk(vsi);
+
+       return err;
+}
+
+/**
+ * ice_is_vf_trusted
+ * @vf: pointer to the VF info
+ */
+bool ice_is_vf_trusted(struct ice_vf *vf)
+{
+       return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+}
+
+/**
+ * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
+ * @vf: the VF to check
+ *
+ * Returns true if the VF has no Rx and no Tx queues enabled and returns false
+ * otherwise
+ */
+bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
+{
+       return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
+               !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
+}
+
+/**
+ * ice_is_vf_link_up - check if the VF's link is up
+ * @vf: VF to check if link is up
+ */
+bool ice_is_vf_link_up(struct ice_vf *vf)
+{
+       struct ice_port_info *pi = ice_vf_get_port_info(vf);
+
+       if (ice_check_vf_init(vf))
+               return false;
+
+       if (ice_vf_has_no_qs_ena(vf))
+               return false;
+       else if (vf->link_forced)
+               return vf->link_up;
+       else
+               return pi->phy.link_info.link_info &
+                       ICE_AQ_LINK_UP;
+}
+
+/**
+ * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
+ * @vf: VF to configure trust setting for
+ */
+static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
+{
+       if (vf->trusted)
+               set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+       else
+               clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+}
+
+/**
+ * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
+ * @vf: VF to add MAC filters for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
+ */
+static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
+{
+       struct device *dev = ice_pf_to_dev(vf->pf);
+       struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+       u8 broadcast[ETH_ALEN];
+       int status;
+
+       if (ice_is_eswitch_mode_switchdev(vf->pf))
+               return 0;
+
+       eth_broadcast_addr(broadcast);
+       status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+       if (status) {
+               dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
+                       vf->vf_id, status);
+               return status;
+       }
+
+       vf->num_mac++;
+
+       if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
+               status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
+                                         ICE_FWD_TO_VSI);
+               if (status) {
+                       dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
+                               &vf->hw_lan_addr.addr[0], vf->vf_id,
+                               status);
+                       return status;
+               }
+               vf->num_mac++;
+
+               ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
+       }
+
+       return 0;
+}
+
+/**
+ * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
+ * @vf: VF to add VLAN filters for
+ * @vsi: Pointer to VSI
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds either a VLAN 0 or port VLAN based filter after reset.
+ */
+static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
+{
+       struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+       struct device *dev = ice_pf_to_dev(vf->pf);
+       int err;
+
+       if (ice_vf_is_port_vlan_ena(vf)) {
+               err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
+               if (err) {
+                       dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
+                               vf->vf_id, err);
+                       return err;
+               }
+
+               err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
+       } else {
+               err = ice_vsi_add_vlan_zero(vsi);
+       }
+
+       if (err) {
+               dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
+                       ice_vf_is_port_vlan_ena(vf) ?
+                       ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
+               return err;
+       }
+
+       err = vlan_ops->ena_rx_filtering(vsi);
+       if (err)
+               dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
+                        vf->vf_id, vsi->idx, err);
+
+       return 0;
+}
+
+/**
+ * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
+ * @vf: VF to re-apply the configuration for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * needs to re-apply the host configured Tx rate limiting configuration.
+ */
+static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
+{
+       struct device *dev = ice_pf_to_dev(vf->pf);
+       struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+       int err;
+
+       if (vf->min_tx_rate) {
+               err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
+               if (err) {
+                       dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
+                               vf->min_tx_rate, vf->vf_id, err);
+                       return err;
+               }
+       }
+
+       if (vf->max_tx_rate) {
+               err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
+               if (err) {
+                       dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
+                               vf->max_tx_rate, vf->vf_id, err);
+                       return err;
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
+ * @vsi: Pointer to VSI
+ *
+ * This function moves the VSI into the corresponding scheduler aggregator
+ * node based on the cached per-VSI "aggregator node info" value
+ */
+static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
+{
+       struct ice_pf *pf = vsi->back;
+       struct device *dev;
+       int status;
+
+       if (!vsi->agg_node)
+               return;
+
+       dev = ice_pf_to_dev(pf);
+       if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
+               dev_dbg(dev,
+                       "agg_id %u already has reached max_num_vsis %u\n",
+                       vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
+               return;
+       }
+
+       status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
+                                    vsi->idx, vsi->tc_cfg.ena_tc);
+       if (status)
+               dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
+                       vsi->idx, vsi->agg_node->agg_id);
+       else
+               vsi->agg_node->num_vsis++;
+}
+
+/**
+ * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
+ * @vf: VF to rebuild host configuration on
+ */
+void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
+{
+       struct device *dev = ice_pf_to_dev(vf->pf);
+       struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+       ice_vf_set_host_trust_cfg(vf);
+
+       if (ice_vf_rebuild_host_mac_cfg(vf))
+               dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
+                       vf->vf_id);
+
+       if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
+               dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
+                       vf->vf_id);
+
+       if (ice_vf_rebuild_host_tx_rate_cfg(vf))
+               dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
+                       vf->vf_id);
+
+       if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
+               dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
+                       vf->vf_id);
+
+       /* rebuild aggregator node config for main VF VSI */
+       ice_vf_rebuild_aggregator_node_cfg(vsi);
+}
+
+/**
+ * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
+ * @vf: VF that control VSI is being invalidated on
+ */
+void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
+{
+       vf->ctrl_vsi_idx = ICE_NO_VSI;
+}
+
+/**
+ * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
+ * @vf: VF that control VSI is being released on
+ */
+void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
+{
+       ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
+       ice_vf_ctrl_invalidate_vsi(vf);
+}
+
+/**
+ * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
+ * @vf: VF to setup control VSI for
+ *
+ * Returns pointer to the successfully allocated VSI struct on success,
+ * otherwise returns NULL on failure.
+ */
+struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
+{
+       struct ice_port_info *pi = ice_vf_get_port_info(vf);
+       struct ice_pf *pf = vf->pf;
+       struct ice_vsi *vsi;
+
+       vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf, NULL);
+       if (!vsi) {
+               dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
+               ice_vf_ctrl_invalidate_vsi(vf);
+       }
+
+       return vsi;
+}
+
+/**
+ * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
+ * @vf: VF to remove access to VSI for
+ */
+void ice_vf_invalidate_vsi(struct ice_vf *vf)
+{
+       vf->lan_vsi_idx = ICE_NO_VSI;
+       vf->lan_vsi_num = ICE_NO_VSI;
+}
+
+/**
+ * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
+ * @vf: VF to set in initialized state
+ *
+ * After this function the VF will be ready to receive/handle the
+ * VIRTCHNL_OP_GET_VF_RESOURCES message
+ */
+void ice_vf_set_initialized(struct ice_vf *vf)
+{
+       ice_set_vf_state_qs_dis(vf);
+       clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+       clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+       clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
+       set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+       memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
new file mode 100644 (file)
index 0000000..831b667
--- /dev/null
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_VF_LIB_H_
+#define _ICE_VF_LIB_H_
+
+#include <linux/types.h>
+#include <linux/hashtable.h>
+#include <linux/bitmap.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <net/devlink.h>
+#include <linux/avf/virtchnl.h>
+#include "ice_type.h"
+#include "ice_virtchnl_fdir.h"
+#include "ice_vsi_vlan_ops.h"
+
+#define ICE_MAX_SRIOV_VFS              256
+
+/* VF resource constraints */
+#define ICE_MAX_RSS_QS_PER_VF  16
+
+struct ice_pf;
+struct ice_vf;
+struct ice_virtchnl_ops;
+
+/* VF capabilities */
+enum ice_virtchnl_cap {
+       ICE_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
+};
+
+/* Specific VF states */
+enum ice_vf_states {
+       ICE_VF_STATE_INIT = 0,          /* PF is initializing VF */
+       ICE_VF_STATE_ACTIVE,            /* VF resources are allocated for use */
+       ICE_VF_STATE_QS_ENA,            /* VF queue(s) enabled */
+       ICE_VF_STATE_DIS,
+       ICE_VF_STATE_MC_PROMISC,
+       ICE_VF_STATE_UC_PROMISC,
+       ICE_VF_STATES_NBITS
+};
+
+struct ice_time_mac {
+       unsigned long time_modified;
+       u8 addr[ETH_ALEN];
+};
+
+/* VF MDD events print structure */
+struct ice_mdd_vf_events {
+       u16 count;                      /* total count of Rx|Tx events */
+       /* count number of the last printed event */
+       u16 last_printed;
+};
+
+/* VF operations */
+struct ice_vf_ops {
+       enum ice_disq_rst_src reset_type;
+       void (*free)(struct ice_vf *vf);
+       void (*clear_mbx_register)(struct ice_vf *vf);
+       void (*trigger_reset_register)(struct ice_vf *vf, bool is_vflr);
+       bool (*poll_reset_status)(struct ice_vf *vf);
+       void (*clear_reset_trigger)(struct ice_vf *vf);
+       int (*vsi_rebuild)(struct ice_vf *vf);
+       void (*post_vsi_rebuild)(struct ice_vf *vf);
+};
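+
+/* A backing implementation (e.g. SR-IOV) supplies these callbacks. A sketch
+ * of the wiring, with hypothetical names for the ops and callbacks:
+ *
+ *      static const struct ice_vf_ops my_vf_ops = {
+ *              .reset_type = ICE_VF_RESET, // an enum ice_disq_rst_src value
+ *              .free = my_free_vf,
+ *              ...
+ *      };
+ *
+ *      vf->vf_ops = &my_vf_ops;
+ */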
+
+/* Virtchnl/SR-IOV config info */
+struct ice_vfs {
+       DECLARE_HASHTABLE(table, 8);    /* table of VF entries */
+       struct mutex table_lock;        /* Lock for protecting the hash table */
+       u16 num_supported;              /* max supported VFs on this PF */
+       u16 num_qps_per;                /* number of queue pairs per VF */
+       u16 num_msix_per;               /* number of MSI-X vectors per VF */
+       unsigned long last_printed_mdd_jiffies; /* MDD message rate limit */
+       DECLARE_BITMAP(malvfs, ICE_MAX_SRIOV_VFS); /* malicious VF indicator */
+};
+
+/* VF information structure */
+struct ice_vf {
+       struct hlist_node entry;
+       struct rcu_head rcu;
+       struct kref refcnt;
+       struct ice_pf *pf;
+
+       /* Used during virtchnl message handling and NDO ops against the VF
+        * that will trigger a VFR
+        */
+       struct mutex cfg_lock;
+
+       u16 vf_id;                      /* VF ID in the PF space */
+       u16 lan_vsi_idx;                /* index into PF struct */
+       u16 ctrl_vsi_idx;
+       struct ice_vf_fdir fdir;
+       /* first vector index of this VF in the PF space */
+       int first_vector_idx;
+       struct ice_sw *vf_sw_id;        /* switch ID the VF VSIs connect to */
+       struct virtchnl_version_info vf_ver;
+       u32 driver_caps;                /* reported by VF driver */
+       struct virtchnl_ether_addr dev_lan_addr;
+       struct virtchnl_ether_addr hw_lan_addr;
+       struct ice_time_mac legacy_last_added_umac;
+       DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
+       DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+       struct ice_vlan port_vlan_info; /* Port VLAN ID, QoS, and TPID */
+       struct virtchnl_vlan_caps vlan_v2_caps;
+       u8 pf_set_mac:1;                /* VF MAC address set by VMM admin */
+       u8 trusted:1;
+       u8 spoofchk:1;
+       u8 link_forced:1;
+       u8 link_up:1;                   /* only valid if VF link is forced */
+       /* VSI indices - actual VSI pointers are maintained in the PF structure
+        * When assigned, these will be non-zero, because VSI 0 is always
+        * the main LAN VSI for the PF.
+        */
+       u16 lan_vsi_num;                /* ID as used by firmware */
+       unsigned int min_tx_rate;       /* Minimum Tx bandwidth limit in Mbps */
+       unsigned int max_tx_rate;       /* Maximum Tx bandwidth limit in Mbps */
+       DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
+
+       unsigned long vf_caps;          /* VF's adv. capabilities */
+       u8 num_req_qs;                  /* num of queue pairs requested by VF */
+       u16 num_mac;
+       u16 num_vf_qs;                  /* num of queue configured per VF */
+       struct ice_mdd_vf_events mdd_rx_events;
+       struct ice_mdd_vf_events mdd_tx_events;
+       DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
+
+       struct ice_repr *repr;
+       const struct ice_virtchnl_ops *virtchnl_ops;
+       const struct ice_vf_ops *vf_ops;
+
+       /* devlink port data */
+       struct devlink_port devlink_port;
+};
+
+/* Flags for controlling behavior of ice_reset_vf */
+enum ice_vf_reset_flags {
+       ICE_VF_RESET_VFLR = BIT(0), /* Indicate a VFLR reset */
+       ICE_VF_RESET_NOTIFY = BIT(1), /* Notify VF prior to reset */
+       ICE_VF_RESET_LOCK = BIT(2), /* Acquire the VF cfg_lock */
+};
+
+static inline u16 ice_vf_get_port_vlan_id(struct ice_vf *vf)
+{
+       return vf->port_vlan_info.vid;
+}
+
+static inline u8 ice_vf_get_port_vlan_prio(struct ice_vf *vf)
+{
+       return vf->port_vlan_info.prio;
+}
+
+static inline bool ice_vf_is_port_vlan_ena(struct ice_vf *vf)
+{
+       return (ice_vf_get_port_vlan_id(vf) || ice_vf_get_port_vlan_prio(vf));
+}
+
+static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
+{
+       return vf->port_vlan_info.tpid;
+}
+
+/* VF Hash Table access functions
+ *
+ * These functions provide abstraction for interacting with the VF hash table.
+ * In general, direct access to the hash table should be avoided outside of
+ * these functions where possible.
+ *
+ * The VF entries in the hash table are protected by reference counting to
+ * track lifetime of accesses from the table. The ice_get_vf_by_id() function
+ * obtains a reference to the VF structure which must be dropped by using
+ * ice_put_vf().
+ */
+
+/**
+ * ice_for_each_vf - Iterate over each VF entry
+ * @pf: pointer to the PF private structure
+ * @bkt: bucket index used for iteration
+ * @vf: pointer to the VF entry currently being processed in the loop.
+ *
+ * The bkt variable is an unsigned integer iterator used to traverse the VF
+ * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
+ * Use vf->vf_id to get the id number if needed.
+ *
+ * The caller is expected to be under the table_lock mutex for the entire
+ * loop. Use this iterator if your loop is long or if it might sleep.
+ */
+#define ice_for_each_vf(pf, bkt, vf) \
+       hash_for_each((pf)->vfs.table, (bkt), (vf), entry)
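+
+/* Sketch of the locking pattern for the iterator above (illustrative;
+ * do_long_or_sleeping_work() is a placeholder):
+ *
+ *      mutex_lock(&pf->vfs.table_lock);
+ *      ice_for_each_vf(pf, bkt, vf)
+ *              do_long_or_sleeping_work(vf);
+ *      mutex_unlock(&pf->vfs.table_lock);
+ */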
+
+/**
+ * ice_for_each_vf_rcu - Iterate over each VF entry protected by RCU
+ * @pf: pointer to the PF private structure
+ * @bkt: bucket index used for iteration
+ * @vf: pointer to the VF entry currently being processed in the loop.
+ *
+ * The bkt variable is an unsigned integer iterator used to traverse the VF
+ * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
+ * Use vf->vf_id to get the id number if needed.
+ *
+ * The caller is expected to be under rcu_read_lock() for the entire loop.
+ * Only use this iterator if your loop is short and you can guarantee it does
+ * not sleep.
+ */
+#define ice_for_each_vf_rcu(pf, bkt, vf) \
+       hash_for_each_rcu((pf)->vfs.table, (bkt), (vf), entry)
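+
+/* Sketch of the equivalent RCU pattern (illustrative; the loop body must be
+ * short and must not sleep, and some_quick_check() is a placeholder):
+ *
+ *      rcu_read_lock();
+ *      ice_for_each_vf_rcu(pf, bkt, vf)
+ *              count += some_quick_check(vf);
+ *      rcu_read_unlock();
+ */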
+
+#ifdef CONFIG_PCI_IOV
+struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id);
+void ice_put_vf(struct ice_vf *vf);
+bool ice_has_vfs(struct ice_pf *pf);
+u16 ice_get_num_vfs(struct ice_pf *pf);
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
+bool ice_is_vf_disabled(struct ice_vf *vf);
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
+void ice_set_vf_state_qs_dis(struct ice_vf *vf);
+bool ice_is_any_vf_in_promisc(struct ice_pf *pf);
+int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
+int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
+int ice_reset_vf(struct ice_vf *vf, u32 flags);
+void ice_reset_all_vfs(struct ice_pf *pf);
+#else /* CONFIG_PCI_IOV */
+static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
+{
+       return NULL;
+}
+
+static inline void ice_put_vf(struct ice_vf *vf)
+{
+}
+
+static inline bool ice_has_vfs(struct ice_pf *pf)
+{
+       return false;
+}
+
+static inline u16 ice_get_num_vfs(struct ice_pf *pf)
+{
+       return 0;
+}
+
+static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+{
+       return NULL;
+}
+
+static inline bool ice_is_vf_disabled(struct ice_vf *vf)
+{
+       return true;
+}
+
+static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+{
+}
+
+static inline bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
+{
+       return false;
+}
+
+static inline int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int ice_reset_vf(struct ice_vf *vf, u32 flags)
+{
+       return 0;
+}
+
+static inline void ice_reset_all_vfs(struct ice_pf *pf)
+{
+}
+#endif /* !CONFIG_PCI_IOV */
+
+#endif /* _ICE_VF_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
new file mode 100644 (file)
index 0000000..15887e7
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_VF_LIB_PRIVATE_H_
+#define _ICE_VF_LIB_PRIVATE_H_
+
+#include "ice_vf_lib.h"
+
+/* This header file is for exposing functions in ice_vf_lib.c to other files
+ * which are also conditionally compiled depending on CONFIG_PCI_IOV.
+ * Functions which may be used by other files should be exposed as part of
+ * ice_vf_lib.h.
+ *
+ * Functions in this file are exposed only when CONFIG_PCI_IOV is enabled, and
+ * thus this header must not be included by .c files which may be compiled
+ * with CONFIG_PCI_IOV disabled.
+ *
+ * To avoid this, only include this header file directly within .c files that
+ * are conditionally enabled in the "ice-$(CONFIG_PCI_IOV)" block.
+ */
+
+#ifndef CONFIG_PCI_IOV
+#warning "Only include ice_vf_lib_private.h in CONFIG_PCI_IOV virtualization files"
+#endif
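+
+/* Illustrative Makefile hookup for such files (a sketch; the exact object
+ * list in the driver Makefile may differ):
+ *
+ *      ice-$(CONFIG_PCI_IOV) += ice_vf_lib.o ice_vf_mbx.o
+ */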
+
+void ice_dis_vf_qs(struct ice_vf *vf);
+int ice_check_vf_init(struct ice_vf *vf);
+struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf);
+int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable);
+bool ice_is_vf_trusted(struct ice_vf *vf);
+bool ice_vf_has_no_qs_ena(struct ice_vf *vf);
+bool ice_is_vf_link_up(struct ice_vf *vf);
+void ice_vf_rebuild_host_cfg(struct ice_vf *vf);
+void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf);
+void ice_vf_ctrl_vsi_release(struct ice_vf *vf);
+struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
+void ice_vf_invalidate_vsi(struct ice_vf *vf);
+void ice_vf_set_initialized(struct ice_vf *vf);
+
+#endif /* _ICE_VF_LIB_PRIVATE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
new file mode 100644 (file)
index 0000000..fc8c93f
--- /dev/null
@@ -0,0 +1,532 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_vf_mbx.h"
+
+/**
+ * ice_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: VF ID to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cd: pointer to command details
+ *
+ * Send a message to the VF driver (0x0802) over the mailbox
+ * queue; the message is sent asynchronously via the
+ * ice_sq_send_cmd() function.
+ */
+int
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+                     u8 *msg, u16 msglen, struct ice_sq_cd *cd)
+{
+       struct ice_aqc_pf_vf_msg *cmd;
+       struct ice_aq_desc desc;
+
+       ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
+
+       cmd = &desc.params.virt;
+       cmd->id = cpu_to_le32(vfid);
+
+       desc.cookie_high = cpu_to_le32(v_opcode);
+       desc.cookie_low = cpu_to_le32(v_retval);
+
+       if (msglen)
+               desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+       return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+}
+
+/**
+ * ice_conv_link_speed_to_virtchnl
+ * @adv_link_support: determines the format of the returned link speed
+ * @link_speed: variable containing the link_speed to be converted
+ *
+ * Convert link speed supported by HW to link speed supported by virtchnl.
+ * If adv_link_support is true, then return link speed in Mbps. Else return
+ * link speed as a VIRTCHNL_LINK_SPEED_* cast to a u32. Note that the caller
+ * needs to cast back to an enum virtchnl_link_speed in the case where
+ * adv_link_support is false, but when adv_link_support is true the caller can
+ * expect the speed in Mbps.
+ */
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
+{
+       u32 speed;
+
+       if (adv_link_support)
+               switch (link_speed) {
+               case ICE_AQ_LINK_SPEED_10MB:
+                       speed = ICE_LINK_SPEED_10MBPS;
+                       break;
+               case ICE_AQ_LINK_SPEED_100MB:
+                       speed = ICE_LINK_SPEED_100MBPS;
+                       break;
+               case ICE_AQ_LINK_SPEED_1000MB:
+                       speed = ICE_LINK_SPEED_1000MBPS;
+                       break;
+               case ICE_AQ_LINK_SPEED_2500MB:
+                       speed = ICE_LINK_SPEED_2500MBPS;
+                       break;
+               case ICE_AQ_LINK_SPEED_5GB:
+                       speed = ICE_LINK_SPEED_5000MBPS;
+                       break;
+               case ICE_AQ_LINK_SPEED_10GB:
+                       speed = ICE_LINK_SPEED_10000MBPS;
+                       break;
+               case ICE_AQ_LINK_SPEED_20GB:
+                       speed = ICE_LINK_SPEED_20000MBPS;
+                       break;
+               case ICE_AQ_LINK_SPEED_25GB:
+                       speed = ICE_LINK_SPEED_25000MBPS;
+                       break;
+               case ICE_AQ_LINK_SPEED_40GB:
+                       speed = ICE_LINK_SPEED_40000MBPS;
+                       break;
+               case ICE_AQ_LINK_SPEED_50GB:
+                       speed = ICE_LINK_SPEED_50000MBPS;
+                       break;
+               case ICE_AQ_LINK_SPEED_100GB:
+                       speed = ICE_LINK_SPEED_100000MBPS;
+                       break;
+               default:
+                       speed = ICE_LINK_SPEED_UNKNOWN;
+                       break;
+               }
+       else
+               /* Virtchnl speeds are not defined for every speed supported in
+                * the hardware. To maintain compatibility with older AVF
+                * drivers, the newer speed values are resolved to the closest
+                * known virtchnl speed when the speed is reported.
+                */
+               switch (link_speed) {
+               case ICE_AQ_LINK_SPEED_10MB:
+               case ICE_AQ_LINK_SPEED_100MB:
+                       speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
+                       break;
+               case ICE_AQ_LINK_SPEED_1000MB:
+               case ICE_AQ_LINK_SPEED_2500MB:
+               case ICE_AQ_LINK_SPEED_5GB:
+                       speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
+                       break;
+               case ICE_AQ_LINK_SPEED_10GB:
+                       speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
+                       break;
+               case ICE_AQ_LINK_SPEED_20GB:
+                       speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
+                       break;
+               case ICE_AQ_LINK_SPEED_25GB:
+                       speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
+                       break;
+               case ICE_AQ_LINK_SPEED_40GB:
+               case ICE_AQ_LINK_SPEED_50GB:
+               case ICE_AQ_LINK_SPEED_100GB:
+                       speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
+                       break;
+               default:
+                       speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
+                       break;
+               }
+
+       return speed;
+}
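+
+/* Example (sketch) of the two return formats for a 25G link:
+ *
+ *      ice_conv_link_speed_to_virtchnl(true, ICE_AQ_LINK_SPEED_25GB)
+ *              returns ICE_LINK_SPEED_25000MBPS (the speed in Mbps);
+ *      ice_conv_link_speed_to_virtchnl(false, ICE_AQ_LINK_SPEED_25GB)
+ *              returns (u32)VIRTCHNL_LINK_SPEED_25GB.
+ */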
+
+/* The mailbox overflow detection algorithm helps to check whether a
+ * malicious VF may be transmitting too many MBX messages to the PF.
+ * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
+ * driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
+ * The struct ice_mbx_snapshot helps to track and traverse a static window of
+ * messages within the mailbox queue while looking for a malicious VF.
+ *
+ * 2. When the caller starts processing its mailbox queue in response to an
+ * interrupt, the structure ice_mbx_snapshot is expected to be cleared before
+ * the algorithm can be run for the first time for that interrupt. This can be
+ * done via ice_mbx_reset_snapshot().
+ *
+ * 3. For every message read by the caller from the MBX Queue, the caller must
+ * call the detection algorithm's entry function ice_mbx_vf_state_handler().
+ * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
+ * filled as it is required to be passed to the algorithm.
+ *
+ * 4. Every time a message is read from the MBX queue, a VF ID is received and
+ * passed to the state handler. The boolean output is_malvf of the state
+ * handler ice_mbx_vf_state_handler() indicates to the caller whether this VF
+ * is malicious or not (see the caller-loop sketch after the defines below).
+ *
+ * 5. When a VF is identified to be malicious, the caller can send a message
+ * to the system administrator. The caller can invoke ice_mbx_report_malvf()
+ * to help determine if a malicious VF is to be reported or not. This function
+ * requires the caller to maintain a global bitmap to track all malicious VFs
+ * and pass that to ice_mbx_report_malvf() along with the VFID which was identified
+ * to be malicious by ice_mbx_vf_state_handler().
+ *
+ * 6. The global bitmap maintained by PF can be cleared completely if PF is in
+ * reset or the bit corresponding to a VF can be cleared if that VF is in reset.
+ * When a VF is shut down and brought back up, we assume the newly brought-up
+ * VF is not malicious, and hence report it again if it is later found to be
+ * malicious.
+ *
+ * 7. The function ice_mbx_reset_snapshot() is called to reset the information
+ * in ice_mbx_snapshot for every new mailbox interrupt handled.
+ *
+ * 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated
+ * when driver is unloaded.
+ */
+#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
+/* Using the highest value for an unsigned 16-bit value 0xFFFF to indicate that
+ * the max messages check must be ignored in the algorithm
+ */
+#define ICE_IGNORE_MAX_MSG_CNT 0xFFFF
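+
+/* Sketch of the caller loop implied by steps 2-4 above (illustrative;
+ * messages_pending(), read_next_mbx_msg() and handle_malicious_vf() are
+ * placeholders, and mbx_data is filled by the caller before each call):
+ *
+ *      while (messages_pending()) {
+ *              u16 vf_id = read_next_mbx_msg(&mbx_data);
+ *              bool malvf = false;
+ *
+ *              if (!ice_mbx_vf_state_handler(hw, &mbx_data, vf_id, &malvf) &&
+ *                  malvf)
+ *                      handle_malicious_vf(vf_id);
+ *      }
+ */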
+
+/**
+ * ice_mbx_traverse - Pass through mailbox snapshot
+ * @hw: pointer to the HW struct
+ * @new_state: new algorithm state
+ *
+ * Traverse the mailbox static snapshot without checking
+ * for malicious VFs.
+ */
+static void
+ice_mbx_traverse(struct ice_hw *hw,
+                enum ice_mbx_snapshot_state *new_state)
+{
+       struct ice_mbx_snap_buffer_data *snap_buf;
+       u32 num_iterations;
+
+       snap_buf = &hw->mbx_snapshot.mbx_buf;
+
+       /* Since the mailbox buffer is circular, apply a mask
+        * to the incremented iteration count.
+        */
+       num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);
+
+       /* Check either of the below conditions to exit snapshot traversal:
+        * Condition-1: the number of iterations in the mailbox equals the
+        * mailbox head, which indicates that we have reached the end of the
+        * static snapshot.
+        * Condition-2: the maximum number of messages serviced per interrupt
+        * is not the "ignore" sentinel and the number of messages processed
+        * is greater than or equal to the maximum number of mailbox entries
+        * serviced in the current work item.
+        */
+       if (num_iterations == snap_buf->head ||
+           (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
+            ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
+               *new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+}
+
+/**
+ * ice_mbx_detect_malvf - Detect malicious VF in snapshot
+ * @hw: pointer to the HW struct
+ * @vf_id: relative virtual function ID
+ * @new_state: new algorithm state
+ * @is_malvf: boolean output to indicate if VF is malicious
+ *
+ * This function tracks the number of asynchronous messages
+ * sent per VF and marks the VF as malicious if it exceeds
+ * the permissible number of messages to send.
+ */
+static int
+ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
+                    enum ice_mbx_snapshot_state *new_state,
+                    bool *is_malvf)
+{
+       struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+       if (vf_id >= snap->mbx_vf.vfcntr_len)
+               return -EIO;
+
+       /* increment the message count in the VF array */
+       snap->mbx_vf.vf_cntr[vf_id]++;
+
+       if (snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD)
+               *is_malvf = true;
+
+       /* continue to iterate through the mailbox snapshot */
+       ice_mbx_traverse(hw, new_state);
+
+       return 0;
+}
+
+/**
+ * ice_mbx_reset_snapshot - Reset mailbox snapshot structure
+ * @snap: pointer to mailbox snapshot structure in the ice_hw struct
+ *
+ * Reset the mailbox snapshot structure and clear VF counter array.
+ */
+static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
+{
+       u32 vfcntr_len;
+
+       if (!snap || !snap->mbx_vf.vf_cntr)
+               return;
+
+       /* Clear VF counters. */
+       vfcntr_len = snap->mbx_vf.vfcntr_len;
+       if (vfcntr_len)
+               memset(snap->mbx_vf.vf_cntr, 0,
+                      (vfcntr_len * sizeof(*snap->mbx_vf.vf_cntr)));
+
+       /* Reset mailbox snapshot for a new capture. */
+       memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+       snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+}
+
+/**
+ * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
+ * @hw: pointer to the HW struct
+ * @mbx_data: pointer to structure containing mailbox data
+ * @vf_id: relative virtual function (VF) ID
+ * @is_malvf: boolean output to indicate if VF is malicious
+ *
+ * The function serves as an entry point for the malicious VF
+ * detection algorithm by handling the different states and state
+ * transitions of the algorithm:
+ * New snapshot: This state is entered when creating a new static
+ * snapshot. The data from any previous mailbox snapshot is
+ * cleared and a new capture of the mailbox head and tail is
+ * logged. This will be the new static snapshot to detect
+ * asynchronous messages sent by VFs. On capturing the snapshot
+ * and depending on whether the number of pending messages in that
+ * snapshot exceeds the watermark value, the state machine enters the
+ * traverse or detect state.
+ * Traverse: If the pending message count is below the watermark,
+ * iterate through the snapshot without taking any action on the VF.
+ * Detect: If the pending message count exceeds the watermark, traverse
+ * the static snapshot and look for a malicious VF.
+ */
+int
+ice_mbx_vf_state_handler(struct ice_hw *hw,
+                        struct ice_mbx_data *mbx_data, u16 vf_id,
+                        bool *is_malvf)
+{
+       struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+       struct ice_mbx_snap_buffer_data *snap_buf;
+       struct ice_ctl_q_info *cq = &hw->mailboxq;
+       enum ice_mbx_snapshot_state new_state;
+       int status = 0;
+
+       if (!is_malvf || !mbx_data)
+               return -EINVAL;
+
+       /* When entering the mailbox state machine assume that the VF
+        * is not malicious until detected.
+        */
+       *is_malvf = false;
+
+       /* Check that the max messages allowed to be processed while
+        * servicing the current interrupt is greater than the defined
+        * AVF message threshold.
+        */
+       if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
+               return -EINVAL;
+
+       /* The watermark value must not be less than the threshold limit
+        * set for the number of asynchronous messages a VF can send to
+        * the mailbox, nor greater than the maximum number of messages
+        * in the mailbox serviced in the current interrupt.
+        */
+       if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
+           mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
+               return -EINVAL;
+
+       new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
+       snap_buf = &snap->mbx_buf;
+
+       switch (snap_buf->state) {
+       case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
+               /* Clear any previously held data in mailbox snapshot structure. */
+               ice_mbx_reset_snapshot(snap);
+
+               /* Collect the pending ARQ count, number of messages processed and
+                * the maximum number of messages allowed to be processed from the
+                * Mailbox for current interrupt.
+                */
+               snap_buf->num_pending_arq = mbx_data->num_pending_arq;
+               snap_buf->num_msg_proc = mbx_data->num_msg_proc;
+               snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;
+
+               /* Capture a new static snapshot of the mailbox by logging the
+                * head and tail of snapshot and set num_iterations to the tail
+                * value to mark the start of the iteration through the snapshot.
+                */
+               snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
+                                                 mbx_data->num_pending_arq);
+               snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
+               snap_buf->num_iterations = snap_buf->tail;
+
+               /* Pending ARQ messages returned by ice_clean_rq_elem
+                * is the difference between the head and tail of the
+                * mailbox queue. Comparing this value against the watermark
+                * helps to check if we potentially have malicious VFs.
+                */
+               if (snap_buf->num_pending_arq >=
+                   mbx_data->async_watermark_val) {
+                       new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
+                       status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
+               } else {
+                       new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
+                       ice_mbx_traverse(hw, &new_state);
+               }
+               break;
+
+       case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
+               new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
+               ice_mbx_traverse(hw, &new_state);
+               break;
+
+       case ICE_MAL_VF_DETECT_STATE_DETECT:
+               new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
+               status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
+               break;
+
+       default:
+               new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
+               status = -EIO;
+       }
+
+       snap_buf->state = new_state;
+
+       return status;
+}
+
+/**
+ * ice_mbx_report_malvf - Track and note malicious VF
+ * @hw: pointer to the HW struct
+ * @all_malvfs: all malicious VFs tracked by PF
+ * @bitmap_len: length of bitmap in bits
+ * @vf_id: relative virtual function ID of the malicious VF
+ * @report_malvf: boolean to indicate if malicious VF must be reported
+ *
+ * This function will update a bitmap that keeps track of the malicious
+ * VFs attached to the PF. A malicious VF must be reported only once
+ * between VF resets or reloads, so the function checks the input vf_id
+ * against the bitmap to verify whether the VF has already been detected
+ * in a previous mailbox iteration.
+ */
+int
+ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
+                    u16 bitmap_len, u16 vf_id, bool *report_malvf)
+{
+       if (!all_malvfs || !report_malvf)
+               return -EINVAL;
+
+       *report_malvf = false;
+
+       if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
+               return -EINVAL;
+
+       if (vf_id >= bitmap_len)
+               return -EIO;
+
+       /* Set the bit for this VF; report it only if it was not already set */
+       if (!test_and_set_bit(vf_id, all_malvfs))
+               *report_malvf = true;
+
+       return 0;
+}
+
+/**
+ * ice_mbx_clear_malvf - Clear VF bitmap and counter for VF ID
+ * @snap: pointer to the mailbox snapshot structure
+ * @all_malvfs: all malicious VFs tracked by PF
+ * @bitmap_len: length of bitmap in bits
+ * @vf_id: relative virtual function ID of the malicious VF
+ *
+ * In case of a VF reset, this function can be called to clear
+ * the bit corresponding to the VF ID in the bitmap tracking all
+ * malicious VFs attached to the PF. The function also clears the
+ * VF counter array at the index of the VF ID. This is to ensure
+ * that the new VF loaded is not considered malicious before going
+ * through the overflow detection algorithm.
+ */
+int
+ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
+                   u16 bitmap_len, u16 vf_id)
+{
+       if (!snap || !all_malvfs)
+               return -EINVAL;
+
+       if (bitmap_len < snap->mbx_vf.vfcntr_len)
+               return -EINVAL;
+
+       /* Ensure VF ID value is not larger than bitmap or VF counter length */
+       if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
+               return -EIO;
+
+       /* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
+       clear_bit(vf_id, all_malvfs);
+
+       /* Clear the VF counter in the mailbox snapshot structure for that VF ID.
+        * This is to ensure that if a VF is unloaded and a new one brought back
+        * up with the same VF ID while a snapshot is in traverse or detect
+        * state, the counter for that VF ID does not increment on top of
+        * existing values in the mailbox overflow detection algorithm.
+        */
+       snap->mbx_vf.vf_cntr[vf_id] = 0;
+
+       return 0;
+}
+
+/**
+ * ice_mbx_init_snapshot - Initialize mailbox snapshot structure
+ * @hw: pointer to the hardware structure
+ * @vf_count: number of VFs allocated on a PF
+ *
+ * Clear the mailbox snapshot structure and allocate memory
+ * for the VF counter array based on the number of VFs allocated
+ * on that PF.
+ *
+ * Assumption: This function will assume ice_get_caps() has already been
+ * called to ensure that the vf_count can be compared against the number
+ * of VFs supported as defined in the functional capabilities of the device.
+ */
+int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
+{
+       struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+       /* Ensure that the number of VFs allocated is non-zero and
+        * is not greater than the number of supported VFs defined in
+        * the functional capabilities of the PF.
+        */
+       if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
+               return -EINVAL;
+
+       snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count,
+                                           sizeof(*snap->mbx_vf.vf_cntr),
+                                           GFP_KERNEL);
+       if (!snap->mbx_vf.vf_cntr)
+               return -ENOMEM;
+
+       /* Set the VF counter length to the number of VFs allocated per
+        * the PF's functional capabilities.
+        */
+       snap->mbx_vf.vfcntr_len = vf_count;
+
+       /* Clear mbx_buf in the mailbox snapshot structure and set the
+        * snapshot state to a new capture.
+        */
+       memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+       snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+
+       return 0;
+}
+
+/**
+ * ice_mbx_deinit_snapshot - Free mailbox snapshot structure
+ * @hw: pointer to the hardware structure
+ *
+ * Clear the mailbox snapshot structure and free the VF counter array.
+ */
+void ice_mbx_deinit_snapshot(struct ice_hw *hw)
+{
+       struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+       /* Free VF counter array and reset VF counter length */
+       devm_kfree(ice_hw_to_dev(hw), snap->mbx_vf.vf_cntr);
+       snap->mbx_vf.vfcntr_len = 0;
+
+       /* Clear mbx_buf in the mailbox snapshot structure */
+       memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
new file mode 100644 (file)
index 0000000..582716e
--- /dev/null
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_VF_MBX_H_
+#define _ICE_VF_MBX_H_
+
+#include "ice_type.h"
+#include "ice_controlq.h"
+
+/* Define the mailbox message threshold as 63 asynchronous
+ * pending messages. Normal VF functionality does not require
+ * sending more than 63 pending asynchronous messages.
+ */
+#define ICE_ASYNC_VF_MSG_THRESHOLD     63
+
+#ifdef CONFIG_PCI_IOV
+int
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+                     u8 *msg, u16 msglen, struct ice_sq_cd *cd);
+
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
+int
+ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
+                        u16 vf_id, bool *is_mal_vf);
+int
+ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
+                   u16 bitmap_len, u16 vf_id);
+int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
+void ice_mbx_deinit_snapshot(struct ice_hw *hw);
+int
+ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
+                    u16 bitmap_len, u16 vf_id, bool *report_malvf);
+#else /* CONFIG_PCI_IOV */
+static inline int
+ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
+                     u16 __always_unused vfid, u32 __always_unused v_opcode,
+                     u32 __always_unused v_retval, u8 __always_unused *msg,
+                     u16 __always_unused msglen,
+                     struct ice_sq_cd __always_unused *cd)
+{
+       return 0;
+}
+
+static inline u32
+ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support,
+                               u16 __always_unused link_speed)
+{
+       return 0;
+}
+
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_VF_MBX_H_ */
index b16f946..5ecc0ee 100644 (file)
@@ -6,7 +6,7 @@
 #include "ice_vlan_mode.h"
 #include "ice.h"
 #include "ice_vf_vsi_vlan_ops.h"
-#include "ice_virtchnl_pf.h"
+#include "ice_sriov.h"
 
 static int
 noop_vlan_arg(struct ice_vsi __always_unused *vsi,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
new file mode 100644 (file)
index 0000000..3f1a638
--- /dev/null
@@ -0,0 +1,3785 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "ice_virtchnl.h"
+#include "ice_vf_lib_private.h"
+#include "ice.h"
+#include "ice_base.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+#include "ice_virtchnl_allowlist.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_vlan.h"
+#include "ice_flex_pipe.h"
+#include "ice_dcb_lib.h"
+
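+/* Map a virtchnl protocol header field enum to its bit in the per-header
+ * field selector bitmap
+ */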
+#define FIELD_SELECTOR(proto_hdr_field) \
+               BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
+
+struct ice_vc_hdr_match_type {
+       u32 vc_hdr;     /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
+       u32 ice_hdr;    /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
+};
+
+static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
+       {VIRTCHNL_PROTO_HDR_NONE,       ICE_FLOW_SEG_HDR_NONE},
+       {VIRTCHNL_PROTO_HDR_ETH,        ICE_FLOW_SEG_HDR_ETH},
+       {VIRTCHNL_PROTO_HDR_S_VLAN,     ICE_FLOW_SEG_HDR_VLAN},
+       {VIRTCHNL_PROTO_HDR_C_VLAN,     ICE_FLOW_SEG_HDR_VLAN},
+       {VIRTCHNL_PROTO_HDR_IPV4,       ICE_FLOW_SEG_HDR_IPV4 |
+                                       ICE_FLOW_SEG_HDR_IPV_OTHER},
+       {VIRTCHNL_PROTO_HDR_IPV6,       ICE_FLOW_SEG_HDR_IPV6 |
+                                       ICE_FLOW_SEG_HDR_IPV_OTHER},
+       {VIRTCHNL_PROTO_HDR_TCP,        ICE_FLOW_SEG_HDR_TCP},
+       {VIRTCHNL_PROTO_HDR_UDP,        ICE_FLOW_SEG_HDR_UDP},
+       {VIRTCHNL_PROTO_HDR_SCTP,       ICE_FLOW_SEG_HDR_SCTP},
+       {VIRTCHNL_PROTO_HDR_PPPOE,      ICE_FLOW_SEG_HDR_PPPOE},
+       {VIRTCHNL_PROTO_HDR_GTPU_IP,    ICE_FLOW_SEG_HDR_GTPU_IP},
+       {VIRTCHNL_PROTO_HDR_GTPU_EH,    ICE_FLOW_SEG_HDR_GTPU_EH},
+       {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
+                                       ICE_FLOW_SEG_HDR_GTPU_DWN},
+       {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
+                                       ICE_FLOW_SEG_HDR_GTPU_UP},
+       {VIRTCHNL_PROTO_HDR_L2TPV3,     ICE_FLOW_SEG_HDR_L2TPV3},
+       {VIRTCHNL_PROTO_HDR_ESP,        ICE_FLOW_SEG_HDR_ESP},
+       {VIRTCHNL_PROTO_HDR_AH,         ICE_FLOW_SEG_HDR_AH},
+       {VIRTCHNL_PROTO_HDR_PFCP,       ICE_FLOW_SEG_HDR_PFCP_SESSION},
+};
+
+struct ice_vc_hash_field_match_type {
+       u32 vc_hdr;             /* virtchnl headers
+                                * (VIRTCHNL_PROTO_HDR_XXX)
+                                */
+       u32 vc_hash_field;      /* virtchnl hash fields selector
+                                * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
+                                */
+       u64 ice_hash_field;     /* ice hash fields
+                                * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
+                                */
+};
+
+static const struct
+ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
+       {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
+       {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
+       {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
+               ICE_FLOW_HASH_ETH},
+       {VIRTCHNL_PROTO_HDR_ETH,
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
+       {VIRTCHNL_PROTO_HDR_S_VLAN,
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
+       {VIRTCHNL_PROTO_HDR_C_VLAN,
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
+       {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
+       {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
+       {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+               ICE_FLOW_HASH_IPV4},
+       {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+       {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+       {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+               ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+       {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+       {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
+       {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
+       {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+               ICE_FLOW_HASH_IPV6},
+       {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
+               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+       {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
+               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+       {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+               ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+       {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+       {VIRTCHNL_PROTO_HDR_TCP,
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
+       {VIRTCHNL_PROTO_HDR_TCP,
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
+       {VIRTCHNL_PROTO_HDR_TCP,
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+               ICE_FLOW_HASH_TCP_PORT},
+       {VIRTCHNL_PROTO_HDR_UDP,
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
+       {VIRTCHNL_PROTO_HDR_UDP,
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
+       {VIRTCHNL_PROTO_HDR_UDP,
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+               ICE_FLOW_HASH_UDP_PORT},
+       {VIRTCHNL_PROTO_HDR_SCTP,
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
+       {VIRTCHNL_PROTO_HDR_SCTP,
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
+       {VIRTCHNL_PROTO_HDR_SCTP,
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+               ICE_FLOW_HASH_SCTP_PORT},
+       {VIRTCHNL_PROTO_HDR_PPPOE,
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
+       {VIRTCHNL_PROTO_HDR_GTPU_IP,
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
+       {VIRTCHNL_PROTO_HDR_L2TPV3,
+               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
+       {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
+       {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
+       {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
+               BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
+};
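+
+/* For example, per the table above, a VIRTCHNL_PROTO_HDR_IPV4 header with a
+ * field selector of IPV4_SRC | IPV4_DST maps to the ICE_FLOW_HASH_IPV4 set
+ * of hash fields.
+ */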
+
+/**
+ * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
+ * @pf: pointer to the PF structure
+ * @v_opcode: operation code
+ * @v_retval: return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ */
+static void
+ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
+                   enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
+{
+       struct ice_hw *hw = &pf->hw;
+       struct ice_vf *vf;
+       unsigned int bkt;
+
+       mutex_lock(&pf->vfs.table_lock);
+       ice_for_each_vf(pf, bkt, vf) {
+               /* Not all VFs are enabled, so skip the ones that are not */
+               if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+                   !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+                       continue;
+
+               /* Ignore return value on purpose - a given VF may fail, but
+                * we need to keep going and send to all of them
+                */
+               ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
+                                     msglen, NULL);
+       }
+       mutex_unlock(&pf->vfs.table_lock);
+}
+
+/**
+ * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
+ * @vf: pointer to the VF structure
+ * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
+ * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
+ * @link_up: whether or not to set the link up/down
+ */
+static void
+ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
+                int ice_link_speed, bool link_up)
+{
+       if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+               pfe->event_data.link_event_adv.link_status = link_up;
+               /* Speed in Mbps */
+               pfe->event_data.link_event_adv.link_speed =
+                       ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
+       } else {
+               pfe->event_data.link_event.link_status = link_up;
+               /* Legacy method for virtchnl link speeds */
+               pfe->event_data.link_event.link_speed =
+                       (enum virtchnl_link_speed)
+                       ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
+       }
+}
+
+/**
+ * ice_vc_notify_vf_link_state - Inform a VF of link status
+ * @vf: pointer to the VF structure
+ *
+ * send a link status message to a single VF
+ */
+void ice_vc_notify_vf_link_state(struct ice_vf *vf)
+{
+       struct virtchnl_pf_event pfe = { 0 };
+       struct ice_hw *hw = &vf->pf->hw;
+
+       pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+       pfe.severity = PF_EVENT_SEVERITY_INFO;
+
+       if (ice_is_vf_link_up(vf))
+               ice_set_pfe_link(vf, &pfe,
+                                hw->port_info->phy.link_info.link_speed, true);
+       else
+               ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
+
+       ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+                             VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
+                             sizeof(pfe), NULL);
+}
+
+/**
+ * ice_vc_notify_link_state - Inform all VFs on a PF of link status
+ * @pf: pointer to the PF structure
+ */
+void ice_vc_notify_link_state(struct ice_pf *pf)
+{
+       struct ice_vf *vf;
+       unsigned int bkt;
+
+       mutex_lock(&pf->vfs.table_lock);
+       ice_for_each_vf(pf, bkt, vf)
+               ice_vc_notify_vf_link_state(vf);
+       mutex_unlock(&pf->vfs.table_lock);
+}
+
+/**
+ * ice_vc_notify_reset - Send pending reset message to all VFs
+ * @pf: pointer to the PF structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ */
+void ice_vc_notify_reset(struct ice_pf *pf)
+{
+       struct virtchnl_pf_event pfe;
+
+       if (!ice_has_vfs(pf))
+               return;
+
+       pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+       pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+       ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
+                           (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
+}
+
+/**
+ * ice_vc_send_msg_to_vf - Send message to VF
+ * @vf: pointer to the VF info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to VF
+ */
+int
+ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+                     enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
+{
+       struct device *dev;
+       struct ice_pf *pf;
+       int aq_ret;
+
+       pf = vf->pf;
+       dev = ice_pf_to_dev(pf);
+
+       aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
+                                      msg, msglen, NULL);
+       if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
+               dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
+                        vf->vf_id, aq_ret,
+                        ice_aq_str(pf->hw.mailboxq.sq_last_status));
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/**
+ * ice_vc_get_ver_msg - handle the VF's API version request
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request the API version used by the PF
+ */
+static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
+{
+       struct virtchnl_version_info info = {
+               VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
+       };
+
+       vf->vf_ver = *(struct virtchnl_version_info *)msg;
+       /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+       if (VF_IS_V10(&vf->vf_ver))
+               info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
+                                    VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
+                                    sizeof(struct virtchnl_version_info));
+}
+
+/**
+ * ice_vc_get_max_frame_size - get max frame size allowed for VF
+ * @vf: VF used to determine max frame size
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN, so the PF needs to account for this both in max frame
+ * size checks and when sending the max frame size to the VF.
+ */
+static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
+{
+       struct ice_port_info *pi = ice_vf_get_port_info(vf);
+       u16 max_frame_size;
+
+       max_frame_size = pi->phy.link_info.max_frame_size;
+
+       if (ice_vf_is_port_vlan_ena(vf))
+               max_frame_size -= VLAN_HLEN;
+
+       return max_frame_size;
+}
+
+/**
+ * ice_vc_get_vf_res_msg - handle the VF's resource request
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request its resources
+ */
+static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
+{
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_vf_resource *vfres = NULL;
+       struct ice_hw *hw = &vf->pf->hw;
+       struct ice_vsi *vsi;
+       int len = 0;
+       int ret;
+
+       if (ice_check_vf_init(vf)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto err;
+       }
+
+       len = sizeof(struct virtchnl_vf_resource);
+
+       vfres = kzalloc(len, GFP_KERNEL);
+       if (!vfres) {
+               v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+               len = 0;
+               goto err;
+       }
+       if (VF_IS_V11(&vf->vf_ver))
+               vf->driver_caps = *(u32 *)msg;
+       else
+               vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
+                                 VIRTCHNL_VF_OFFLOAD_RSS_REG |
+                                 VIRTCHNL_VF_OFFLOAD_VLAN;
+
+       vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto err;
+       }
+
+       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+               /* VLAN offloads based on current device configuration */
+               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN_V2;
+       } else if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) {
+               /* allow VF to negotiate VIRTCHNL_VF_OFFLOAD_VLAN explicitly
+                * for these two conditions, which amounts to guest VLAN
+                * filtering and offloads being based on the inner VLAN or the
+                * inner/single VLAN respectively, and don't allow the VF to
+                * negotiate VIRTCHNL_VF_OFFLOAD_VLAN in any other case
+                */
+               if (ice_is_dvm_ena(hw) && ice_vf_is_port_vlan_ena(vf)) {
+                       vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
+               } else if (!ice_is_dvm_ena(hw) &&
+                          !ice_vf_is_port_vlan_ena(vf)) {
+                       vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
+                       /* configure backward compatible support for VFs that
+                        * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is
+                        * configured in SVM, and no port VLAN is configured
+                        */
+                       ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi);
+               } else if (ice_is_dvm_ena(hw)) {
+                       /* configure software offloaded VLAN support when DVM
+                        * is enabled, but no port VLAN is enabled
+                        */
+                       ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi);
+               }
+       }
+
+       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
+       } else {
+               if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+                       vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+               else
+                       vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
+       }
+
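+       /* Echo back each remaining offload capability that the VF requested */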
+       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
+
+       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+
+       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
+               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
+
+       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
+               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+
+       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+
+       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+
+       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
+               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+
+       if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
+               vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
+
+       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
+               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
+
+       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
+               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;
+
+       vfres->num_vsis = 1;
+       /* Tx and Rx queue counts are equal for a VF */
+       vfres->num_queue_pairs = vsi->num_txq;
+       vfres->max_vectors = vf->pf->vfs.num_msix_per;
+       vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
+       vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+       vfres->max_mtu = ice_vc_get_max_frame_size(vf);
+
+       vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
+       vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
+       vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
+       ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
+                       vf->hw_lan_addr.addr);
+
+       /* match guest capabilities */
+       vf->driver_caps = vfres->vf_cap_flags;
+
+       ice_vc_set_caps_allowlist(vf);
+       ice_vc_set_working_allowlist(vf);
+
+       set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+
+err:
+       /* send the response back to the VF */
+       ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
+                                   (u8 *)vfres, len);
+
+       kfree(vfres);
+       return ret;
+}
+
+/**
+ * ice_vc_reset_vf_msg - handle the VF's reset request
+ * @vf: pointer to the VF info
+ *
+ * Called from the VF to reset itself. Unlike other virtchnl
+ * messages, the PF driver does not send a response back to
+ * the VF.
+ */
+static void ice_vc_reset_vf_msg(struct ice_vf *vf)
+{
+       if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
+               ice_reset_vf(vf, 0);
+}
+
+/**
+ * ice_find_vsi_from_id - find the VSI with the given ID
+ * @pf: the PF structure to search for the VSI
+ * @id: ID of the VSI being searched for
+ *
+ * Search for the VSI with the given ID.
+ */
+static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
+{
+       int i;
+
+       ice_for_each_vsi(pf, i)
+               if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
+                       return pf->vsi[i];
+
+       return NULL;
+}
+
+/**
+ * ice_vc_isvalid_vsi_id - check for a valid VSI ID
+ * @vf: pointer to the VF info
+ * @vsi_id: VF relative VSI ID
+ *
+ * Check for a valid VSI ID.
+ */
+bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
+{
+       struct ice_pf *pf = vf->pf;
+       struct ice_vsi *vsi;
+
+       vsi = ice_find_vsi_from_id(pf, vsi_id);
+
+       return (vsi && (vsi->vf == vf));
+}
+
+/**
+ * ice_vc_isvalid_q_id - check for a valid queue ID
+ * @vf: pointer to the VF info
+ * @vsi_id: VSI ID
+ * @qid: VSI relative queue ID
+ *
+ * Check for a valid queue ID.
+ */
+static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
+{
+       struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
+       /* allocated Tx and Rx queues should always be equal for a VF VSI */
+       return (vsi && (qid < vsi->alloc_txq));
+}
+
+/**
+ * ice_vc_isvalid_ring_len - check for a valid ring length
+ * @ring_len: length of ring
+ *
+ * Check for a valid ring count: it must be zero, or a multiple of
+ * ICE_REQ_DESC_MULTIPLE within [ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC].
+ */
+static bool ice_vc_isvalid_ring_len(u16 ring_len)
+{
+       return ring_len == 0 ||
+              (ring_len >= ICE_MIN_NUM_DESC &&
+               ring_len <= ICE_MAX_NUM_DESC &&
+               !(ring_len % ICE_REQ_DESC_MULTIPLE));
+}
+
+/**
+ * ice_vc_validate_pattern - validate a virtchnl protocol header pattern
+ * @vf: pointer to the VF info
+ * @proto: virtchnl protocol headers
+ *
+ * Validate whether the pattern is supported.
+ *
+ * Return: true on success, false on error.
+ */
+bool
+ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
+{
+       bool is_ipv4 = false;
+       bool is_ipv6 = false;
+       bool is_udp = false;
+       u16 ptype = -1;
+       int i = 0;
+
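+       /* Walk the header chain, tracking the innermost packet type seen so
+        * far; tunnel and security headers (GTPU, L2TPV3, ESP, AH, PFCP)
+        * determine the final type and end the walk immediately.
+        */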
+       while (i < proto->count &&
+              proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
+               switch (proto->proto_hdr[i].type) {
+               case VIRTCHNL_PROTO_HDR_ETH:
+                       ptype = ICE_PTYPE_MAC_PAY;
+                       break;
+               case VIRTCHNL_PROTO_HDR_IPV4:
+                       ptype = ICE_PTYPE_IPV4_PAY;
+                       is_ipv4 = true;
+                       break;
+               case VIRTCHNL_PROTO_HDR_IPV6:
+                       ptype = ICE_PTYPE_IPV6_PAY;
+                       is_ipv6 = true;
+                       break;
+               case VIRTCHNL_PROTO_HDR_UDP:
+                       if (is_ipv4)
+                               ptype = ICE_PTYPE_IPV4_UDP_PAY;
+                       else if (is_ipv6)
+                               ptype = ICE_PTYPE_IPV6_UDP_PAY;
+                       is_udp = true;
+                       break;
+               case VIRTCHNL_PROTO_HDR_TCP:
+                       if (is_ipv4)
+                               ptype = ICE_PTYPE_IPV4_TCP_PAY;
+                       else if (is_ipv6)
+                               ptype = ICE_PTYPE_IPV6_TCP_PAY;
+                       break;
+               case VIRTCHNL_PROTO_HDR_SCTP:
+                       if (is_ipv4)
+                               ptype = ICE_PTYPE_IPV4_SCTP_PAY;
+                       else if (is_ipv6)
+                               ptype = ICE_PTYPE_IPV6_SCTP_PAY;
+                       break;
+               case VIRTCHNL_PROTO_HDR_GTPU_IP:
+               case VIRTCHNL_PROTO_HDR_GTPU_EH:
+                       if (is_ipv4)
+                               ptype = ICE_MAC_IPV4_GTPU;
+                       else if (is_ipv6)
+                               ptype = ICE_MAC_IPV6_GTPU;
+                       goto out;
+               case VIRTCHNL_PROTO_HDR_L2TPV3:
+                       if (is_ipv4)
+                               ptype = ICE_MAC_IPV4_L2TPV3;
+                       else if (is_ipv6)
+                               ptype = ICE_MAC_IPV6_L2TPV3;
+                       goto out;
+               case VIRTCHNL_PROTO_HDR_ESP:
+                       if (is_ipv4)
+                               ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
+                                               ICE_MAC_IPV4_ESP;
+                       else if (is_ipv6)
+                               ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
+                                               ICE_MAC_IPV6_ESP;
+                       goto out;
+               case VIRTCHNL_PROTO_HDR_AH:
+                       if (is_ipv4)
+                               ptype = ICE_MAC_IPV4_AH;
+                       else if (is_ipv6)
+                               ptype = ICE_MAC_IPV6_AH;
+                       goto out;
+               case VIRTCHNL_PROTO_HDR_PFCP:
+                       if (is_ipv4)
+                               ptype = ICE_MAC_IPV4_PFCP_SESSION;
+                       else if (is_ipv6)
+                               ptype = ICE_MAC_IPV6_PFCP_SESSION;
+                       goto out;
+               default:
+                       break;
+               }
+               i++;
+       }
+
+out:
+       return ice_hw_ptype_ena(&vf->pf->hw, ptype);
+}
+
+/**
+ * ice_vc_parse_rss_cfg - parses hash fields and headers from
+ * a specific virtchnl RSS cfg
+ * @hw: pointer to the hardware
+ * @rss_cfg: pointer to the virtchnl RSS cfg
+ * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
+ * to configure
+ * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
+ *
+ * Return true if all the protocol header and hash fields in the RSS cfg
+ * could be parsed, else return false.
+ *
+ * This function parses the virtchnl RSS cfg into the intended
+ * hash fields and the intended headers for RSS configuration.
+ */
+static bool
+ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
+                    u32 *addl_hdrs, u64 *hash_flds)
+{
+       const struct ice_vc_hash_field_match_type *hf_list;
+       const struct ice_vc_hdr_match_type *hdr_list;
+       int i, hf_list_len, hdr_list_len;
+
+       hf_list = ice_vc_hash_field_list;
+       hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
+       hdr_list = ice_vc_hdr_list;
+       hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
+
+       for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
+               struct virtchnl_proto_hdr *proto_hdr =
+                                       &rss_cfg->proto_hdrs.proto_hdr[i];
+               bool hdr_found = false;
+               int j;
+
+               /* Find matched ice headers according to virtchnl headers. */
+               for (j = 0; j < hdr_list_len; j++) {
+                       struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
+
+                       if (proto_hdr->type == hdr_map.vc_hdr) {
+                               *addl_hdrs |= hdr_map.ice_hdr;
+                               hdr_found = true;
+                       }
+               }
+
+               if (!hdr_found)
+                       return false;
+
+               /* Find matched ice hash fields according to
+                * virtchnl hash fields.
+                */
+               for (j = 0; j < hf_list_len; j++) {
+                       struct ice_vc_hash_field_match_type hf_map = hf_list[j];
+
+                       if (proto_hdr->type == hf_map.vc_hdr &&
+                           proto_hdr->field_selector == hf_map.vc_hash_field) {
+                               *hash_flds |= hf_map.ice_hash_field;
+                               break;
+                       }
+               }
+       }
+
+       return true;
+}
+
+/**
+ * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
+ * RSS offloads
+ * @caps: VF driver negotiated capabilities
+ *
+ * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
+ * else return false
+ */
+static bool ice_vf_adv_rss_offload_ena(u32 caps)
+{
+       return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
+}
+
+/**
+ * ice_vc_handle_rss_cfg - handle an RSS configuration message
+ * @vf: pointer to the VF info
+ * @msg: pointer to the message buffer
+ * @add: add an RSS config if true, otherwise delete an RSS config
+ *
+ * This function adds/deletes an RSS config
+ */
+static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
+{
+       u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
+       struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct device *dev = ice_pf_to_dev(vf->pf);
+       struct ice_hw *hw = &vf->pf->hw;
+       struct ice_vsi *vsi;
+
+       if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+               dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
+                       vf->vf_id);
+               v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+               goto error_param;
+       }
+
+       if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
+               dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
+                       vf->vf_id);
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
+           rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
+           rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
+               dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
+                       vf->vf_id);
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
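+       /* VIRTCHNL_RSS_ALG_R_ASYMMETRIC selects the VSI-wide XOR hash option,
+        * which is programmed through the VSI context rather than through a
+        * flow profile like the other algorithms handled below.
+        */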
+       if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
+               struct ice_vsi_ctx *ctx;
+               u8 lut_type, hash_type;
+               int status;
+
+               lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
+               hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
+                               ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+
+               ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+               if (!ctx) {
+                       v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+                       goto error_param;
+               }
+
+               ctx->info.q_opt_rss = ((lut_type <<
+                                       ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
+                                      ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
+                                      (hash_type &
+                                       ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+
+               /* Preserve existing queueing option setting */
+               ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
+                                         ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
+               ctx->info.q_opt_tc = vsi->info.q_opt_tc;
+               ctx->info.q_opt_flags = vsi->info.q_opt_rss;
+
+               ctx->info.valid_sections =
+                               cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
+
+               status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
+               if (status) {
+                       dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
+                               status, ice_aq_str(hw->adminq.sq_last_status));
+                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               } else {
+                       vsi->info.q_opt_rss = ctx->info.q_opt_rss;
+               }
+
+               kfree(ctx);
+       } else {
+               u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
+               u64 hash_flds = ICE_HASH_INVALID;
+
+               if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
+                                         &hash_flds)) {
+                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                       goto error_param;
+               }
+
+               if (add) {
+                       if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
+                                           addl_hdrs)) {
+                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                               dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
+                                       vsi->vsi_num, v_ret);
+                       }
+               } else {
+                       int status;
+
+                       status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
+                                                addl_hdrs);
+                       /* We just ignore -ENOENT, because if two configurations
+                        * share the same profile, removing one of them actually
+                        * removes both, since the profile is deleted.
+                        */
+                       if (status && status != -ENOENT) {
+                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                               dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
+                                       vf->vf_id, status);
+                       }
+               }
+       }
+
+error_param:
+       return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_key - configure the VF's RSS key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS key
+ */
+static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
+{
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_rss_key *vrk =
+               (struct virtchnl_rss_key *)msg;
+       struct ice_vsi *vsi;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (ice_set_rss_key(vsi, vrk->key))
+               v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
+                                    NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_lut - configure the VF's RSS LUT
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS LUT
+ */
+static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
+{
+       struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct ice_vsi *vsi;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
+               v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
+                                    NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_promiscuous_mode_msg - configure VF VSI promiscuous mode
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure VF VSIs promiscuous mode
+ */
+static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
+{
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       bool rm_promisc, alluni = false, allmulti = false;
+       struct virtchnl_promisc_info *info =
+           (struct virtchnl_promisc_info *)msg;
+       struct ice_vsi_vlan_ops *vlan_ops;
+       int mcast_err = 0, ucast_err = 0;
+       struct ice_pf *pf = vf->pf;
+       struct ice_vsi *vsi;
+       struct device *dev;
+       int ret = 0;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       dev = ice_pf_to_dev(pf);
+       if (!ice_is_vf_trusted(vf)) {
+               dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
+                       vf->vf_id);
+               /* Leave v_ret alone, lie to the VF on purpose. */
+               goto error_param;
+       }
+
+       if (info->flags & FLAG_VF_UNICAST_PROMISC)
+               alluni = true;
+
+       if (info->flags & FLAG_VF_MULTICAST_PROMISC)
+               allmulti = true;
+
+       rm_promisc = !allmulti && !alluni;
+
+       vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+       if (rm_promisc)
+               ret = vlan_ops->ena_rx_filtering(vsi);
+       else
+               ret = vlan_ops->dis_rx_filtering(vsi);
+       if (ret) {
+               dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
+               bool set_dflt_vsi = alluni || allmulti;
+
+               if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
+                       /* only attempt to set the default forwarding VSI if
+                        * it's not currently set
+                        */
+                       ret = ice_set_dflt_vsi(pf->first_sw, vsi);
+               else if (!set_dflt_vsi &&
+                        ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
+                       /* only attempt to free the default forwarding VSI if we
+                        * are the owner
+                        */
+                       ret = ice_clear_dflt_vsi(pf->first_sw);
+
+               if (ret) {
+                       dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
+                               set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
+                       v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+                       goto error_param;
+               }
+       } else {
+               u8 mcast_m, ucast_m;
+
+               if (ice_vf_is_port_vlan_ena(vf) ||
+                   ice_vsi_has_non_zero_vlans(vsi)) {
+                       mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
+                       ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
+               } else {
+                       mcast_m = ICE_MCAST_PROMISC_BITS;
+                       ucast_m = ICE_UCAST_PROMISC_BITS;
+               }
+
+               if (alluni)
+                       ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
+               else
+                       ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
+
+               if (allmulti)
+                       mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
+               else
+                       mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
+
+               if (ucast_err || mcast_err)
+                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+       }
+
+       if (!mcast_err) {
+               if (allmulti &&
+                   !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+                       dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
+                                vf->vf_id);
+               else if (!allmulti &&
+                        test_and_clear_bit(ICE_VF_STATE_MC_PROMISC,
+                                           vf->vf_states))
+                       dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
+                                vf->vf_id);
+       }
+
+       if (!ucast_err) {
+               if (alluni &&
+                   !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
+                       dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
+                                vf->vf_id);
+               else if (!alluni &&
+                        test_and_clear_bit(ICE_VF_STATE_UC_PROMISC,
+                                           vf->vf_states))
+                       dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
+                                vf->vf_id);
+       }
+
+error_param:
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+                                    v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_get_stats_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to get VSI stats
+ */
+static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
+{
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_queue_select *vqs =
+               (struct virtchnl_queue_select *)msg;
+       struct ice_eth_stats stats = { 0 };
+       struct ice_vsi *vsi;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
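+       /* refresh the VSI's ethernet stats from HW before copying them out */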
+       ice_update_eth_stats(vsi);
+
+       stats = vsi->eth_stats;
+
+error_param:
+       /* send the response to the VF */
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
+                                    (u8 *)&stats, sizeof(stats));
+}
+
+/**
+ * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Return true on successful validation, else false
+ */
+static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
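+       /* reject an empty request, or any queue bit at or above the per-VF
+        * maximum
+        */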
+       if ((!vqs->rx_queues && !vqs->tx_queues) ||
+           vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
+           vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
+               return false;
+
+       return true;
+}
+
+/**
+ * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+       struct ice_hw *hw = &vsi->back->hw;
+       u32 pfq = vsi->txq_map[q_idx];
+       u32 reg;
+
+       reg = rd32(hw, QINT_TQCTL(pfq));
+
+       /* MSI-X index 0 in the VF's space is always for the OICR, which means
+        * this is most likely a poll mode VF driver, so don't enable an
+        * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+        */
+       if (!(reg & QINT_TQCTL_MSIX_INDX_M))
+               return;
+
+       wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+       struct ice_hw *hw = &vsi->back->hw;
+       u32 pfq = vsi->rxq_map[q_idx];
+       u32 reg;
+
+       reg = rd32(hw, QINT_RQCTL(pfq));
+
+       /* MSI-X index 0 in the VF's space is always for the OICR, which means
+        * this is most likely a poll mode VF driver, so don't enable an
+        * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+        */
+       if (!(reg & QINT_RQCTL_MSIX_INDX_M))
+               return;
+
+       wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_vc_ena_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to enable all or specific queue(s)
+ */
+static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_queue_select *vqs =
+           (struct virtchnl_queue_select *)msg;
+       struct ice_vsi *vsi;
+       unsigned long q_map;
+       u16 vf_q_id;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vc_validate_vqs_bitmaps(vqs)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       /* Enable only Rx rings, Tx rings were enabled by the FW when the
+        * Tx queue group list was configured and the context bits were
+        * programmed using ice_vsi_cfg_txqs
+        */
+       q_map = vqs->rx_queues;
+       for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+               if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                       goto error_param;
+               }
+
+               /* Skip queue if enabled */
+               if (test_bit(vf_q_id, vf->rxq_ena))
+                       continue;
+
+               if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
+                       dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
+                               vf_q_id, vsi->vsi_num);
+                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                       goto error_param;
+               }
+
+               ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
+               set_bit(vf_q_id, vf->rxq_ena);
+       }
+
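+       /* Tx rings were already enabled by FW at config time (see above), so
+        * only the Tx queue interrupt needs to be enabled here
+        */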
+       q_map = vqs->tx_queues;
+       for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+               if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                       goto error_param;
+               }
+
+               /* Skip queue if enabled */
+               if (test_bit(vf_q_id, vf->txq_ena))
+                       continue;
+
+               ice_vf_ena_txq_interrupt(vsi, vf_q_id);
+               set_bit(vf_q_id, vf->txq_ena);
+       }
+
+       /* Set flag to indicate that queues are enabled */
+       if (v_ret == VIRTCHNL_STATUS_SUCCESS)
+               set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+
+error_param:
+       /* send the response to the VF */
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
+                                    NULL, 0);
+}
+
+/**
+ * ice_vc_dis_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to disable all or specific queue(s)
+ */
+static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_queue_select *vqs =
+           (struct virtchnl_queue_select *)msg;
+       struct ice_vsi *vsi;
+       unsigned long q_map;
+       u16 vf_q_id;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
+           !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vc_validate_vqs_bitmaps(vqs)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (vqs->tx_queues) {
+               q_map = vqs->tx_queues;
+
+               for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+                       struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
+                       struct ice_txq_meta txq_meta = { 0 };
+
+                       if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                               goto error_param;
+                       }
+
+                       /* Skip queue if not enabled */
+                       if (!test_bit(vf_q_id, vf->txq_ena))
+                               continue;
+
+                       ice_fill_txq_meta(vsi, ring, &txq_meta);
+
+                       if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
+                                                ring, &txq_meta)) {
+                               dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
+                                       vf_q_id, vsi->vsi_num);
+                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                               goto error_param;
+                       }
+
+                       /* Clear enabled queues flag */
+                       clear_bit(vf_q_id, vf->txq_ena);
+               }
+       }
+
+       q_map = vqs->rx_queues;
+       /* speed up Rx queue disable by batching when the request covers all
+        * enabled Rx queues
+        */
+       if (q_map &&
+           bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
+               if (ice_vsi_stop_all_rx_rings(vsi)) {
+                       dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
+                               vsi->vsi_num);
+                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                       goto error_param;
+               }
+
+               bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+       } else if (q_map) {
+               for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+                       if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                               goto error_param;
+                       }
+
+                       /* Skip queue if not enabled */
+                       if (!test_bit(vf_q_id, vf->rxq_ena))
+                               continue;
+
+                       if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
+                                                    true)) {
+                               dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
+                                       vf_q_id, vsi->vsi_num);
+                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                               goto error_param;
+                       }
+
+                       /* Clear enabled queues flag */
+                       clear_bit(vf_q_id, vf->rxq_ena);
+               }
+       }
+
+       /* Clear the queues-enabled state flag when no VF queues remain enabled */
+       if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
+               clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+
+error_param:
+       /* send the response to the VF */
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
+                                    NULL, 0);
+}
+
+/**
+ * ice_cfg_interrupt
+ * @vf: pointer to the VF info
+ * @vsi: the VSI being configured
+ * @vector_id: vector ID
+ * @map: vector map for mapping vectors to queues
+ * @q_vector: structure for interrupt vector
+ *
+ * configure the IRQ to queue map
+ */
+static int
+ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
+                 struct virtchnl_vector_map *map,
+                 struct ice_q_vector *q_vector)
+{
+       u16 vsi_q_id, vsi_q_id_idx;
+       unsigned long qmap;
+
+       q_vector->num_ring_rx = 0;
+       q_vector->num_ring_tx = 0;
+
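+       /* map each requested Rx queue to this vector and program its ITR
+        * index
+        */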
+       qmap = map->rxq_map;
+       for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+               vsi_q_id = vsi_q_id_idx;
+
+               if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+                       return VIRTCHNL_STATUS_ERR_PARAM;
+
+               q_vector->num_ring_rx++;
+               q_vector->rx.itr_idx = map->rxitr_idx;
+               vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
+               ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
+                                     q_vector->rx.itr_idx);
+       }
+
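+       /* likewise map each requested Tx queue to this vector */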
+       qmap = map->txq_map;
+       for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+               vsi_q_id = vsi_q_id_idx;
+
+               if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+                       return VIRTCHNL_STATUS_ERR_PARAM;
+
+               q_vector->num_ring_tx++;
+               q_vector->tx.itr_idx = map->txitr_idx;
+               vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
+               ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
+                                     q_vector->tx.itr_idx);
+       }
+
+       return VIRTCHNL_STATUS_SUCCESS;
+}
+
+/**
+ * ice_vc_cfg_irq_map_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the IRQ to queue map
+ */
+static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
+{
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       u16 num_q_vectors_mapped, vsi_id, vector_id;
+       struct virtchnl_irq_map_info *irqmap_info;
+       struct virtchnl_vector_map *map;
+       struct ice_pf *pf = vf->pf;
+       struct ice_vsi *vsi;
+       int i;
+
+       irqmap_info = (struct virtchnl_irq_map_info *)msg;
+       num_q_vectors_mapped = irqmap_info->num_vectors;
+
+       /* Check to make sure number of VF vectors mapped is not greater than
+        * number of VF vectors originally allocated, and check that
+        * there is actually at least a single VF queue vector mapped
+        */
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+           pf->vfs.num_msix_per < num_q_vectors_mapped ||
+           !num_q_vectors_mapped) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       for (i = 0; i < num_q_vectors_mapped; i++) {
+               struct ice_q_vector *q_vector;
+
+               map = &irqmap_info->vecmap[i];
+
+               vector_id = map->vector_id;
+               vsi_id = map->vsi_id;
+               /* vector_id is always 0-based for each VF, and can never be
+                * larger than or equal to the max allowed interrupts per VF
+                */
+               if (!(vector_id < pf->vfs.num_msix_per) ||
+                   !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
+                   (!vector_id && (map->rxq_map || map->txq_map))) {
+                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                       goto error_param;
+               }
+
+               /* No need to map VF miscellaneous or rogue vector */
+               if (!vector_id)
+                       continue;
+
+               /* Subtract the non-queue vector from the vector_id passed by
+                * the VF to get the VSI queue vector array index
+                */
+               q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
+               if (!q_vector) {
+                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                       goto error_param;
+               }
+
+               /* ice_cfg_interrupt() validates the queue indexes in the map */
+               v_ret = (enum virtchnl_status_code)
+                       ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
+               if (v_ret)
+                       goto error_param;
+       }
+
+error_param:
+       /* send the response to the VF */
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
+                                    NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the Rx/Tx queues
+ */
+static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_vsi_queue_config_info *qci =
+           (struct virtchnl_vsi_queue_config_info *)msg;
+       struct virtchnl_queue_pair_info *qpi;
+       struct ice_pf *pf = vf->pf;
+       struct ice_vsi *vsi;
+       int i, q_idx;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
+           qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+               dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
+                       vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       for (i = 0; i < qci->num_queue_pairs; i++) {
+               qpi = &qci->qpair[i];
+               if (qpi->txq.vsi_id != qci->vsi_id ||
+                   qpi->rxq.vsi_id != qci->vsi_id ||
+                   qpi->rxq.queue_id != qpi->txq.queue_id ||
+                   qpi->txq.headwb_enabled ||
+                   !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
+                   !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
+                   !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
+                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                       goto error_param;
+               }
+
+               q_idx = qpi->rxq.queue_id;
+
+               /* make sure the selected "q_idx" is within the valid queue
+                * range for the selected "vsi"
+                */
+               if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
+                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                       goto error_param;
+               }
+
+               /* copy Tx queue info from VF into VSI */
+               if (qpi->txq.ring_len > 0) {
+                       vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
+                       vsi->tx_rings[i]->count = qpi->txq.ring_len;
+                       if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
+                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                               goto error_param;
+                       }
+               }
+
+               /* copy Rx queue info from VF into VSI */
+               if (qpi->rxq.ring_len > 0) {
+                       u16 max_frame_size = ice_vc_get_max_frame_size(vf);
+
+                       vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
+                       vsi->rx_rings[i]->count = qpi->rxq.ring_len;
+
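+                       /* when set, the Rx buffer size must be between 1024
+                        * and (16K - 128) bytes
+                        */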
+                       if (qpi->rxq.databuffer_size != 0 &&
+                           (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
+                            qpi->rxq.databuffer_size < 1024)) {
+                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                               goto error_param;
+                       }
+                       vsi->rx_buf_len = qpi->rxq.databuffer_size;
+                       vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
+                       if (qpi->rxq.max_pkt_size > max_frame_size ||
+                           qpi->rxq.max_pkt_size < 64) {
+                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                               goto error_param;
+                       }
+
+                       vsi->max_frame = qpi->rxq.max_pkt_size;
+                       /* add space for the port VLAN since the VF driver is
+                        * not expected to account for it in the MTU
+                        * calculation
+                        */
+                       if (ice_vf_is_port_vlan_ena(vf))
+                               vsi->max_frame += VLAN_HLEN;
+
+                       if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
+                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                               goto error_param;
+                       }
+               }
+       }
+
+error_param:
+       /* send the response to the VF */
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
+                                    NULL, 0);
+}
+
+/**
+ * ice_can_vf_change_mac
+ * @vf: pointer to the VF info
+ *
+ * Return true if the VF is allowed to change its MAC filters, false otherwise
+ */
+static bool ice_can_vf_change_mac(struct ice_vf *vf)
+{
+       /* If the VF MAC address has been set administratively (via the
+        * ndo_set_vf_mac command), then deny permission to the VF to
+        * add/delete unicast MAC addresses, unless the VF is trusted
+        */
+       if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
+               return false;
+
+       return true;
+}
+
+/**
+ * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
+ * @vc_ether_addr: used to extract the type
+ */
+static u8
+ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
+{
+       return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
+}
+
+/**
+ * ice_is_vc_addr_legacy - check if the MAC address is from an older VF
+ * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
+ */
+static bool
+ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
+{
+       u8 type = ice_vc_ether_addr_type(vc_ether_addr);
+
+       return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
+}
+
+/**
+ * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC
+ * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
+ *
+ * This function should only be called when the MAC address in
+ * virtchnl_ether_addr is a valid unicast MAC
+ */
+static bool
+ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
+{
+       u8 type = ice_vc_ether_addr_type(vc_ether_addr);
+
+       return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
+}
+
+/**
+ * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
+ * @vf: VF to update
+ * @vc_ether_addr: structure from VIRTCHNL with MAC to add
+ */
+static void
+ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
+{
+       u8 *mac_addr = vc_ether_addr->addr;
+
+       if (!is_valid_ether_addr(mac_addr))
+               return;
+
+       /* only allow legacy VF drivers to set the device and hardware MAC if it
+        * is zero and allow new VF drivers to set the hardware MAC if the type
+        * was correctly specified over VIRTCHNL
+        */
+       if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
+            is_zero_ether_addr(vf->hw_lan_addr.addr)) ||
+           ice_is_vc_addr_primary(vc_ether_addr)) {
+               ether_addr_copy(vf->dev_lan_addr.addr, mac_addr);
+               ether_addr_copy(vf->hw_lan_addr.addr, mac_addr);
+       }
+
+       /* hardware and device MACs are already set, but it's possible that the
+        * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
+        * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
+        * away for the legacy VF driver case as it will be updated in the
+        * delete flow for this case
+        */
+       if (ice_is_vc_addr_legacy(vc_ether_addr)) {
+               ether_addr_copy(vf->legacy_last_added_umac.addr,
+                               mac_addr);
+               vf->legacy_last_added_umac.time_modified = jiffies;
+       }
+}
+
+/**
+ * ice_vc_add_mac_addr - attempt to add the MAC address passed in
+ * @vf: pointer to the VF info
+ * @vsi: pointer to the VF's VSI
+ * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC
+ */
+static int
+ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
+                   struct virtchnl_ether_addr *vc_ether_addr)
+{
+       struct device *dev = ice_pf_to_dev(vf->pf);
+       u8 *mac_addr = vc_ether_addr->addr;
+       int ret;
+
+       /* device MAC already added */
+       if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
+               return 0;
+
+       if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
+               dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+               return -EPERM;
+       }
+
+       ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
+       if (ret == -EEXIST) {
+               dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
+                       vf->vf_id);
+               /* don't return since we might need to update
+                * the primary MAC in ice_vfhw_mac_add() below
+                */
+       } else if (ret) {
+               dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
+                       mac_addr, vf->vf_id, ret);
+               return ret;
+       } else {
+               vf->num_mac++;
+       }
+
+       ice_vfhw_mac_add(vf, vc_ether_addr);
+
+       return ret;
+}
+
+/**
+ * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired
+ * @last_added_umac: structure used to check expiration
+ */
+static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
+{
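+       /* a legacy MAC add is only honored for 3 seconds before it expires */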
+#define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME   msecs_to_jiffies(3000)
+       return time_is_before_jiffies(last_added_umac->time_modified +
+                                     ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
+}
+
+/**
+ * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF
+ * @vf: VF to update
+ * @vc_ether_addr: structure from VIRTCHNL with MAC to check
+ *
+ * only update cached hardware MAC for legacy VF drivers on delete
+ * because we cannot guarantee order/type of MAC from the VF driver
+ */
+static void
+ice_update_legacy_cached_mac(struct ice_vf *vf,
+                            struct virtchnl_ether_addr *vc_ether_addr)
+{
+       if (!ice_is_vc_addr_legacy(vc_ether_addr) ||
+           ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
+               return;
+
+       ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr);
+       ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr);
+}
+
+/**
+ * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
+ * @vf: VF to update
+ * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
+ */
+static void
+ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
+{
+       u8 *mac_addr = vc_ether_addr->addr;
+
+       if (!is_valid_ether_addr(mac_addr) ||
+           !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
+               return;
+
+       /* allow the device MAC to be repopulated in the add flow and don't
+        * clear the hardware MAC (i.e. hw_lan_addr.addr) here as that is meant
+        * to be persistent on VM reboot and across driver unload/load, which
+        * won't work if we clear the hardware MAC here
+        */
+       eth_zero_addr(vf->dev_lan_addr.addr);
+
+       ice_update_legacy_cached_mac(vf, vc_ether_addr);
+}
+
+/**
+ * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
+ * @vf: pointer to the VF info
+ * @vsi: pointer to the VF's VSI
+ * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC
+ */
+static int
+ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
+                   struct virtchnl_ether_addr *vc_ether_addr)
+{
+       struct device *dev = ice_pf_to_dev(vf->pf);
+       u8 *mac_addr = vc_ether_addr->addr;
+       int status;
+
+       if (!ice_can_vf_change_mac(vf) &&
+           ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
+               return 0;
+
+       status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
+       if (status == -ENOENT) {
+               dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
+                       vf->vf_id);
+               return -ENOENT;
+       } else if (status) {
+               dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
+                       mac_addr, vf->vf_id, status);
+               return -EIO;
+       }
+
+       ice_vfhw_mac_del(vf, vc_ether_addr);
+
+       vf->num_mac--;
+
+       return 0;
+}
+
+/**
+ * ice_vc_handle_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @set: true if MAC filters are being set, false otherwise
+ *
+ * add or remove guest MAC address filters
+ */
+static int
+ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
+{
+       int (*ice_vc_cfg_mac)(struct ice_vf *vf, struct ice_vsi *vsi,
+                             struct virtchnl_ether_addr *virtchnl_ether_addr);
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_ether_addr_list *al =
+           (struct virtchnl_ether_addr_list *)msg;
+       struct ice_pf *pf = vf->pf;
+       enum virtchnl_ops vc_op;
+       struct ice_vsi *vsi;
+       int i;
+
+       if (set) {
+               vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
+               ice_vc_cfg_mac = ice_vc_add_mac_addr;
+       } else {
+               vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
+               ice_vc_cfg_mac = ice_vc_del_mac_addr;
+       }
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+           !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto handle_mac_exit;
+       }
+
+       /* If this VF is not privileged, then we can't add more than a
+        * limited number of addresses. Check to make sure that the
+        * additions do not push us over the limit.
+        */
+       if (set && !ice_is_vf_trusted(vf) &&
+           (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
+               dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted; switch the VF to trusted mode to add more addresses\n",
+                       vf->vf_id);
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto handle_mac_exit;
+       }
+
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto handle_mac_exit;
+       }
+
+       for (i = 0; i < al->num_elements; i++) {
+               u8 *mac_addr = al->list[i].addr;
+               int result;
+
+               if (is_broadcast_ether_addr(mac_addr) ||
+                   is_zero_ether_addr(mac_addr))
+                       continue;
+
+               result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
+               if (result == -EEXIST || result == -ENOENT) {
+                       continue;
+               } else if (result) {
+                       v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+                       goto handle_mac_exit;
+               }
+       }
+
+handle_mac_exit:
+       /* send the response to the VF */
+       return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_add_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * add guest MAC address filter
+ */
+static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
+{
+       return ice_vc_handle_mac_addr_msg(vf, msg, true);
+}
+
+/**
+ * ice_vc_del_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove guest MAC address filter
+ */
+static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
+{
+       return ice_vc_handle_mac_addr_msg(vf, msg, false);
+}
+
+/**
+ * ice_vc_request_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * VFs get a default number of queues but can use this message to request a
+ * different number. If the request is successful, PF will reset the VF and
+ * return 0. If unsuccessful, the PF will inform the VF of the number of
+ * available queue pairs via a virtchnl message response.
+ */
+static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_vf_res_request *vfres =
+               (struct virtchnl_vf_res_request *)msg;
+       u16 req_queues = vfres->num_queue_pairs;
+       struct ice_pf *pf = vf->pf;
+       u16 max_allowed_vf_queues;
+       u16 tx_rx_queue_left;
+       struct device *dev;
+       u16 cur_queues;
+
+       dev = ice_pf_to_dev(pf);
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       cur_queues = vf->num_vf_qs;
+       tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
+                                ice_get_avail_rxq_count(pf));
+       max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
+       if (!req_queues) {
+               dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
+                       vf->vf_id);
+       } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
+               dev_err(dev, "VF %d tried to request more than %d queues.\n",
+                       vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
+               vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
+       } else if (req_queues > cur_queues &&
+                  req_queues - cur_queues > tx_rx_queue_left) {
+               dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
+                        vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
+               vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
+                                              ICE_MAX_RSS_QS_PER_VF);
+       } else {
+               /* request is successful, so reset the VF */
+               vf->num_req_qs = req_queues;
+               ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+               dev_info(dev, "VF %d granted request of %u queues.\n",
+                        vf->vf_id, req_queues);
+               return 0;
+       }
+
+error_param:
+       /* send the response to the VF */
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
+                                    v_ret, (u8 *)vfres, sizeof(*vfres));
+}
+
+/**
+ * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
+ * @caps: VF driver negotiated capabilities
+ *
+ * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
+ */
+static bool ice_vf_vlan_offload_ena(u32 caps)
+{
+       return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
+}
+
+/**
+ * ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
+ * @vf: VF used to determine if VLAN promiscuous config is allowed
+ */
+static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
+{
+       if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+            test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
+           test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, vf->pf->flags))
+               return true;
+
+       return false;
+}
+
+/**
+ * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
+ * @vsi: VF's VSI used to enable VLAN promiscuous mode
+ * @vlan: VLAN used to enable VLAN promiscuous
+ *
+ * This function should only be called if VLAN promiscuous mode is allowed,
+ * which can be determined via ice_is_vlan_promisc_allowed().
+ */
+static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+       u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
+       int status;
+
+       status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
+                                         vlan->vid);
+       if (status && status != -EEXIST)
+               return status;
+
+       return 0;
+}
+
+/**
+ * ice_vf_dis_vlan_promisc - Disable Tx/Rx VLAN promiscuous for the VLAN
+ * @vsi: VF's VSI used to disable VLAN promiscuous mode for
+ * @vlan: VLAN used to disable VLAN promiscuous
+ *
+ * This function should only be called if VLAN promiscuous mode is allowed,
+ * which can be determined via ice_is_vlan_promisc_allowed().
+ */
+static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+       u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
+       int status;
+
+       status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
+                                           vlan->vid);
+       if (status && status != -ENOENT)
+               return status;
+
+       return 0;
+}
+
+/**
+ * ice_vf_has_max_vlans - check if VF already has the max allowed VLAN filters
+ * @vf: VF to check against
+ * @vsi: VF's VSI
+ *
+ * If the VF is trusted then the VF is allowed to add as many VLANs as it
+ * wants to, so return false.
+ *
+ * When the VF is untrusted compare the number of non-zero VLANs + 1 to the max
+ * allowed VLANs for an untrusted VF. Return the result of this comparison.
+ */
+static bool ice_vf_has_max_vlans(struct ice_vf *vf, struct ice_vsi *vsi)
+{
+       if (ice_is_vf_trusted(vf))
+               return false;
+
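+       /* count the VLAN 0 filter that is added by default for every VF
+        * against the untrusted VF limit
+        */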
+#define ICE_VF_ADDED_VLAN_ZERO_FLTRS   1
+       return ((ice_vsi_num_non_zero_vlans(vsi) +
+               ICE_VF_ADDED_VLAN_ZERO_FLTRS) >= ICE_MAX_VLAN_PER_VF);
+}
+
+/**
+ * ice_vc_process_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @add_v: Add VLAN if true, otherwise delete VLAN
+ *
+ * Process virtchnl op to add or remove programmed guest VLAN ID
+ */
+static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
+{
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_vlan_filter_list *vfl =
+           (struct virtchnl_vlan_filter_list *)msg;
+       struct ice_pf *pf = vf->pf;
+       bool vlan_promisc = false;
+       struct ice_vsi *vsi;
+       struct device *dev;
+       int status = 0;
+       int i;
+
+       dev = ice_pf_to_dev(pf);
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       for (i = 0; i < vfl->num_elements; i++) {
+               if (vfl->vlan_id[i] >= VLAN_N_VID) {
+                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                       dev_err(dev, "invalid VF VLAN id %d\n",
+                               vfl->vlan_id[i]);
+                       goto error_param;
+               }
+       }
+
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (add_v && ice_vf_has_max_vlans(vf, vsi)) {
+               dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLAN filters\n",
+                        vf->vf_id);
+               /* There is no need to let the VF know it is not trusted, so
+                * just return a success message here
+                */
+               goto error_param;
+       }
+
+       /* in DVM a VF can add/delete inner VLAN filters when
+        * VIRTCHNL_VF_OFFLOAD_VLAN is negotiated, so only reject in SVM
+        */
+       if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&pf->hw)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       /* in DVM VLAN promiscuous is based on the outer VLAN, which would be
+        * the port VLAN if VIRTCHNL_VF_OFFLOAD_VLAN was negotiated, so only
+        * allow vlan_promisc = true in SVM and if no port VLAN is configured
+        */
+       vlan_promisc = ice_is_vlan_promisc_allowed(vf) &&
+               !ice_is_dvm_ena(&pf->hw) &&
+               !ice_vf_is_port_vlan_ena(vf);
+
+       if (add_v) {
+               for (i = 0; i < vfl->num_elements; i++) {
+                       u16 vid = vfl->vlan_id[i];
+                       struct ice_vlan vlan;
+
+                       if (ice_vf_has_max_vlans(vf, vsi)) {
+                               dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLAN filters\n",
+                                        vf->vf_id);
+                               /* There is no need to let the VF know it is
+                                * not trusted, so just return a success
+                                * message here as well
+                                */
+                               goto error_param;
+                       }
+
+                       /* we add VLAN 0 by default for each VF so we can enable
+                        * Tx VLAN anti-spoof without triggering MDD events so
+                        * we don't need to add it again here
+                        */
+                       if (!vid)
+                               continue;
+
+                       vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
+                       status = vsi->inner_vlan_ops.add_vlan(vsi, &vlan);
+                       if (status) {
+                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                               goto error_param;
+                       }
+
+                       /* Enable VLAN filtering on first non-zero VLAN */
+                       if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
+                               if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) {
+                                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                                       dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
+                                               vid, status);
+                                       goto error_param;
+                               }
+                       } else if (vlan_promisc) {
+                               status = ice_vf_ena_vlan_promisc(vsi, &vlan);
+                               if (status) {
+                                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                                       dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
+                                               vid, status);
+                               }
+                       }
+               }
+       } else {
+               /* For an untrusted VF, the number of VLAN elements passed to
+                * the PF for removal might be greater than the number of VLAN
+                * filters programmed for that VF, so cap the loop at the
+                * number of VLANs actually added earlier with the add VLAN
+                * opcode. This avoids removing a VLAN that doesn't exist,
+                * which would result in sending an erroneous failure message
+                * back to the VF.
+                */
+               int num_vf_vlan;
+
+               num_vf_vlan = vsi->num_vlan;
+               for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
+                       u16 vid = vfl->vlan_id[i];
+                       struct ice_vlan vlan;
+
+                       /* we add VLAN 0 by default for each VF so we can enable
+                        * Tx VLAN anti-spoof without triggering MDD events so
+                        * we don't want a VIRTCHNL request to remove it
+                        */
+                       if (!vid)
+                               continue;
+
+                       vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
+                       status = vsi->inner_vlan_ops.del_vlan(vsi, &vlan);
+                       if (status) {
+                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                               goto error_param;
+                       }
+
+                       /* Disable VLAN filtering when only VLAN 0 is left */
+                       if (!ice_vsi_has_non_zero_vlans(vsi))
+                               vsi->inner_vlan_ops.dis_rx_filtering(vsi);
+
+                       if (vlan_promisc)
+                               ice_vf_dis_vlan_promisc(vsi, &vlan);
+               }
+       }
+
+error_param:
+       /* send the response to the VF */
+       if (add_v)
+               return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
+                                            NULL, 0);
+       else
+               return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
+                                            NULL, 0);
+}
+
+/**
+ * ice_vc_add_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Add and program guest VLAN ID
+ */
+static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
+{
+       return ice_vc_process_vlan_msg(vf, msg, true);
+}
+
+/**
+ * ice_vc_remove_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove programmed guest VLAN ID
+ */
+static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
+{
+       return ice_vc_process_vlan_msg(vf, msg, false);
+}
+
+/**
+ * ice_vc_ena_vlan_stripping
+ * @vf: pointer to the VF info
+ *
+ * Enable VLAN header stripping for a given VF
+ */
+static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
+{
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct ice_vsi *vsi;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = ice_get_vf_vsi(vf);
+       if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+error_param:
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+                                    v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_dis_vlan_stripping
+ * @vf: pointer to the VF info
+ *
+ * Disable VLAN header stripping for a given VF
+ */
+static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
+{
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct ice_vsi *vsi;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (vsi->inner_vlan_ops.dis_stripping(vsi))
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+error_param:
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+                                    v_ret, NULL, 0);
+}
+
+/**
+ * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
+ * @vf: VF to enable/disable VLAN stripping for on initialization
+ *
+ * Set the default for VLAN stripping based on whether a port VLAN is configured
+ * and the current VLAN mode of the device.
+ */
+static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
+{
+       struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+       if (!vsi)
+               return -EINVAL;
+
+       /* don't modify stripping if port VLAN is configured in SVM since the
+        * port VLAN is based on the inner/single VLAN in SVM
+        */
+       if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw))
+               return 0;
+
+       if (ice_vf_vlan_offload_ena(vf->driver_caps))
+               return vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
+       else
+               return vsi->inner_vlan_ops.dis_stripping(vsi);
+}
+
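+/**
+ * ice_vc_get_max_vlan_fltrs - get the maximum number of VLAN filters for a VF
+ * @vf: VF to get the maximum VLAN filter count for
+ *
+ * A trusted VF can use the full VLAN ID space, while an untrusted VF is
+ * limited to ICE_MAX_VLAN_PER_VF filters.
+ */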
+static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf)
+{
+       if (vf->trusted)
+               return VLAN_N_VID;
+       else
+               return ICE_MAX_VLAN_PER_VF;
+}
+
+/**
+ * ice_vf_outer_vlan_not_allowed - check if outer VLAN can be used
+ * @vf: VF being checked
+ *
+ * When the device is in double VLAN mode, check whether or not the outer VLAN
+ * is allowed.
+ */
+static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf)
+{
+       if (ice_vf_is_port_vlan_ena(vf))
+               return true;
+
+       return false;
+}
+
+/**
+ * ice_vc_set_dvm_caps - set VLAN capabilities when the device is in DVM
+ * @vf: VF that capabilities are being set for
+ * @caps: VLAN capabilities to populate
+ *
+ * Determine VLAN capabilities support based on whether a port VLAN is
+ * configured. If a port VLAN is configured then the VF should use the inner
+ * filtering/offload capabilities since the port VLAN is using the outer VLAN
+ * capabilities.
+ */
+static void
+ice_vc_set_dvm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
+{
+       struct virtchnl_vlan_supported_caps *supported_caps;
+
+       if (ice_vf_outer_vlan_not_allowed(vf)) {
+               /* until support for inner VLAN filtering is added when a port
+                * VLAN is configured, only support software offloaded inner
+                * VLANs when a port VLAN is configured in DVM
+                */
+               supported_caps = &caps->filtering.filtering_support;
+               supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+
+               supported_caps = &caps->offloads.stripping_support;
+               supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+                                       VIRTCHNL_VLAN_TOGGLE |
+                                       VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+               supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+               supported_caps = &caps->offloads.insertion_support;
+               supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+                                       VIRTCHNL_VLAN_TOGGLE |
+                                       VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+               supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+               caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+               caps->offloads.ethertype_match =
+                       VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+       } else {
+               supported_caps = &caps->filtering.filtering_support;
+               supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+               supported_caps->outer = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+                                       VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+                                       VIRTCHNL_VLAN_ETHERTYPE_9100 |
+                                       VIRTCHNL_VLAN_ETHERTYPE_AND;
+               caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+                                                VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+                                                VIRTCHNL_VLAN_ETHERTYPE_9100;
+
+               supported_caps = &caps->offloads.stripping_support;
+               supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
+                                       VIRTCHNL_VLAN_ETHERTYPE_8100 |
+                                       VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+               supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
+                                       VIRTCHNL_VLAN_ETHERTYPE_8100 |
+                                       VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+                                       VIRTCHNL_VLAN_ETHERTYPE_9100 |
+                                       VIRTCHNL_VLAN_ETHERTYPE_XOR |
+                                       VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2;
+
+               supported_caps = &caps->offloads.insertion_support;
+               supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
+                                       VIRTCHNL_VLAN_ETHERTYPE_8100 |
+                                       VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+               supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
+                                       VIRTCHNL_VLAN_ETHERTYPE_8100 |
+                                       VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+                                       VIRTCHNL_VLAN_ETHERTYPE_9100 |
+                                       VIRTCHNL_VLAN_ETHERTYPE_XOR |
+                                       VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2;
+
+               caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+
+               caps->offloads.ethertype_match =
+                       VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+       }
+
+       caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
+}
+
+/**
+ * ice_vc_set_svm_caps - set VLAN capabilities when the device is in SVM
+ * @vf: VF that capabilities are being set for
+ * @caps: VLAN capabilities to populate
+ *
+ * Determine VLAN capabilities support based on whether a port VLAN is
+ * configured. If a port VLAN is configured then the VF does not have any VLAN
+ * filtering or offload capabilities since the port VLAN is using the inner VLAN
+ * capabilities in single VLAN mode (SVM). Otherwise allow the VF to use inner
+ * VLAN filtering and offload capabilities.
+ */
+static void
+ice_vc_set_svm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
+{
+       struct virtchnl_vlan_supported_caps *supported_caps;
+
+       if (ice_vf_is_port_vlan_ena(vf)) {
+               supported_caps = &caps->filtering.filtering_support;
+               supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+               supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+               supported_caps = &caps->offloads.stripping_support;
+               supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+               supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+               supported_caps = &caps->offloads.insertion_support;
+               supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+               supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+               caps->offloads.ethertype_init = VIRTCHNL_VLAN_UNSUPPORTED;
+               caps->offloads.ethertype_match = VIRTCHNL_VLAN_UNSUPPORTED;
+               caps->filtering.max_filters = 0;
+       } else {
+               supported_caps = &caps->filtering.filtering_support;
+               supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100;
+               supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+               caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+
+               supported_caps = &caps->offloads.stripping_support;
+               supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+                                       VIRTCHNL_VLAN_TOGGLE |
+                                       VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+               supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+               supported_caps = &caps->offloads.insertion_support;
+               supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+                                       VIRTCHNL_VLAN_TOGGLE |
+                                       VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+               supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+               caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+               caps->offloads.ethertype_match =
+                       VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+               caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
+       }
+}
+
+/**
+ * ice_vc_get_offload_vlan_v2_caps - determine VF's VLAN capabilities
+ * @vf: VF to determine VLAN capabilities for
+ *
+ * This will only be called if the VF and PF successfully negotiated
+ * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
+ *
+ * Set VLAN capabilities based on the current VLAN mode and whether a port VLAN
+ * is configured or not.
+ */
+static int ice_vc_get_offload_vlan_v2_caps(struct ice_vf *vf)
+{
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_vlan_caps *caps = NULL;
+       int err, len = 0;
+
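+       /* on any failure below, caps stays NULL and len stays 0, so the
+        * reply sent at the "out" label carries no payload
+        */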
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       caps = kzalloc(sizeof(*caps), GFP_KERNEL);
+       if (!caps) {
+               v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+               goto out;
+       }
+       len = sizeof(*caps);
+
+       if (ice_is_dvm_ena(&vf->pf->hw))
+               ice_vc_set_dvm_caps(vf, caps);
+       else
+               ice_vc_set_svm_caps(vf, caps);
+
+       /* store negotiated caps to prevent invalid VF messages */
+       memcpy(&vf->vlan_v2_caps, caps, sizeof(*caps));
+
+out:
+       err = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
+                                   v_ret, (u8 *)caps, len);
+       kfree(caps);
+       return err;
+}
+
+/**
+ * ice_vc_validate_vlan_tpid - validate VLAN TPID
+ * @filtering_caps: negotiated/supported VLAN filtering capabilities
+ * @tpid: VLAN TPID used for validation
+ *
+ * Convert the VLAN TPID to a VIRTCHNL_VLAN_ETHERTYPE_* and then compare against
+ * the negotiated/supported filtering caps to see if the VLAN TPID is valid.
+ */
+static bool ice_vc_validate_vlan_tpid(u16 filtering_caps, u16 tpid)
+{
+       enum virtchnl_vlan_support vlan_ethertype = VIRTCHNL_VLAN_UNSUPPORTED;
+
+       switch (tpid) {
+       case ETH_P_8021Q:
+               vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
+               break;
+       case ETH_P_8021AD:
+               vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_88A8;
+               break;
+       case ETH_P_QINQ1:
+               vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_9100;
+               break;
+       }
+
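+       /* an unrecognized TPID leaves vlan_ethertype as
+        * VIRTCHNL_VLAN_UNSUPPORTED (0), so the check below rejects it
+        */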
+       if (!(filtering_caps & vlan_ethertype))
+               return false;
+
+       return true;
+}
+
+/**
+ * ice_vc_is_valid_vlan - validate the virtchnl_vlan
+ * @vc_vlan: virtchnl_vlan to validate
+ *
+ * If the VLAN TCI or the VLAN TPID is 0, then this filter is invalid, so
+ * return false. Otherwise return true.
+ */
+static bool ice_vc_is_valid_vlan(struct virtchnl_vlan *vc_vlan)
+{
+       if (!vc_vlan->tci || !vc_vlan->tpid)
+               return false;
+
+       return true;
+}
+
+/**
+ * ice_vc_validate_vlan_filter_list - validate the filter list from the VF
+ * @vfc: negotiated/supported VLAN filtering capabilities
+ * @vfl: VLAN filter list from VF to validate
+ *
+ * Validate all of the filters in the VLAN filter list from the VF. If any of
+ * the checks fail then return false. Otherwise return true.
+ */
+static bool
+ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc,
+                                struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+       u16 i;
+
+       if (!vfl->num_elements)
+               return false;
+
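+       /* each filter may only use tag locations, masks, priorities, and
+        * TPIDs that were negotiated in the filtering capabilities
+        */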
+       for (i = 0; i < vfl->num_elements; i++) {
+               struct virtchnl_vlan_supported_caps *filtering_support =
+                       &vfc->filtering_support;
+               struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
+               struct virtchnl_vlan *outer = &vlan_fltr->outer;
+               struct virtchnl_vlan *inner = &vlan_fltr->inner;
+
+               if ((ice_vc_is_valid_vlan(outer) &&
+                    filtering_support->outer == VIRTCHNL_VLAN_UNSUPPORTED) ||
+                   (ice_vc_is_valid_vlan(inner) &&
+                    filtering_support->inner == VIRTCHNL_VLAN_UNSUPPORTED))
+                       return false;
+
+               if ((outer->tci_mask &&
+                    !(filtering_support->outer & VIRTCHNL_VLAN_FILTER_MASK)) ||
+                   (inner->tci_mask &&
+                    !(filtering_support->inner & VIRTCHNL_VLAN_FILTER_MASK)))
+                       return false;
+
+               if (((outer->tci & VLAN_PRIO_MASK) &&
+                    !(filtering_support->outer & VIRTCHNL_VLAN_PRIO)) ||
+                   ((inner->tci & VLAN_PRIO_MASK) &&
+                    !(filtering_support->inner & VIRTCHNL_VLAN_PRIO)))
+                       return false;
+
+               if ((ice_vc_is_valid_vlan(outer) &&
+                    !ice_vc_validate_vlan_tpid(filtering_support->outer,
+                                               outer->tpid)) ||
+                   (ice_vc_is_valid_vlan(inner) &&
+                    !ice_vc_validate_vlan_tpid(filtering_support->inner,
+                                               inner->tpid)))
+                       return false;
+       }
+
+       return true;
+}
+
+/**
+ * ice_vc_to_vlan - transform from struct virtchnl_vlan to struct ice_vlan
+ * @vc_vlan: struct virtchnl_vlan to transform
+ */
+static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
+{
+       struct ice_vlan vlan = { 0 };
+
+       vlan.prio = (vc_vlan->tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+       vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
+       vlan.tpid = vc_vlan->tpid;
+
+       return vlan;
+}
+
+/**
+ * ice_vc_vlan_action - action to perform on the virtchnl_vlan
+ * @vsi: VF's VSI used to perform the action
+ * @vlan_action: function to perform the action with (i.e. add/del)
+ * @vlan: VLAN filter to perform the action with
+ */
+static int
+ice_vc_vlan_action(struct ice_vsi *vsi,
+                  int (*vlan_action)(struct ice_vsi *, struct ice_vlan *),
+                  struct ice_vlan *vlan)
+{
+       int err;
+
+       err = vlan_action(vsi, vlan);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+/**
+ * ice_vc_del_vlans - delete VLAN(s) from the virtchnl filter list
+ * @vf: VF used to delete the VLAN(s)
+ * @vsi: VF's VSI used to delete the VLAN(s)
+ * @vfl: virtchnl filter list used to delete the filters
+ */
+static int
+ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
+                struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+       bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
+       int err;
+       u16 i;
+
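+       /* each element may carry an outer and/or an inner VLAN; act on
+        * whichever is present
+        */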
+       for (i = 0; i < vfl->num_elements; i++) {
+               struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
+               struct virtchnl_vlan *vc_vlan;
+
+               vc_vlan = &vlan_fltr->outer;
+               if (ice_vc_is_valid_vlan(vc_vlan)) {
+                       struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+                       err = ice_vc_vlan_action(vsi,
+                                                vsi->outer_vlan_ops.del_vlan,
+                                                &vlan);
+                       if (err)
+                               return err;
+
+                       if (vlan_promisc)
+                               ice_vf_dis_vlan_promisc(vsi, &vlan);
+               }
+
+               vc_vlan = &vlan_fltr->inner;
+               if (ice_vc_is_valid_vlan(vc_vlan)) {
+                       struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+                       err = ice_vc_vlan_action(vsi,
+                                                vsi->inner_vlan_ops.del_vlan,
+                                                &vlan);
+                       if (err)
+                               return err;
+
+                       /* no support for VLAN promiscuous on inner VLAN unless
+                        * we are in Single VLAN Mode (SVM)
+                        */
+                       if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc)
+                               ice_vf_dis_vlan_promisc(vsi, &vlan);
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * ice_vc_remove_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_DEL_VLAN_V2
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ */
+static int ice_vc_remove_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+       struct virtchnl_vlan_filter_list_v2 *vfl =
+               (struct virtchnl_vlan_filter_list_v2 *)msg;
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct ice_vsi *vsi;
+
+       if (!ice_vc_validate_vlan_filter_list(&vf->vlan_v2_caps.filtering,
+                                             vfl)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       if (ice_vc_del_vlans(vf, vsi, vfl))
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+out:
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN_V2, v_ret, NULL,
+                                    0);
+}
+
+/**
+ * ice_vc_add_vlans - add VLAN(s) from the virtchnl filter list
+ * @vf: VF used to add the VLAN(s)
+ * @vsi: VF's VSI used to add the VLAN(s)
+ * @vfl: virtchnl filter list used to add the filters
+ */
+static int
+ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
+                struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+       bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
+       int err;
+       u16 i;
+
+       for (i = 0; i < vfl->num_elements; i++) {
+               struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
+               struct virtchnl_vlan *vc_vlan;
+
+               vc_vlan = &vlan_fltr->outer;
+               if (ice_vc_is_valid_vlan(vc_vlan)) {
+                       struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+                       err = ice_vc_vlan_action(vsi,
+                                                vsi->outer_vlan_ops.add_vlan,
+                                                &vlan);
+                       if (err)
+                               return err;
+
+                       if (vlan_promisc) {
+                               err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+                               if (err)
+                                       return err;
+                       }
+               }
+
+               vc_vlan = &vlan_fltr->inner;
+               if (ice_vc_is_valid_vlan(vc_vlan)) {
+                       struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+                       err = ice_vc_vlan_action(vsi,
+                                                vsi->inner_vlan_ops.add_vlan,
+                                                &vlan);
+                       if (err)
+                               return err;
+
+                       /* no support for VLAN promiscuous on inner VLAN unless
+                        * we are in Single VLAN Mode (SVM)
+                        */
+                       if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) {
+                               err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+                               if (err)
+                                       return err;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * ice_vc_validate_add_vlan_filter_list - validate add filter list from the VF
+ * @vsi: VF VSI used to get number of existing VLAN filters
+ * @vfc: negotiated/supported VLAN filtering capabilities
+ * @vfl: VLAN filter list from VF to validate
+ *
+ * Validate all of the filters in the VLAN filter list from the VF during the
+ * VIRTCHNL_OP_ADD_VLAN_V2 opcode. If any of the checks fail then return false.
+ * Otherwise return true.
+ */
+static bool
+ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
+                                    struct virtchnl_vlan_filtering_caps *vfc,
+                                    struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+       u16 num_requested_filters = vsi->num_vlan + vfl->num_elements;
+
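+       /* account for the VLAN filters already programmed on this VSI plus
+        * the new request so the negotiated max_filters limit cannot be
+        * exceeded
+        */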
+       if (num_requested_filters > vfc->max_filters)
+               return false;
+
+       return ice_vc_validate_vlan_filter_list(vfc, vfl);
+}
+
+/**
+ * ice_vc_add_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_ADD_VLAN_V2
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ */
+static int ice_vc_add_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_vlan_filter_list_v2 *vfl =
+               (struct virtchnl_vlan_filter_list_v2 *)msg;
+       struct ice_vsi *vsi;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       if (!ice_vc_validate_add_vlan_filter_list(vsi,
+                                                 &vf->vlan_v2_caps.filtering,
+                                                 vfl)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       if (ice_vc_add_vlans(vf, vsi, vfl))
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+out:
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN_V2, v_ret, NULL,
+                                    0);
+}
+
+/**
+ * ice_vc_valid_vlan_setting - validate VLAN setting
+ * @negotiated_settings: negotiated VLAN settings during VF init
+ * @ethertype_setting: ethertype(s) requested for the VLAN setting
+ */
+static bool
+ice_vc_valid_vlan_setting(u32 negotiated_settings, u32 ethertype_setting)
+{
+       if (ethertype_setting && !(negotiated_settings & ethertype_setting))
+               return false;
+
+       /* only allow a single VIRTCHNL_VLAN_ETHERTYPE if
+        * VIRTCHNL_VLAN_ETHERTYPE_AND is not negotiated/supported
+        */
+       if (!(negotiated_settings & VIRTCHNL_VLAN_ETHERTYPE_AND) &&
+           hweight32(ethertype_setting) > 1)
+               return false;
+
+       /* ability to modify the VLAN setting was not negotiated */
+       if (!(negotiated_settings & VIRTCHNL_VLAN_TOGGLE))
+               return false;
+
+       return true;
+}
+
+/**
+ * ice_vc_valid_vlan_setting_msg - validate the VLAN setting message
+ * @caps: negotiated VLAN settings during VF init
+ * @msg: message to validate
+ *
+ * Used to validate any VLAN virtchnl message sent as a
+ * virtchnl_vlan_setting structure. Validates the message against the
+ * negotiated/supported caps during VF driver init.
+ */
+static bool
+ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps *caps,
+                             struct virtchnl_vlan_setting *msg)
+{
+       if ((!msg->outer_ethertype_setting &&
+            !msg->inner_ethertype_setting) ||
+           (!caps->outer && !caps->inner))
+               return false;
+
+       if (msg->outer_ethertype_setting &&
+           !ice_vc_valid_vlan_setting(caps->outer,
+                                      msg->outer_ethertype_setting))
+               return false;
+
+       if (msg->inner_ethertype_setting &&
+           !ice_vc_valid_vlan_setting(caps->inner,
+                                      msg->inner_ethertype_setting))
+               return false;
+
+       return true;
+}
+
+/**
+ * ice_vc_get_tpid - transform from VIRTCHNL_VLAN_ETHERTYPE_* to VLAN TPID
+ * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* used to get VLAN TPID
+ * @tpid: VLAN TPID to populate
+ */
+static int ice_vc_get_tpid(u32 ethertype_setting, u16 *tpid)
+{
+       switch (ethertype_setting) {
+       case VIRTCHNL_VLAN_ETHERTYPE_8100:
+               *tpid = ETH_P_8021Q;
+               break;
+       case VIRTCHNL_VLAN_ETHERTYPE_88A8:
+               *tpid = ETH_P_8021AD;
+               break;
+       case VIRTCHNL_VLAN_ETHERTYPE_9100:
+               *tpid = ETH_P_QINQ1;
+               break;
+       default:
+               *tpid = 0;
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
+ * ice_vc_ena_vlan_offload - enable VLAN offload based on the ethertype_setting
+ * @vsi: VF's VSI used to enable the VLAN offload
+ * @ena_offload: function used to enable the VLAN offload
+ * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* to enable offloads for
+ */
+static int
+ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
+                       int (*ena_offload)(struct ice_vsi *vsi, u16 tpid),
+                       u32 ethertype_setting)
+{
+       u16 tpid;
+       int err;
+
+       err = ice_vc_get_tpid(ethertype_setting, &tpid);
+       if (err)
+               return err;
+
+       err = ena_offload(vsi, tpid);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+#define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3
+#define ICE_L2TSEL_BIT_OFFSET          23
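+/* Selects which Rx descriptor field receives the first stripped VLAN tag:
+ * L2TAG2_2ND when outer stripping is enabled, L2TAG1 otherwise.
+ */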
+enum ice_l2tsel {
+       ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
+       ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
+};
+
+/**
+ * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
+ * @vsi: VSI used to update l2tsel on
+ * @l2tsel: l2tsel setting requested
+ *
+ * Use the l2tsel setting to update the l2tsel bit in all of this VSI's Rx
+ * queue contexts.
+ * This will modify which descriptor field the first offloaded VLAN will be
+ * stripped into.
+ */
+static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
+{
+       struct ice_hw *hw = &vsi->back->hw;
+       u32 l2tsel_bit;
+       int i;
+
+       if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
+               l2tsel_bit = 0;
+       else
+               l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
+
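+       /* read-modify-write the l2tsel bit in each of this VSI's Rx queue
+        * context registers
+        */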
+       for (i = 0; i < vsi->alloc_rxq; i++) {
+               u16 pfq = vsi->rxq_map[i];
+               u32 qrx_context_offset;
+               u32 regval;
+
+               qrx_context_offset =
+                       QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
+
+               regval = rd32(hw, qrx_context_offset);
+               regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
+               regval |= l2tsel_bit;
+               wr32(hw, qrx_context_offset, regval);
+       }
+}
+
+/**
+ * ice_vc_ena_vlan_stripping_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
+ */
+static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_vlan_supported_caps *stripping_support;
+       struct virtchnl_vlan_setting *strip_msg =
+               (struct virtchnl_vlan_setting *)msg;
+       u32 ethertype_setting;
+       struct ice_vsi *vsi;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
+       if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       ethertype_setting = strip_msg->outer_ethertype_setting;
+       if (ethertype_setting) {
+               if (ice_vc_ena_vlan_offload(vsi,
+                                           vsi->outer_vlan_ops.ena_stripping,
+                                           ethertype_setting)) {
+                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                       goto out;
+               } else {
+                       enum ice_l2tsel l2tsel =
+                               ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND;
+
+                       /* PF tells the VF that the outer VLAN tag is always
+                        * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
+                        * inner is always extracted to
+                        * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
+                        * support outer stripping so the first tag always ends
+                        * up in L2TAG2_2ND and the second/inner tag, if
+                        * enabled, is extracted in L2TAG1.
+                        */
+                       ice_vsi_update_l2tsel(vsi, l2tsel);
+               }
+       }
+
+       ethertype_setting = strip_msg->inner_ethertype_setting;
+       if (ethertype_setting &&
+           ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping,
+                                   ethertype_setting)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+out:
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
+                                    v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_dis_vlan_stripping_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
+ */
+static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_vlan_supported_caps *stripping_support;
+       struct virtchnl_vlan_setting *strip_msg =
+               (struct virtchnl_vlan_setting *)msg;
+       u32 ethertype_setting;
+       struct ice_vsi *vsi;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
+       if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       ethertype_setting = strip_msg->outer_ethertype_setting;
+       if (ethertype_setting) {
+               if (vsi->outer_vlan_ops.dis_stripping(vsi)) {
+                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                       goto out;
+               } else {
+                       enum ice_l2tsel l2tsel =
+                               ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;
+
+                       /* PF tells the VF that the outer VLAN tag is always
+                        * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
+                        * inner is always extracted to
+                        * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
+                        * support inner stripping while outer stripping is
+                        * disabled so that the first and only tag is extracted
+                        * in L2TAG1.
+                        */
+                       ice_vsi_update_l2tsel(vsi, l2tsel);
+               }
+       }
+
+       ethertype_setting = strip_msg->inner_ethertype_setting;
+       if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+out:
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
+                                    v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_ena_vlan_insertion_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
+ */
+static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_vlan_supported_caps *insertion_support;
+       struct virtchnl_vlan_setting *insertion_msg =
+               (struct virtchnl_vlan_setting *)msg;
+       u32 ethertype_setting;
+       struct ice_vsi *vsi;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
+       if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       ethertype_setting = insertion_msg->outer_ethertype_setting;
+       if (ethertype_setting &&
+           ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion,
+                                   ethertype_setting)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       ethertype_setting = insertion_msg->inner_ethertype_setting;
+       if (ethertype_setting &&
+           ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion,
+                                   ethertype_setting)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+out:
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
+                                    v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_dis_vlan_insertion_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
+ */
+static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_vlan_supported_caps *insertion_support;
+       struct virtchnl_vlan_setting *insertion_msg =
+               (struct virtchnl_vlan_setting *)msg;
+       u32 ethertype_setting;
+       struct ice_vsi *vsi;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
+       if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       ethertype_setting = insertion_msg->outer_ethertype_setting;
+       if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+       ethertype_setting = insertion_msg->inner_ethertype_setting;
+       if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto out;
+       }
+
+out:
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2,
+                                    v_ret, NULL, 0);
+}
+
+static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
+       .get_ver_msg = ice_vc_get_ver_msg,
+       .get_vf_res_msg = ice_vc_get_vf_res_msg,
+       .reset_vf = ice_vc_reset_vf_msg,
+       .add_mac_addr_msg = ice_vc_add_mac_addr_msg,
+       .del_mac_addr_msg = ice_vc_del_mac_addr_msg,
+       .cfg_qs_msg = ice_vc_cfg_qs_msg,
+       .ena_qs_msg = ice_vc_ena_qs_msg,
+       .dis_qs_msg = ice_vc_dis_qs_msg,
+       .request_qs_msg = ice_vc_request_qs_msg,
+       .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
+       .config_rss_key = ice_vc_config_rss_key,
+       .config_rss_lut = ice_vc_config_rss_lut,
+       .get_stats_msg = ice_vc_get_stats_msg,
+       .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
+       .add_vlan_msg = ice_vc_add_vlan_msg,
+       .remove_vlan_msg = ice_vc_remove_vlan_msg,
+       .ena_vlan_stripping = ice_vc_ena_vlan_stripping,
+       .dis_vlan_stripping = ice_vc_dis_vlan_stripping,
+       .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
+       .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
+       .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
+       .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
+       .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
+       .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
+       .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
+       .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
+       .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
+       .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
+};
+
+/**
+ * ice_virtchnl_set_dflt_ops - Switch to default virtchnl ops
+ * @vf: the VF to switch ops
+ */
+void ice_virtchnl_set_dflt_ops(struct ice_vf *vf)
+{
+       vf->virtchnl_ops = &ice_virtchnl_dflt_ops;
+}
+
+/**
+ * ice_vc_repr_add_mac
+ * @vf: pointer to VF
+ * @msg: virtchannel message
+ *
+ * When port representors are created, we do not add a MAC rule
+ * to the firmware; instead, we store it so that the PF can report
+ * the same MAC as the VF.
+ */
+static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
+{
+       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_ether_addr_list *al =
+           (struct virtchnl_ether_addr_list *)msg;
+       struct ice_vsi *vsi;
+       struct ice_pf *pf;
+       int i;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+           !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto handle_mac_exit;
+       }
+
+       pf = vf->pf;
+
+       vsi = ice_get_vf_vsi(vf);
+       if (!vsi) {
+               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto handle_mac_exit;
+       }
+
+       for (i = 0; i < al->num_elements; i++) {
+               u8 *mac_addr = al->list[i].addr;
+               int result;
+
+               if (!is_unicast_ether_addr(mac_addr) ||
+                   ether_addr_equal(mac_addr, vf->hw_lan_addr.addr))
+                       continue;
+
+               if (vf->pf_set_mac) {
+                       dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
+                       v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+                       goto handle_mac_exit;
+               }
+
+               result = ice_eswitch_add_vf_mac_rule(pf, vf, mac_addr);
+               if (result) {
+                       dev_err(ice_pf_to_dev(pf), "Failed to add MAC %pM for VF %d, error %d\n",
+                               mac_addr, vf->vf_id, result);
+                       goto handle_mac_exit;
+               }
+
+               ice_vfhw_mac_add(vf, &al->list[i]);
+               vf->num_mac++;
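+               /* only the first valid unicast MAC in the list is added */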
+               break;
+       }
+
+handle_mac_exit:
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+                                    v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_repr_del_mac - response with success for deleting MAC
+ * @vf: pointer to VF
+ * @msg: virtchannel message
+ *
+ * Respond with success so as not to break the normal VF flow.
+ * For legacy VF drivers, try to update the cached MAC address.
+ */
+static int ice_vc_repr_del_mac(struct ice_vf *vf, u8 *msg)
+{
+       struct virtchnl_ether_addr_list *al =
+               (struct virtchnl_ether_addr_list *)msg;
+
+       ice_update_legacy_cached_mac(vf, &al->list[0]);
+
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
+                                    VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+}
+
+static int ice_vc_repr_add_vlan(struct ice_vf *vf, u8 __always_unused *msg)
+{
+       dev_dbg(ice_pf_to_dev(vf->pf),
+               "Can't add VLAN in switchdev mode for VF %d\n", vf->vf_id);
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
+                                    VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+}
+
+static int ice_vc_repr_del_vlan(struct ice_vf *vf, u8 __always_unused *msg)
+{
+       dev_dbg(ice_pf_to_dev(vf->pf),
+               "Can't delete VLAN in switchdev mode for VF %d\n", vf->vf_id);
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
+                                    VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+}
+
+static int ice_vc_repr_ena_vlan_stripping(struct ice_vf *vf)
+{
+       dev_dbg(ice_pf_to_dev(vf->pf),
+               "Can't enable VLAN stripping in switchdev mode for VF %d\n",
+               vf->vf_id);
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+                                    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+                                    NULL, 0);
+}
+
+static int ice_vc_repr_dis_vlan_stripping(struct ice_vf *vf)
+{
+       dev_dbg(ice_pf_to_dev(vf->pf),
+               "Can't disable VLAN stripping in switchdev mode for VF %d\n",
+               vf->vf_id);
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+                                    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+                                    NULL, 0);
+}
+
+static int
+ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
+{
+       dev_dbg(ice_pf_to_dev(vf->pf),
+               "Can't config promiscuous mode in switchdev mode for VF %d\n",
+               vf->vf_id);
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+                                    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+                                    NULL, 0);
+}
+
+static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
+       .get_ver_msg = ice_vc_get_ver_msg,
+       .get_vf_res_msg = ice_vc_get_vf_res_msg,
+       .reset_vf = ice_vc_reset_vf_msg,
+       .add_mac_addr_msg = ice_vc_repr_add_mac,
+       .del_mac_addr_msg = ice_vc_repr_del_mac,
+       .cfg_qs_msg = ice_vc_cfg_qs_msg,
+       .ena_qs_msg = ice_vc_ena_qs_msg,
+       .dis_qs_msg = ice_vc_dis_qs_msg,
+       .request_qs_msg = ice_vc_request_qs_msg,
+       .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
+       .config_rss_key = ice_vc_config_rss_key,
+       .config_rss_lut = ice_vc_config_rss_lut,
+       .get_stats_msg = ice_vc_get_stats_msg,
+       .cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
+       .add_vlan_msg = ice_vc_repr_add_vlan,
+       .remove_vlan_msg = ice_vc_repr_del_vlan,
+       .ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping,
+       .dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping,
+       .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
+       .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
+       .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
+       .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
+       .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
+       .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
+       .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
+       .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
+       .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
+       .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
+};
+
+/**
+ * ice_virtchnl_set_repr_ops - Switch to representor virtchnl ops
+ * @vf: the VF to switch ops
+ */
+void ice_virtchnl_set_repr_ops(struct ice_vf *vf)
+{
+       vf->virtchnl_ops = &ice_virtchnl_repr_ops;
+}
+
+/**
+ * ice_vc_process_vf_msg - Process request from VF
+ * @pf: pointer to the PF structure
+ * @event: pointer to the AQ event
+ *
+ * Called from the common ASQ/ARQ handler to process a request from a VF.
+ */
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+       u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
+       s16 vf_id = le16_to_cpu(event->desc.retval);
+       const struct ice_virtchnl_ops *ops;
+       u16 msglen = event->msg_len;
+       u8 *msg = event->msg_buf;
+       struct ice_vf *vf = NULL;
+       struct device *dev;
+       int err = 0;
+
+       dev = ice_pf_to_dev(pf);
+
+       vf = ice_get_vf_by_id(pf, vf_id);
+       if (!vf) {
+               dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n",
+                       vf_id, v_opcode, msglen);
+               return;
+       }
+
+       /* Check if VF is disabled. */
+       if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
+               err = -EPERM;
+               goto error_handler;
+       }
+
+       ops = vf->virtchnl_ops;
+
+       /* Perform basic checks on the msg */
+       err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
+       if (err) {
+               if (err == VIRTCHNL_STATUS_ERR_PARAM)
+                       err = -EPERM;
+               else
+                       err = -EINVAL;
+       }
+
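+       /* the opcode allow-list is checked before any validation error is
+        * reported via the error_handler label below
+        */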
+       if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
+               ice_vc_send_msg_to_vf(vf, v_opcode,
+                                     VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
+                                     0);
+               ice_put_vf(vf);
+               return;
+       }
+
+error_handler:
+       if (err) {
+               ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
+                                     NULL, 0);
+               dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
+                       vf_id, v_opcode, msglen, err);
+               ice_put_vf(vf);
+               return;
+       }
+
+       /* VF is being configured in another context that triggers a VFR, so no
+        * need to process this message
+        */
+       if (!mutex_trylock(&vf->cfg_lock)) {
+               dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n",
+                        vf->vf_id);
+               ice_put_vf(vf);
+               return;
+       }
+
+       switch (v_opcode) {
+       case VIRTCHNL_OP_VERSION:
+               err = ops->get_ver_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_GET_VF_RESOURCES:
+               err = ops->get_vf_res_msg(vf, msg);
+               if (ice_vf_init_vlan_stripping(vf))
+                       dev_dbg(dev, "Failed to initialize VLAN stripping for VF %d\n",
+                               vf->vf_id);
+               ice_vc_notify_vf_link_state(vf);
+               break;
+       case VIRTCHNL_OP_RESET_VF:
+               ops->reset_vf(vf);
+               break;
+       case VIRTCHNL_OP_ADD_ETH_ADDR:
+               err = ops->add_mac_addr_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_DEL_ETH_ADDR:
+               err = ops->del_mac_addr_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+               err = ops->cfg_qs_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_ENABLE_QUEUES:
+               err = ops->ena_qs_msg(vf, msg);
+               ice_vc_notify_vf_link_state(vf);
+               break;
+       case VIRTCHNL_OP_DISABLE_QUEUES:
+               err = ops->dis_qs_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_REQUEST_QUEUES:
+               err = ops->request_qs_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+               err = ops->cfg_irq_map_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_CONFIG_RSS_KEY:
+               err = ops->config_rss_key(vf, msg);
+               break;
+       case VIRTCHNL_OP_CONFIG_RSS_LUT:
+               err = ops->config_rss_lut(vf, msg);
+               break;
+       case VIRTCHNL_OP_GET_STATS:
+               err = ops->get_stats_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+               err = ops->cfg_promiscuous_mode_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_ADD_VLAN:
+               err = ops->add_vlan_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_DEL_VLAN:
+               err = ops->remove_vlan_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+               err = ops->ena_vlan_stripping(vf);
+               break;
+       case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+               err = ops->dis_vlan_stripping(vf);
+               break;
+       case VIRTCHNL_OP_ADD_FDIR_FILTER:
+               err = ops->add_fdir_fltr_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_DEL_FDIR_FILTER:
+               err = ops->del_fdir_fltr_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_ADD_RSS_CFG:
+               err = ops->handle_rss_cfg_msg(vf, msg, true);
+               break;
+       case VIRTCHNL_OP_DEL_RSS_CFG:
+               err = ops->handle_rss_cfg_msg(vf, msg, false);
+               break;
+       case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
+               err = ops->get_offload_vlan_v2_caps(vf);
+               break;
+       case VIRTCHNL_OP_ADD_VLAN_V2:
+               err = ops->add_vlan_v2_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_DEL_VLAN_V2:
+               err = ops->remove_vlan_v2_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+               err = ops->ena_vlan_stripping_v2_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+               err = ops->dis_vlan_stripping_v2_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+               err = ops->ena_vlan_insertion_v2_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+               err = ops->dis_vlan_insertion_v2_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_UNKNOWN:
+       default:
+               dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
+                       vf_id);
+               err = ice_vc_send_msg_to_vf(vf, v_opcode,
+                                           VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+                                           NULL, 0);
+               break;
+       }
+       if (err) {
+               /* Errors from the opcode handlers are only logged here; no
+                * further recovery is attempted for a failed VF request.
+                */
+               dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
+                        vf_id, v_opcode, err);
+       }
+
+       mutex_unlock(&vf->cfg_lock);
+       ice_put_vf(vf);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
new file mode 100644 (file)
index 0000000..b5a3fd8
--- /dev/null
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_VIRTCHNL_H_
+#define _ICE_VIRTCHNL_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/if_ether.h>
+#include <linux/avf/virtchnl.h>
+#include "ice_vf_lib.h"
+
+/* Restrict the number of MAC addresses and VLANs that a non-trusted VF can
+ * program
+ */
+#define ICE_MAX_VLAN_PER_VF            8
+
+/* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for
+ * broadcast, and 16 for additional unicast/multicast filters
+ */
+#define ICE_MAX_MACADDR_PER_VF         18
+
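+/* Per-VF table of virtchnl message handlers, switched between the default
+ * and the port representor implementations via ice_virtchnl_set_dflt_ops()
+ * and ice_virtchnl_set_repr_ops().
+ */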
+struct ice_virtchnl_ops {
+       int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
+       int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
+       void (*reset_vf)(struct ice_vf *vf);
+       int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
+       int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
+       int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg);
+       int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg);
+       int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg);
+       int (*request_qs_msg)(struct ice_vf *vf, u8 *msg);
+       int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
+       int (*config_rss_key)(struct ice_vf *vf, u8 *msg);
+       int (*config_rss_lut)(struct ice_vf *vf, u8 *msg);
+       int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
+       int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
+       int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
+       int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
+       int (*ena_vlan_stripping)(struct ice_vf *vf);
+       int (*dis_vlan_stripping)(struct ice_vf *vf);
+       int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
+       int (*add_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
+       int (*del_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
+       int (*get_offload_vlan_v2_caps)(struct ice_vf *vf);
+       int (*add_vlan_v2_msg)(struct ice_vf *vf, u8 *msg);
+       int (*remove_vlan_v2_msg)(struct ice_vf *vf, u8 *msg);
+       int (*ena_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg);
+       int (*dis_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg);
+       int (*ena_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg);
+       int (*dis_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg);
+};
+
+#ifdef CONFIG_PCI_IOV
+void ice_virtchnl_set_dflt_ops(struct ice_vf *vf);
+void ice_virtchnl_set_repr_ops(struct ice_vf *vf);
+void ice_vc_notify_vf_link_state(struct ice_vf *vf);
+void ice_vc_notify_link_state(struct ice_pf *pf);
+void ice_vc_notify_reset(struct ice_pf *pf);
+int
+ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+                     enum virtchnl_status_code v_retval, u8 *msg, u16 msglen);
+bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
+#else /* CONFIG_PCI_IOV */
+static inline void ice_virtchnl_set_dflt_ops(struct ice_vf *vf) { }
+static inline void ice_virtchnl_set_repr_ops(struct ice_vf *vf) { }
+static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { }
+static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
+static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
+
+static inline int
+ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+                     enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
+{
+       return false;
+}
+#endif /* !CONFIG_PCI_IOV */
+
+#endif /* _ICE_VIRTCHNL_H_ */
index 07989f1..8e38ee2 100644 (file)
@@ -5,6 +5,7 @@
 #include "ice_base.h"
 #include "ice_lib.h"
 #include "ice_flow.h"
+#include "ice_vf_lib_private.h"
 
 #define to_fltr_conf_from_desc(p) \
        container_of(p, struct virtchnl_fdir_fltr_conf, input)
index f4e629f..c5bcc8d 100644 (file)
@@ -6,6 +6,7 @@
 
 struct ice_vf;
 struct ice_pf;
+struct ice_vsi;
 
 enum ice_fdir_ctx_stat {
        ICE_FDIR_CTX_READY,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
deleted file mode 100644 (file)
index 4840570..0000000
+++ /dev/null
@@ -1,6631 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018, Intel Corporation. */
-
-#include "ice.h"
-#include "ice_base.h"
-#include "ice_lib.h"
-#include "ice_fltr.h"
-#include "ice_dcb_lib.h"
-#include "ice_flow.h"
-#include "ice_eswitch.h"
-#include "ice_virtchnl_allowlist.h"
-#include "ice_flex_pipe.h"
-#include "ice_vf_vsi_vlan_ops.h"
-#include "ice_vlan.h"
-
-#define FIELD_SELECTOR(proto_hdr_field) \
-               BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
-
-struct ice_vc_hdr_match_type {
-       u32 vc_hdr;     /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
-       u32 ice_hdr;    /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
-};
-
-static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
-       {VIRTCHNL_PROTO_HDR_NONE,       ICE_FLOW_SEG_HDR_NONE},
-       {VIRTCHNL_PROTO_HDR_ETH,        ICE_FLOW_SEG_HDR_ETH},
-       {VIRTCHNL_PROTO_HDR_S_VLAN,     ICE_FLOW_SEG_HDR_VLAN},
-       {VIRTCHNL_PROTO_HDR_C_VLAN,     ICE_FLOW_SEG_HDR_VLAN},
-       {VIRTCHNL_PROTO_HDR_IPV4,       ICE_FLOW_SEG_HDR_IPV4 |
-                                       ICE_FLOW_SEG_HDR_IPV_OTHER},
-       {VIRTCHNL_PROTO_HDR_IPV6,       ICE_FLOW_SEG_HDR_IPV6 |
-                                       ICE_FLOW_SEG_HDR_IPV_OTHER},
-       {VIRTCHNL_PROTO_HDR_TCP,        ICE_FLOW_SEG_HDR_TCP},
-       {VIRTCHNL_PROTO_HDR_UDP,        ICE_FLOW_SEG_HDR_UDP},
-       {VIRTCHNL_PROTO_HDR_SCTP,       ICE_FLOW_SEG_HDR_SCTP},
-       {VIRTCHNL_PROTO_HDR_PPPOE,      ICE_FLOW_SEG_HDR_PPPOE},
-       {VIRTCHNL_PROTO_HDR_GTPU_IP,    ICE_FLOW_SEG_HDR_GTPU_IP},
-       {VIRTCHNL_PROTO_HDR_GTPU_EH,    ICE_FLOW_SEG_HDR_GTPU_EH},
-       {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
-                                       ICE_FLOW_SEG_HDR_GTPU_DWN},
-       {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
-                                       ICE_FLOW_SEG_HDR_GTPU_UP},
-       {VIRTCHNL_PROTO_HDR_L2TPV3,     ICE_FLOW_SEG_HDR_L2TPV3},
-       {VIRTCHNL_PROTO_HDR_ESP,        ICE_FLOW_SEG_HDR_ESP},
-       {VIRTCHNL_PROTO_HDR_AH,         ICE_FLOW_SEG_HDR_AH},
-       {VIRTCHNL_PROTO_HDR_PFCP,       ICE_FLOW_SEG_HDR_PFCP_SESSION},
-};
-
-struct ice_vc_hash_field_match_type {
-       u32 vc_hdr;             /* virtchnl headers
-                                * (VIRTCHNL_PROTO_HDR_XXX)
-                                */
-       u32 vc_hash_field;      /* virtchnl hash fields selector
-                                * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
-                                */
-       u64 ice_hash_field;     /* ice hash fields
-                                * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
-                                */
-};
-
-static const struct
-ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
-       {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
-       {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
-       {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
-               ICE_FLOW_HASH_ETH},
-       {VIRTCHNL_PROTO_HDR_ETH,
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
-       {VIRTCHNL_PROTO_HDR_S_VLAN,
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
-       {VIRTCHNL_PROTO_HDR_C_VLAN,
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
-       {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
-       {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
-       {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
-               ICE_FLOW_HASH_IPV4},
-       {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
-               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
-       {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
-               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
-       {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
-               ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
-       {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
-       {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
-       {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
-       {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
-               ICE_FLOW_HASH_IPV6},
-       {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
-               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
-       {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
-               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
-       {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
-               ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
-       {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
-       {VIRTCHNL_PROTO_HDR_TCP,
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
-       {VIRTCHNL_PROTO_HDR_TCP,
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
-       {VIRTCHNL_PROTO_HDR_TCP,
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
-               ICE_FLOW_HASH_TCP_PORT},
-       {VIRTCHNL_PROTO_HDR_UDP,
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
-       {VIRTCHNL_PROTO_HDR_UDP,
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
-       {VIRTCHNL_PROTO_HDR_UDP,
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
-               ICE_FLOW_HASH_UDP_PORT},
-       {VIRTCHNL_PROTO_HDR_SCTP,
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
-       {VIRTCHNL_PROTO_HDR_SCTP,
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
-       {VIRTCHNL_PROTO_HDR_SCTP,
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
-               ICE_FLOW_HASH_SCTP_PORT},
-       {VIRTCHNL_PROTO_HDR_PPPOE,
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
-       {VIRTCHNL_PROTO_HDR_GTPU_IP,
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
-       {VIRTCHNL_PROTO_HDR_L2TPV3,
-               FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
-       {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
-       {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
-       {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
-               BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
-};
-
-/**
- * ice_get_vf_vsi - get VF's VSI based on the stored index
- * @vf: VF used to get VSI
- */
-struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
-{
-       return vf->pf->vsi[vf->lan_vsi_idx];
-}
-
-/**
- * ice_get_vf_by_id - Get pointer to VF by ID
- * @pf: the PF private structure
- * @vf_id: the VF ID to locate
- *
- * Locate and return a pointer to the VF structure associated with a given ID.
- * Returns NULL if the ID does not have a valid VF structure associated with
- * it.
- *
- * This function takes a reference to the VF, which must be released by
- * calling ice_put_vf() once the caller is finished accessing the VF structure
- * returned.
- */
-struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
-{
-       struct ice_vf *vf;
-
-       rcu_read_lock();
-       hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
-               if (vf->vf_id == vf_id) {
-                       struct ice_vf *found;
-
-                       if (kref_get_unless_zero(&vf->refcnt))
-                               found = vf;
-                       else
-                               found = NULL;
-
-                       rcu_read_unlock();
-                       return found;
-               }
-       }
-       rcu_read_unlock();
-
-       return NULL;
-}
-
-/**
- * ice_release_vf - Release VF associated with a refcount
- * @ref: the kref decremented to zero
- *
- * Callback function for kref_put to release a VF once its reference count has
- * hit zero.
- */
-static void ice_release_vf(struct kref *ref)
-{
-       struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);
-
-       mutex_destroy(&vf->cfg_lock);
-
-       kfree_rcu(vf, rcu);
-}
-
-/**
- * ice_put_vf - Release a reference to a VF
- * @vf: the VF structure to decrease reference count on
- *
- * This must be called after ice_get_vf_by_id() once the reference to the VF
- * structure is no longer used. Otherwise, the VF structure will never be
- * freed.
- */
-void ice_put_vf(struct ice_vf *vf)
-{
-       kref_put(&vf->refcnt, ice_release_vf);
-}
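/* Editor's sketch, not part of the original diff: the get/put pairing that
 * the kernel-doc for ice_get_vf_by_id() requires. The caller context, the
 * error code and the handle_vf() helper are hypothetical placeholders.
 *
 *	struct ice_vf *vf;
 *
 *	vf = ice_get_vf_by_id(pf, vf_id);
 *	if (!vf)
 *		return -ENOENT;		(no such VF, or it is being removed)
 *
 *	handle_vf(vf);			(safe: we hold a kref on the VF)
 *
 *	ice_put_vf(vf);			(drop the reference from the lookup)
 */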
-
-/**
- * ice_has_vfs - Return true if the PF has any associated VFs
- * @pf: the PF private structure
- *
- * Return whether or not the PF has any allocated VFs.
- *
- * Note that this function only reports whether any VFs exist at the point
- * of calling it. It does not guarantee that no more VFs will be added.
- */
-bool ice_has_vfs(struct ice_pf *pf)
-{
-       /* A simple check that the hash table is not empty does not require
-        * the mutex or rcu_read_lock.
-        */
-       return !hash_empty(pf->vfs.table);
-}
-
-/**
- * ice_get_num_vfs - Get number of allocated VFs
- * @pf: the PF private structure
- *
- * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
- * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
- * the output of this function.
- */
-u16 ice_get_num_vfs(struct ice_pf *pf)
-{
-       struct ice_vf *vf;
-       unsigned int bkt;
-       u16 num_vfs = 0;
-
-       rcu_read_lock();
-       ice_for_each_vf_rcu(pf, bkt, vf)
-               num_vfs++;
-       rcu_read_unlock();
-
-       return num_vfs;
-}
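/* Editor's note, not part of the original diff: since VF IDs may be sparse,
 * code must not treat "i < ice_get_num_vfs(pf)" as a valid VF ID range. A
 * sketch of the intended pattern, following the locking used elsewhere in
 * this file (table_lock around ice_for_each_vf); handle_vf() is hypothetical:
 *
 *	struct ice_vf *vf;
 *	unsigned int bkt;
 *
 *	mutex_lock(&pf->vfs.table_lock);
 *	ice_for_each_vf(pf, bkt, vf)
 *		handle_vf(vf);
 *	mutex_unlock(&pf->vfs.table_lock);
 */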
-
-/**
- * ice_check_vf_init - helper to check if VF init is complete
- * @pf: pointer to the PF structure
- * @vf: the pointer to the VF to check
- */
-static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
-{
-       if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
-               dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
-                       vf->vf_id);
-               return -EBUSY;
-       }
-       return 0;
-}
-
-/**
- * ice_free_vf_entries - Free all VF entries from the hash table
- * @pf: pointer to the PF structure
- *
- * Iterate over the VF hash table, removing and releasing all VF entries.
- * Called during VF teardown or as cleanup during failed VF initialization.
- */
-static void ice_free_vf_entries(struct ice_pf *pf)
-{
-       struct ice_vfs *vfs = &pf->vfs;
-       struct hlist_node *tmp;
-       struct ice_vf *vf;
-       unsigned int bkt;
-
-       /* Remove all VFs from the hash table and release their main
-        * reference. Once all references to the VF are dropped, ice_put_vf()
-        * will call ice_release_vf which will remove the VF memory.
-        */
-       lockdep_assert_held(&vfs->table_lock);
-
-       hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
-               hash_del_rcu(&vf->entry);
-               ice_put_vf(vf);
-       }
-}
-
-/**
- * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
- * @pf: pointer to the PF structure
- * @v_opcode: operation code
- * @v_retval: return value
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- */
-static void
-ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
-                   enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
-{
-       struct ice_hw *hw = &pf->hw;
-       struct ice_vf *vf;
-       unsigned int bkt;
-
-       mutex_lock(&pf->vfs.table_lock);
-       ice_for_each_vf(pf, bkt, vf) {
-               /* Not all VFs are enabled, so skip the ones that are not */
-               if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
-                   !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
-                       continue;
-
-               /* Ignore return value on purpose - a given VF may fail, but
-                * we need to keep going and send to all of them
-                */
-               ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
-                                     msglen, NULL);
-       }
-       mutex_unlock(&pf->vfs.table_lock);
-}
-
-/**
- * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
- * @vf: pointer to the VF structure
- * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
- * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
- * @link_up: whether or not to set the link up/down
- */
-static void
-ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
-                int ice_link_speed, bool link_up)
-{
-       if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
-               pfe->event_data.link_event_adv.link_status = link_up;
-               /* Speed in Mbps */
-               pfe->event_data.link_event_adv.link_speed =
-                       ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
-       } else {
-               pfe->event_data.link_event.link_status = link_up;
-               /* Legacy method for virtchnl link speeds */
-               pfe->event_data.link_event.link_speed =
-                       (enum virtchnl_link_speed)
-                       ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
-       }
-}
-
-/**
- * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
- * @vf: the VF to check
- *
- * Returns true if the VF has no Rx and no Tx queues enabled and returns false
- * otherwise
- */
-static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
-{
-       return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
-               !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
-}
-
-/**
- * ice_is_vf_link_up - check if the VF's link is up
- * @vf: VF to check if link is up
- */
-static bool ice_is_vf_link_up(struct ice_vf *vf)
-{
-       struct ice_pf *pf = vf->pf;
-
-       if (ice_check_vf_init(pf, vf))
-               return false;
-
-       if (ice_vf_has_no_qs_ena(vf))
-               return false;
-       else if (vf->link_forced)
-               return vf->link_up;
-       else
-               return pf->hw.port_info->phy.link_info.link_info &
-                       ICE_AQ_LINK_UP;
-}
-
-/**
- * ice_vc_notify_vf_link_state - Inform a VF of link status
- * @vf: pointer to the VF structure
- *
- * Send a link status message to a single VF.
- */
-void ice_vc_notify_vf_link_state(struct ice_vf *vf)
-{
-       struct virtchnl_pf_event pfe = { 0 };
-       struct ice_hw *hw = &vf->pf->hw;
-
-       pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
-       pfe.severity = PF_EVENT_SEVERITY_INFO;
-
-       if (ice_is_vf_link_up(vf))
-               ice_set_pfe_link(vf, &pfe,
-                                hw->port_info->phy.link_info.link_speed, true);
-       else
-               ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
-
-       ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
-                             VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
-                             sizeof(pfe), NULL);
-}
-
-/**
- * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
- * @vf: VF to remove access to VSI for
- */
-static void ice_vf_invalidate_vsi(struct ice_vf *vf)
-{
-       vf->lan_vsi_idx = ICE_NO_VSI;
-       vf->lan_vsi_num = ICE_NO_VSI;
-}
-
-/**
- * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
- * @vf: invalidate this VF's VSI after freeing it
- */
-static void ice_vf_vsi_release(struct ice_vf *vf)
-{
-       ice_vsi_release(ice_get_vf_vsi(vf));
-       ice_vf_invalidate_vsi(vf);
-}
-
-/**
- * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
- * @vf: VF that control VSI is being invalidated on
- */
-static void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
-{
-       vf->ctrl_vsi_idx = ICE_NO_VSI;
-}
-
-/**
- * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
- * @vf: VF that control VSI is being released on
- */
-static void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
-{
-       ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
-       ice_vf_ctrl_invalidate_vsi(vf);
-}
-
-/**
- * ice_free_vf_res - Free a VF's resources
- * @vf: pointer to the VF info
- */
-static void ice_free_vf_res(struct ice_vf *vf)
-{
-       struct ice_pf *pf = vf->pf;
-       int i, last_vector_idx;
-
-       /* First, disable VF's configuration API to prevent OS from
-        * accessing the VF's VSI after it's freed or invalidated.
-        */
-       clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
-       ice_vf_fdir_exit(vf);
-       /* free VF control VSI */
-       if (vf->ctrl_vsi_idx != ICE_NO_VSI)
-               ice_vf_ctrl_vsi_release(vf);
-
-       /* free VSI and disconnect it from the parent uplink */
-       if (vf->lan_vsi_idx != ICE_NO_VSI) {
-               ice_vf_vsi_release(vf);
-               vf->num_mac = 0;
-       }
-
-       last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1;
-
-       /* clear VF MDD event information */
-       memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
-       memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
-
-       /* Disable interrupts so that VF starts in a known state */
-       for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
-               wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
-               ice_flush(&pf->hw);
-       }
-       /* reset some of the state variables keeping track of the resources */
-       clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
-       clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
-}
-
-/**
- * ice_dis_vf_mappings - disable the VF's MSIX and queue mappings
- * @vf: pointer to the VF structure
- */
-static void ice_dis_vf_mappings(struct ice_vf *vf)
-{
-       struct ice_pf *pf = vf->pf;
-       struct ice_vsi *vsi;
-       struct device *dev;
-       int first, last, v;
-       struct ice_hw *hw;
-
-       hw = &pf->hw;
-       vsi = ice_get_vf_vsi(vf);
-
-       dev = ice_pf_to_dev(pf);
-       wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
-       wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
-
-       first = vf->first_vector_idx;
-       last = first + pf->vfs.num_msix_per - 1;
-       for (v = first; v <= last; v++) {
-               u32 reg;
-
-               reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
-                       GLINT_VECT2FUNC_IS_PF_M) |
-                      ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
-                       GLINT_VECT2FUNC_PF_NUM_M));
-               wr32(hw, GLINT_VECT2FUNC(v), reg);
-       }
-
-       if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
-               wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
-       else
-               dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
-
-       if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
-               wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
-       else
-               dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
-}
-
-/**
- * ice_sriov_free_msix_res - Reset/free any used MSIX resources
- * @pf: pointer to the PF structure
- *
- * Since no MSIX entries are taken from the pf->irq_tracker, just clear
- * the pf->sriov_base_vector.
- *
- * Returns 0 on success, and -EINVAL on error.
- */
-static int ice_sriov_free_msix_res(struct ice_pf *pf)
-{
-       struct ice_res_tracker *res;
-
-       if (!pf)
-               return -EINVAL;
-
-       res = pf->irq_tracker;
-       if (!res)
-               return -EINVAL;
-
-       /* give back irq_tracker resources used */
-       WARN_ON(pf->sriov_base_vector < res->num_entries);
-
-       pf->sriov_base_vector = 0;
-
-       return 0;
-}
-
-/**
- * ice_set_vf_state_qs_dis - Set VF queues state to disabled
- * @vf: pointer to the VF structure
- */
-void ice_set_vf_state_qs_dis(struct ice_vf *vf)
-{
-       /* Clear Rx/Tx enabled queues flag */
-       bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
-       bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
-       clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-}
-
-/**
- * ice_dis_vf_qs - Disable the VF queues
- * @vf: pointer to the VF structure
- */
-static void ice_dis_vf_qs(struct ice_vf *vf)
-{
-       struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
-       ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
-       ice_vsi_stop_all_rx_rings(vsi);
-       ice_set_vf_state_qs_dis(vf);
-}
-
-/**
- * ice_free_vfs - Free all VFs
- * @pf: pointer to the PF structure
- */
-void ice_free_vfs(struct ice_pf *pf)
-{
-       struct device *dev = ice_pf_to_dev(pf);
-       struct ice_vfs *vfs = &pf->vfs;
-       struct ice_hw *hw = &pf->hw;
-       struct ice_vf *vf;
-       unsigned int bkt;
-
-       if (!ice_has_vfs(pf))
-               return;
-
-       while (test_and_set_bit(ICE_VF_DIS, pf->state))
-               usleep_range(1000, 2000);
-
-       /* Disable IOV before freeing resources. This lets any VF drivers
-        * running in the host get themselves cleaned up before we yank
-        * the carpet out from underneath their feet.
-        */
-       if (!pci_vfs_assigned(pf->pdev))
-               pci_disable_sriov(pf->pdev);
-       else
-               dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
-
-       mutex_lock(&vfs->table_lock);
-
-       ice_eswitch_release(pf);
-
-       ice_for_each_vf(pf, bkt, vf) {
-               mutex_lock(&vf->cfg_lock);
-
-               ice_dis_vf_qs(vf);
-
-               if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
-                       /* disable VF qp mappings and set VF disable state */
-                       ice_dis_vf_mappings(vf);
-                       set_bit(ICE_VF_STATE_DIS, vf->vf_states);
-                       ice_free_vf_res(vf);
-               }
-
-               if (!pci_vfs_assigned(pf->pdev)) {
-                       u32 reg_idx, bit_idx;
-
-                       reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
-                       bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
-                       wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
-               }
-
-               /* clear malicious info since the VF is getting released */
-               if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
-                                       ICE_MAX_VF_COUNT, vf->vf_id))
-                       dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
-                               vf->vf_id);
-
-               mutex_unlock(&vf->cfg_lock);
-       }
-
-       if (ice_sriov_free_msix_res(pf))
-               dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
-
-       vfs->num_qps_per = 0;
-       ice_free_vf_entries(pf);
-
-       mutex_unlock(&vfs->table_lock);
-
-       clear_bit(ICE_VF_DIS, pf->state);
-       clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
-}
-
-/**
- * ice_trigger_vf_reset - Reset a VF on HW
- * @vf: pointer to the VF structure
- * @is_vflr: true if VFLR was issued, false if not
- * @is_pfr: true if the reset was triggered due to a previous PFR
- *
- * Trigger hardware to start a reset for a particular VF. Expects the caller
- * to wait the proper amount of time to allow hardware to reset the VF before
- * it cleans up and restores VF functionality.
- */
-static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
-{
-       struct ice_pf *pf = vf->pf;
-       u32 reg, reg_idx, bit_idx;
-       unsigned int vf_abs_id, i;
-       struct device *dev;
-       struct ice_hw *hw;
-
-       dev = ice_pf_to_dev(pf);
-       hw = &pf->hw;
-       vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
-
-       /* Inform VF that it is no longer active, as a warning */
-       clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
-
-       /* Disable VF's configuration API during reset. The flag is re-enabled
-        * when it's safe again to access VF's VSI.
-        */
-       clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
-
-       /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
-        * needs to clear them in the case of VFR/VFLR. If this is done for
-        * PFR, it can mess up VF resets because the VF driver may already
-        * have started cleanup by the time we get here.
-        */
-       if (!is_pfr) {
-               wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
-               wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
-       }
-
-       /* In the case of a VFLR, the HW has already reset the VF and we
-        * just need to clean up, so don't hit the VFRTRIG register.
-        */
-       if (!is_vflr) {
-               /* reset VF using VPGEN_VFRTRIG reg */
-               reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
-               reg |= VPGEN_VFRTRIG_VFSWR_M;
-               wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
-       }
-       /* clear the VFLR bit in GLGEN_VFLRSTAT */
-       reg_idx = (vf_abs_id) / 32;
-       bit_idx = (vf_abs_id) % 32;
-       wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
-       ice_flush(hw);
-
-       wr32(hw, PF_PCI_CIAA,
-            VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
-       for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
-               reg = rd32(hw, PF_PCI_CIAD);
-               /* no transactions pending so stop polling */
-               if ((reg & VF_TRANS_PENDING_M) == 0)
-                       break;
-
-               dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
-               udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
-       }
-}
-
-/**
- * ice_vf_get_port_info - Get the VF's port info structure
- * @vf: VF used to get the port info structure
- */
-static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
-{
-       return vf->pf->hw.port_info;
-}
-
-/**
- * ice_vf_vsi_setup - Set up a VF VSI
- * @vf: VF to setup VSI for
- *
- * Returns pointer to the successfully allocated VSI struct on success,
- * otherwise returns NULL on failure.
- */
-static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
-{
-       struct ice_port_info *pi = ice_vf_get_port_info(vf);
-       struct ice_pf *pf = vf->pf;
-       struct ice_vsi *vsi;
-
-       vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL);
-
-       if (!vsi) {
-               dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
-               ice_vf_invalidate_vsi(vf);
-               return NULL;
-       }
-
-       vf->lan_vsi_idx = vsi->idx;
-       vf->lan_vsi_num = vsi->vsi_num;
-
-       return vsi;
-}
-
-/**
- * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
- * @vf: VF to setup control VSI for
- *
- * Returns pointer to the successfully allocated VSI struct on success,
- * otherwise returns NULL on failure.
- */
-struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
-{
-       struct ice_port_info *pi = ice_vf_get_port_info(vf);
-       struct ice_pf *pf = vf->pf;
-       struct ice_vsi *vsi;
-
-       vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf, NULL);
-       if (!vsi) {
-               dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
-               ice_vf_ctrl_invalidate_vsi(vf);
-       }
-
-       return vsi;
-}
-
-/**
- * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
- * @pf: pointer to PF structure
- * @vf: pointer to VF that the first MSIX vector index is being calculated for
- *
- * This returns the first MSIX vector index in PF space that is used by this VF.
- * This index is used when accessing PF relative registers such as
- * GLINT_VECT2FUNC and GLINT_DYN_CTL.
- * This will always be the OICR index in the AVF driver, so any functionality
- * using vf->first_vector_idx for queue configuration will have to increment by
- * 1 to avoid meddling with the OICR index.
- */
-static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
-{
-       return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per;
-}
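/* Editor's worked example, not part of the original diff (numbers assumed):
 * with pf->sriov_base_vector = 96 and pf->vfs.num_msix_per = 17, VF 2 gets
 * first_vector_idx = 96 + 2 * 17 = 130. That vector is the VF's OICR, so
 * queue vectors start at 131, which is why queue configuration paths add 1.
 */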
-
-/**
- * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
- * @vf: VF to re-apply the configuration for
- *
- * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
- * needs to re-apply the host configured Tx rate limiting configuration.
- */
-static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
-{
-       struct device *dev = ice_pf_to_dev(vf->pf);
-       struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-       int err;
-
-       if (vf->min_tx_rate) {
-               err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
-               if (err) {
-                       dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
-                               vf->min_tx_rate, vf->vf_id, err);
-                       return err;
-               }
-       }
-
-       if (vf->max_tx_rate) {
-               err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
-               if (err) {
-                       dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
-                               vf->max_tx_rate, vf->vf_id, err);
-                       return err;
-               }
-       }
-
-       return 0;
-}
-
-static u16 ice_vf_get_port_vlan_id(struct ice_vf *vf)
-{
-       return vf->port_vlan_info.vid;
-}
-
-static u8 ice_vf_get_port_vlan_prio(struct ice_vf *vf)
-{
-       return vf->port_vlan_info.prio;
-}
-
-bool ice_vf_is_port_vlan_ena(struct ice_vf *vf)
-{
-       return (ice_vf_get_port_vlan_id(vf) || ice_vf_get_port_vlan_prio(vf));
-}
-
-static u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
-{
-       return vf->port_vlan_info.tpid;
-}
-
-/**
- * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
- * @vf: VF to add VLAN filters for
- * @vsi: Pointer to VSI
- *
- * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
- * always re-adds either a VLAN 0 or port VLAN based filter after reset.
- */
-static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
-{
-       struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
-       struct device *dev = ice_pf_to_dev(vf->pf);
-       int err;
-
-       if (ice_vf_is_port_vlan_ena(vf)) {
-               err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
-               if (err) {
-                       dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
-                               vf->vf_id, err);
-                       return err;
-               }
-
-               err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
-       } else {
-               err = ice_vsi_add_vlan_zero(vsi);
-       }
-
-       if (err) {
-               dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
-                       ice_vf_is_port_vlan_ena(vf) ?
-                       ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
-               return err;
-       }
-
-       err = vlan_ops->ena_rx_filtering(vsi);
-       if (err)
-               dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
-                        vf->vf_id, vsi->idx, err);
-
-       return 0;
-}
-
-static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
-{
-       struct ice_vsi_ctx *ctx;
-       int err;
-
-       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-       if (!ctx)
-               return -ENOMEM;
-
-       ctx->info.sec_flags = vsi->info.sec_flags;
-       ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
-
-       if (enable)
-               ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
-       else
-               ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
-
-       err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
-       if (err)
-               dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
-                       enable ? "ON" : "OFF", vsi->vsi_num, err);
-       else
-               vsi->info.sec_flags = ctx->info.sec_flags;
-
-       kfree(ctx);
-
-       return err;
-}
-
-/**
- * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
- * @vsi: VSI to enable Tx spoof checking for
- */
-static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
-{
-       struct ice_vsi_vlan_ops *vlan_ops;
-       int err;
-
-       vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
-
-       err = vlan_ops->ena_tx_filtering(vsi);
-       if (err)
-               return err;
-
-       return ice_cfg_mac_antispoof(vsi, true);
-}
-
-/**
- * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
- * @vsi: VSI to disable Tx spoof checking for
- */
-static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
-{
-       struct ice_vsi_vlan_ops *vlan_ops;
-       int err;
-
-       vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
-
-       err = vlan_ops->dis_tx_filtering(vsi);
-       if (err)
-               return err;
-
-       return ice_cfg_mac_antispoof(vsi, false);
-}
-
-/**
- * ice_vf_set_spoofchk_cfg - apply Tx spoof checking setting
- * @vf: VF to set spoofchk for
- * @vsi: VSI associated to the VF
- */
-static int
-ice_vf_set_spoofchk_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
-{
-       int err;
-
-       if (vf->spoofchk)
-               err = ice_vsi_ena_spoofchk(vsi);
-       else
-               err = ice_vsi_dis_spoofchk(vsi);
-
-       return err;
-}
-
-/**
- * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
- * @vf: VF to add MAC filters for
- *
- * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
- * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
- */
-static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
-{
-       struct device *dev = ice_pf_to_dev(vf->pf);
-       struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-       u8 broadcast[ETH_ALEN];
-       int status;
-
-       if (ice_is_eswitch_mode_switchdev(vf->pf))
-               return 0;
-
-       eth_broadcast_addr(broadcast);
-       status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
-       if (status) {
-               dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
-                       vf->vf_id, status);
-               return status;
-       }
-
-       vf->num_mac++;
-
-       if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
-               status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
-                                         ICE_FWD_TO_VSI);
-               if (status) {
-                       dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
-                               &vf->hw_lan_addr.addr[0], vf->vf_id,
-                               status);
-                       return status;
-               }
-               vf->num_mac++;
-
-               ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
-       }
-
-       return 0;
-}
-
-/**
- * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
- * @vf: VF to configure trust setting for
- */
-static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
-{
-       if (vf->trusted)
-               set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
-       else
-               clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
-}
-
-/**
- * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
- * @vf: VF to enable MSIX mappings for
- *
- * Some of the registers need to be indexed/configured using hardware global
- * device values, while other registers need 0-based values, which represent
- * PF-relative values.
- */
-static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
-{
-       int device_based_first_msix, device_based_last_msix;
-       int pf_based_first_msix, pf_based_last_msix, v;
-       struct ice_pf *pf = vf->pf;
-       int device_based_vf_id;
-       struct ice_hw *hw;
-       u32 reg;
-
-       hw = &pf->hw;
-       pf_based_first_msix = vf->first_vector_idx;
-       pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1;
-
-       device_based_first_msix = pf_based_first_msix +
-               pf->hw.func_caps.common_cap.msix_vector_first_id;
-       device_based_last_msix =
-               (device_based_first_msix + pf->vfs.num_msix_per) - 1;
-       device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
-
-       reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
-               VPINT_ALLOC_FIRST_M) |
-              ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
-               VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
-       wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
-
-       reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
-                & VPINT_ALLOC_PCI_FIRST_M) |
-              ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
-               VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
-       wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
-
-       /* map the interrupts to its functions */
-       for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
-               reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
-                       GLINT_VECT2FUNC_VF_NUM_M) |
-                      ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
-                       GLINT_VECT2FUNC_PF_NUM_M));
-               wr32(hw, GLINT_VECT2FUNC(v), reg);
-       }
-
-       /* Map mailbox interrupt to VF MSI-X vector 0 */
-       wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
-}
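/* Editor's worked example, not part of the original diff (same assumed
 * numbers as above): for VF 2, pf_based_first_msix = 130 and, assuming
 * msix_vector_first_id = 1, the device-based range is 131..147. The
 * GLINT_VECT2FUNC loop still uses the PF-based indices 130..146.
 */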
-
-/**
- * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
- * @vf: VF to enable the mappings for
- * @max_txq: max Tx queues allowed on the VF's VSI
- * @max_rxq: max Rx queues allowed on the VF's VSI
- */
-static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
-{
-       struct device *dev = ice_pf_to_dev(vf->pf);
-       struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-       struct ice_hw *hw = &vf->pf->hw;
-       u32 reg;
-
-       /* set regardless of mapping mode */
-       wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
-
-       /* VF Tx queues allocation */
-       if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
-               /* set the VF PF Tx queue range
-                * VFNUMQ value should be set to (number of queues - 1). A value
-                * of 0 means 1 queue and a value of 255 means 256 queues
-                */
-               reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
-                       VPLAN_TX_QBASE_VFFIRSTQ_M) |
-                      (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
-                       VPLAN_TX_QBASE_VFNUMQ_M));
-               wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
-       } else {
-               dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
-       }
-
-       /* set regardless of mapping mode */
-       wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
-
-       /* VF Rx queues allocation */
-       if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
-               /* set the VF PF Rx queue range
-                * VFNUMQ value should be set to (number of queues - 1). A value
-                * of 0 means 1 queue and a value of 255 means 256 queues
-                */
-               reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
-                       VPLAN_RX_QBASE_VFFIRSTQ_M) |
-                      (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
-                       VPLAN_RX_QBASE_VFNUMQ_M));
-               wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
-       } else {
-               dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
-       }
-}
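/* Editor's worked example, not part of the original diff (values assumed):
 * if the VF VSI's first Tx queue is vsi->txq_map[0] = 64 and max_txq = 16,
 * the register is programmed with VFFIRSTQ = 64 and VFNUMQ = 16 - 1 = 15,
 * matching the "a value of 0 means 1 queue" encoding described above.
 */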
-
-/**
- * ice_ena_vf_mappings - enable VF MSIX and queue mapping
- * @vf: pointer to the VF structure
- */
-static void ice_ena_vf_mappings(struct ice_vf *vf)
-{
-       struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
-       ice_ena_vf_msix_mappings(vf);
-       ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
-}
-
-/**
- * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
- * @vf: VF to calculate the register index for
- * @q_vector: a q_vector associated to the VF
- */
-int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
-{
-       struct ice_pf *pf;
-
-       if (!vf || !q_vector)
-               return -EINVAL;
-
-       pf = vf->pf;
-
-       /* always add one to account for the OICR being the first MSIX */
-       return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
-               q_vector->v_idx + 1;
-}
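/* Editor's worked example, not part of the original diff (same assumed
 * numbers again): for VF 2 with sriov_base_vector = 96, num_msix_per = 17
 * and q_vector->v_idx = 0, the register index is 96 + 17 * 2 + 0 + 1 = 131,
 * i.e. the first vector after the VF's OICR at index 130.
 */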
-
-/**
- * ice_get_max_valid_res_idx - Get the max valid resource index
- * @res: pointer to the resource to find the max valid index for
- *
- * Start from the end of the ice_res_tracker and return as soon as we find the
- * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
- * valid for SR-IOV because it is the only consumer that manipulates the
- * res->end and this is always called when res->end is set to res->num_entries.
- */
-static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
-{
-       int i;
-
-       if (!res)
-               return -EINVAL;
-
-       for (i = res->num_entries - 1; i >= 0; i--)
-               if (res->list[i] & ICE_RES_VALID_BIT)
-                       return i;
-
-       return 0;
-}
-
-/**
- * ice_sriov_set_msix_res - Set any used MSIX resources
- * @pf: pointer to PF structure
- * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
- *
- * This function allows SR-IOV resources to be taken from the end of the PF's
- * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
- * just set the pf->sriov_base_vector and return success.
- *
- * If there are not enough resources available, return an error. This should
- * always be caught by ice_set_per_vf_res().
- *
- * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
- * in the PF's space available for SR-IOV.
- */
-static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
-{
-       u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
-       int vectors_used = pf->irq_tracker->num_entries;
-       int sriov_base_vector;
-
-       sriov_base_vector = total_vectors - num_msix_needed;
-
-       /* make sure we only grab irq_tracker entries from the list end and
-        * that we have enough available MSIX vectors
-        */
-       if (sriov_base_vector < vectors_used)
-               return -EINVAL;
-
-       pf->sriov_base_vector = sriov_base_vector;
-
-       return 0;
-}
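/* Editor's worked example, not part of the original diff (numbers assumed):
 * with num_msix_vectors = 1024, irq_tracker->num_entries = 256 and four VFs
 * of 17 vectors each (num_msix_needed = 68), sriov_base_vector = 1024 - 68 =
 * 956 >= 256, so the SR-IOV vectors fit after the tracked entries and the
 * function succeeds.
 */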
-
-/**
- * ice_set_per_vf_res - check if vectors and queues are available
- * @pf: pointer to the PF structure
- * @num_vfs: the number of SR-IOV VFs being configured
- *
- * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
- * get more vectors and can enable more queues per VF. Note that this does not
- * grab any vectors from the SW pool already allocated. Also note, that all
- * vector counts include one for each VF's miscellaneous interrupt vector
- * (i.e. OICR).
- *
- * Minimum VFs - 2 vectors, 1 queue pair
- * Small VFs - 5 vectors, 4 queue pairs
- * Medium VFs - 17 vectors, 16 queue pairs
- *
- * Second, determine number of queue pairs per VF by starting with a pre-defined
- * maximum each VF supports. If this is not possible, then we adjust based on
- * queue pairs available on the device.
- *
- * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
- * by each VF during VF initialization and reset.
- */
-static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
-{
-       int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
-       u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
-       int msix_avail_per_vf, msix_avail_for_sriov;
-       struct device *dev = ice_pf_to_dev(pf);
-
-       lockdep_assert_held(&pf->vfs.table_lock);
-
-       if (!num_vfs || max_valid_res_idx < 0)
-               return -EINVAL;
-
-       /* determine MSI-X resources per VF */
-       msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
-               pf->irq_tracker->num_entries;
-       msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
-       if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
-               num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
-       } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
-               num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
-       } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
-               num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
-       } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
-               num_msix_per_vf = ICE_MIN_INTR_PER_VF;
-       } else {
-               dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
-                       msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
-                       num_vfs);
-               return -EIO;
-       }
-
-       num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
-                       ICE_MAX_RSS_QS_PER_VF);
-       avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
-       if (!avail_qs)
-               num_txq = 0;
-       else if (num_txq > avail_qs)
-               num_txq = rounddown_pow_of_two(avail_qs);
-
-       num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
-                       ICE_MAX_RSS_QS_PER_VF);
-       avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
-       if (!avail_qs)
-               num_rxq = 0;
-       else if (num_rxq > avail_qs)
-               num_rxq = rounddown_pow_of_two(avail_qs);
-
-       if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
-               dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
-                       ICE_MIN_QS_PER_VF, num_vfs);
-               return -EIO;
-       }
-
-       if (ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs)) {
-               dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
-                       num_vfs);
-               return -EINVAL;
-       }
-
-       /* only allow equal Tx/Rx queue count (i.e. queue pairs) */
-       pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
-       pf->vfs.num_msix_per = num_msix_per_vf;
-       dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
-                num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);
-
-       return 0;
-}
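/* Editor's worked example, not part of the original diff (numbers assumed):
 * with 48 MSI-X vectors left for SR-IOV and num_vfs = 4, msix_avail_per_vf =
 * 12, which selects the small tier (ICE_NUM_VF_MSIX_SMALL, i.e. 5 vectors,
 * per the tiers listed above). One vector is the OICR (ICE_NONQ_VECS_VF),
 * leaving up to 4 queue pairs per VF, further limited by the Tx/Rx queue
 * availability checks above.
 */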
-
-/**
- * ice_clear_vf_reset_trigger - enable VF to access hardware
- * @vf: VF to enable hardware access for
- */
-static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
-{
-       struct ice_hw *hw = &vf->pf->hw;
-       u32 reg;
-
-       reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
-       reg &= ~VPGEN_VFRTRIG_VFSWR_M;
-       wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
-       ice_flush(hw);
-}
-
-static int
-ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
-{
-       struct ice_hw *hw = &vsi->back->hw;
-       int status;
-
-       if (ice_vf_is_port_vlan_ena(vf))
-               status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
-                                                 ice_vf_get_port_vlan_id(vf));
-       else if (ice_vsi_has_non_zero_vlans(vsi))
-               status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
-       else
-               status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
-
-       if (status && status != -EEXIST) {
-               dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
-                       vf->vf_id, status);
-               return status;
-       }
-
-       return 0;
-}
-
-static int
-ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
-{
-       struct ice_hw *hw = &vsi->back->hw;
-       int status;
-
-       if (ice_vf_is_port_vlan_ena(vf))
-               status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
-                                                   ice_vf_get_port_vlan_id(vf));
-       else if (ice_vsi_has_non_zero_vlans(vsi))
-               status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
-       else
-               status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
-
-       if (status && status != -ENOENT) {
-               dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
-                       vf->vf_id, status);
-               return status;
-       }
-
-       return 0;
-}
-
-static void ice_vf_clear_counters(struct ice_vf *vf)
-{
-       struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
-       vf->num_mac = 0;
-       vsi->num_vlan = 0;
-       memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
-       memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
-}
-
-/**
- * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
- * @vf: VF to perform pre VSI rebuild tasks
- *
- * These tasks don't need to be amortized since they are most likely called
- * in a loop over all VFs in the reset_all_vfs() case.
- */
-static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
-{
-       ice_vf_clear_counters(vf);
-       ice_clear_vf_reset_trigger(vf);
-}
-
-/**
- * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
- * @vsi: Pointer to VSI
- *
- * This function moves VSI into corresponding scheduler aggregator node
- * based on cached value of "aggregator node info" per VSI
- */
-static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
-{
-       struct ice_pf *pf = vsi->back;
-       struct device *dev;
-       int status;
-
-       if (!vsi->agg_node)
-               return;
-
-       dev = ice_pf_to_dev(pf);
-       if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
-               dev_dbg(dev,
-                       "agg_id %u already has reached max_num_vsis %u\n",
-                       vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
-               return;
-       }
-
-       status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
-                                    vsi->idx, vsi->tc_cfg.ena_tc);
-       if (status)
-               dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
-                       vsi->idx, vsi->agg_node->agg_id);
-       else
-               vsi->agg_node->num_vsis++;
-}
-
-/**
- * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
- * @vf: VF to rebuild host configuration on
- */
-static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
-{
-       struct device *dev = ice_pf_to_dev(vf->pf);
-       struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
-       ice_vf_set_host_trust_cfg(vf);
-
-       if (ice_vf_rebuild_host_mac_cfg(vf))
-               dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
-                       vf->vf_id);
-
-       if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
-               dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
-                       vf->vf_id);
-
-       if (ice_vf_rebuild_host_tx_rate_cfg(vf))
-               dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
-                       vf->vf_id);
-
-       if (ice_vf_set_spoofchk_cfg(vf, vsi))
-               dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
-                       vf->vf_id);
-
-       /* rebuild aggregator node config for main VF VSI */
-       ice_vf_rebuild_aggregator_node_cfg(vsi);
-}
-
-/**
- * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
- * @vf: VF to release and setup the VSI for
- *
- * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
- * configuration change, etc.).
- */
-static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
-{
-       ice_vf_vsi_release(vf);
-       if (!ice_vf_vsi_setup(vf))
-               return -ENOMEM;
-
-       return 0;
-}
-
-/**
- * ice_vf_rebuild_vsi - rebuild the VF's VSI
- * @vf: VF to rebuild the VSI for
- *
- * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
- * host, PFR, CORER, etc.).
- */
-static int ice_vf_rebuild_vsi(struct ice_vf *vf)
-{
-       struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-       struct ice_pf *pf = vf->pf;
-
-       if (ice_vsi_rebuild(vsi, true)) {
-               dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
-                       vf->vf_id);
-               return -EIO;
-       }
-       /* vsi->idx will remain the same in this case so don't update
-        * vf->lan_vsi_idx
-        */
-       vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
-       vf->lan_vsi_num = vsi->vsi_num;
-
-       return 0;
-}
-
-/**
- * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
- * @vf: VF to set in initialized state
- *
- * After this function the VF will be ready to receive/handle the
- * VIRTCHNL_OP_GET_VF_RESOURCES message
- */
-static void ice_vf_set_initialized(struct ice_vf *vf)
-{
-       ice_set_vf_state_qs_dis(vf);
-       clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
-       clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
-       clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
-       set_bit(ICE_VF_STATE_INIT, vf->vf_states);
-       memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
-}
-
-/**
- * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI have been rebuilt
- * @vf: VF to perform tasks on
- */
-static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
-{
-       struct ice_pf *pf = vf->pf;
-       struct ice_hw *hw;
-
-       hw = &pf->hw;
-
-       ice_vf_rebuild_host_cfg(vf);
-
-       ice_vf_set_initialized(vf);
-       ice_ena_vf_mappings(vf);
-       wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
-}
-
-/**
- * ice_reset_all_vfs - reset all allocated VFs in one go
- * @pf: pointer to the PF structure
- * @is_vflr: true if VFLR was issued, false if not
- *
- * First, tell the hardware to reset each VF, then do all the waiting in one
- * chunk, and finally finish restoring each VF after the wait. This is useful
- * during PF routines which need to reset all VFs, which would otherwise have
- * to perform these resets in a serialized fashion.
- *
- * Returns true if any VFs were reset, and false otherwise.
- */
-bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
-{
-       struct device *dev = ice_pf_to_dev(pf);
-       struct ice_hw *hw = &pf->hw;
-       struct ice_vf *vf;
-       unsigned int bkt;
-
-       /* If we don't have any VFs, then there is nothing to reset */
-       if (!ice_has_vfs(pf))
-               return false;
-
-       mutex_lock(&pf->vfs.table_lock);
-
-       /* clear all malicious info if the VFs are getting reset */
-       ice_for_each_vf(pf, bkt, vf)
-               if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
-                                       ICE_MAX_VF_COUNT, vf->vf_id))
-                       dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
-                               vf->vf_id);
-
-       /* If VFs have been disabled, there is no need to reset */
-       if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
-               mutex_unlock(&pf->vfs.table_lock);
-               return false;
-       }
-
-       /* Begin reset on all VFs at once */
-       ice_for_each_vf(pf, bkt, vf)
-               ice_trigger_vf_reset(vf, is_vflr, true);
-
-       /* HW requires some time to make sure it can flush the FIFO for a VF
-        * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
-        * sequence to make sure that it has completed. We'll keep track of
-        * the VFs using a simple iterator that increments once that VF has
-        * finished resetting.
-        */
-       ice_for_each_vf(pf, bkt, vf) {
-               bool done = false;
-               unsigned int i;
-               u32 reg;
-
-               for (i = 0; i < 10; i++) {
-                       reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
-                       if (reg & VPGEN_VFRSTAT_VFRD_M) {
-                               done = true;
-                               break;
-                       }
-
-                       /* only delay if check failed */
-                       usleep_range(10, 20);
-               }
-
-               if (!done) {
-                       /* Display a warning if at least one VF didn't manage
-                        * to reset in time, but continue on with the
-                        * operation.
-                        */
-                       dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
-                       break;
-               }
-       }
-
-       /* free VF resources to begin resetting the VSI state */
-       ice_for_each_vf(pf, bkt, vf) {
-               mutex_lock(&vf->cfg_lock);
-
-               vf->driver_caps = 0;
-               ice_vc_set_default_allowlist(vf);
-
-               ice_vf_fdir_exit(vf);
-               ice_vf_fdir_init(vf);
-               /* clean VF control VSI when resetting VFs since it should be
-                * setup only when VF creates its first FDIR rule.
-                */
-               if (vf->ctrl_vsi_idx != ICE_NO_VSI)
-                       ice_vf_ctrl_invalidate_vsi(vf);
-
-               ice_vf_pre_vsi_rebuild(vf);
-               ice_vf_rebuild_vsi(vf);
-               ice_vf_post_vsi_rebuild(vf);
-
-               mutex_unlock(&vf->cfg_lock);
-       }
-
-       if (ice_is_eswitch_mode_switchdev(pf))
-               if (ice_eswitch_rebuild(pf))
-                       dev_warn(dev, "eswitch rebuild failed\n");
-
-       ice_flush(hw);
-       clear_bit(ICE_VF_DIS, pf->state);
-
-       mutex_unlock(&pf->vfs.table_lock);
-
-       return true;
-}
-
-/**
- * ice_is_vf_disabled - check if the PF or the VF is disabled
- * @vf: pointer to the VF info
- *
- * Returns true if the PF or VF is disabled, false otherwise.
- */
-bool ice_is_vf_disabled(struct ice_vf *vf)
-{
-       struct ice_pf *pf = vf->pf;
-
-       /* If the PF has been disabled, there is no need to reset the VF until
-        * the PF is active again. Similarly, if the VF has been disabled, this
-        * means something else is resetting the VF, so we shouldn't continue.
-        * Otherwise, set disable VF state bit for actual reset, and continue.
-        */
-       return (test_bit(ICE_VF_DIS, pf->state) ||
-               test_bit(ICE_VF_STATE_DIS, vf->vf_states));
-}
-
-/**
- * ice_reset_vf - Reset a particular VF
- * @vf: pointer to the VF structure
- * @is_vflr: true if VFLR was issued, false if not
- *
- * Returns true if the VF is currently in reset, resets successfully, or resets
- * are disabled and false otherwise.
- */
-bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
-{
-       struct ice_pf *pf = vf->pf;
-       struct ice_vsi *vsi;
-       struct device *dev;
-       struct ice_hw *hw;
-       bool rsd = false;
-       u8 promisc_m;
-       u32 reg;
-       int i;
-
-       lockdep_assert_held(&vf->cfg_lock);
-
-       dev = ice_pf_to_dev(pf);
-
-       if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
-               dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
-                       vf->vf_id);
-               return true;
-       }
-
-       if (ice_is_vf_disabled(vf)) {
-               dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
-                       vf->vf_id);
-               return true;
-       }
-
-       /* Set VF disable bit state here, before triggering reset */
-       set_bit(ICE_VF_STATE_DIS, vf->vf_states);
-       ice_trigger_vf_reset(vf, is_vflr, false);
-
-       vsi = ice_get_vf_vsi(vf);
-
-       ice_dis_vf_qs(vf);
-
-       /* Call Disable LAN Tx queue AQ whether or not queues are
-        * enabled. This is needed for successful completion of VFR.
-        */
-       ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
-                       NULL, ICE_VF_RESET, vf->vf_id, NULL);
-
-       hw = &pf->hw;
-       /* poll VPGEN_VFRSTAT reg to make sure
-        * that reset is complete
-        */
-       for (i = 0; i < 10; i++) {
-               /* VF reset requires driver to first reset the VF and then
-                * poll the status register to make sure that the reset
-                * completed successfully.
-                */
-               reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
-               if (reg & VPGEN_VFRSTAT_VFRD_M) {
-                       rsd = true;
-                       break;
-               }
-
-               /* only sleep if the reset is not done */
-               usleep_range(10, 20);
-       }
-
-       vf->driver_caps = 0;
-       ice_vc_set_default_allowlist(vf);
-
-       /* Display a warning if the VF didn't manage to reset in time, but
-        * continue on with the operation.
-        */
-       if (!rsd)
-               dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
-
-       /* disable promiscuous modes in case they were enabled
-        * ignore any error if disabling process failed
-        */
-       if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
-           test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
-               if (ice_vf_is_port_vlan_ena(vf) || vsi->num_vlan)
-                       promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
-               else
-                       promisc_m = ICE_UCAST_PROMISC_BITS;
-
-               if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m))
-                       dev_err(dev, "disabling promiscuous mode failed\n");
-       }
-
-       ice_eswitch_del_vf_mac_rule(vf);
-
-       ice_vf_fdir_exit(vf);
-       ice_vf_fdir_init(vf);
-       /* clean VF control VSI when resetting VF since it should be setup
-        * only when VF creates its first FDIR rule.
-        */
-       if (vf->ctrl_vsi_idx != ICE_NO_VSI)
-               ice_vf_ctrl_vsi_release(vf);
-
-       ice_vf_pre_vsi_rebuild(vf);
-
-       if (ice_vf_rebuild_vsi_with_release(vf)) {
-               dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
-               return false;
-       }
-
-       ice_vf_post_vsi_rebuild(vf);
-       vsi = ice_get_vf_vsi(vf);
-       ice_eswitch_update_repr(vsi);
-       ice_eswitch_replay_vf_mac_rule(vf);
-
-       /* if the VF has been reset allow it to come up again */
-       if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
-                               ICE_MAX_VF_COUNT, vf->vf_id))
-               dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);
-
-       return true;
-}
-
-/**
- * ice_vc_notify_link_state - Inform all VFs on a PF of link status
- * @pf: pointer to the PF structure
- */
-void ice_vc_notify_link_state(struct ice_pf *pf)
-{
-       struct ice_vf *vf;
-       unsigned int bkt;
-
-       mutex_lock(&pf->vfs.table_lock);
-       ice_for_each_vf(pf, bkt, vf)
-               ice_vc_notify_vf_link_state(vf);
-       mutex_unlock(&pf->vfs.table_lock);
-}
-
-/**
- * ice_vc_notify_reset - Send pending reset message to all VFs
- * @pf: pointer to the PF structure
- *
- * indicate a pending reset to all VFs on a given PF
- */
-void ice_vc_notify_reset(struct ice_pf *pf)
-{
-       struct virtchnl_pf_event pfe;
-
-       if (!ice_has_vfs(pf))
-               return;
-
-       pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
-       pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
-       ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
-                           (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
-}
-
-/**
- * ice_vc_notify_vf_reset - Notify VF of a reset event
- * @vf: pointer to the VF structure
- */
-static void ice_vc_notify_vf_reset(struct ice_vf *vf)
-{
-       struct virtchnl_pf_event pfe;
-       struct ice_pf *pf = vf->pf;
-
-       /* Bail out if the VF is in the disabled state or in neither the
-        * initialized nor the active state - otherwise proceed with
-        * notifications
-        */
-       if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
-            !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
-           test_bit(ICE_VF_STATE_DIS, vf->vf_states))
-               return;
-
-       pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
-       pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
-       ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
-                             VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
-                             NULL);
-}
-
-/**
- * ice_init_vf_vsi_res - initialize/setup VF VSI resources
- * @vf: VF to initialize/setup the VSI for
- *
- * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
- * the VF VSI's broadcast filter. It is only used during initial VF creation.
- */
-static int ice_init_vf_vsi_res(struct ice_vf *vf)
-{
-       struct ice_vsi_vlan_ops *vlan_ops;
-       struct ice_pf *pf = vf->pf;
-       u8 broadcast[ETH_ALEN];
-       struct ice_vsi *vsi;
-       struct device *dev;
-       int err;
-
-       vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
-
-       dev = ice_pf_to_dev(pf);
-       vsi = ice_vf_vsi_setup(vf);
-       if (!vsi)
-               return -ENOMEM;
-
-       err = ice_vsi_add_vlan_zero(vsi);
-       if (err) {
-               dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
-                        vf->vf_id);
-               goto release_vsi;
-       }
-
-       vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
-       err = vlan_ops->ena_rx_filtering(vsi);
-       if (err) {
-               dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
-                        vf->vf_id);
-               goto release_vsi;
-       }
-
-       eth_broadcast_addr(broadcast);
-       err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
-       if (err) {
-               dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
-                       vf->vf_id, err);
-               goto release_vsi;
-       }
-
-       err = ice_vf_set_spoofchk_cfg(vf, vsi);
-       if (err) {
-               dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
-                        vf->vf_id);
-               goto release_vsi;
-       }
-
-       vf->num_mac = 1;
-
-       return 0;
-
-release_vsi:
-       ice_vf_vsi_release(vf);
-       return err;
-}
-
-/**
- * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
- * @pf: PF the VFs are associated with
- */
-static int ice_start_vfs(struct ice_pf *pf)
-{
-       struct ice_hw *hw = &pf->hw;
-       unsigned int bkt, it_cnt;
-       struct ice_vf *vf;
-       int retval;
-
-       lockdep_assert_held(&pf->vfs.table_lock);
-
-       it_cnt = 0;
-       ice_for_each_vf(pf, bkt, vf) {
-               ice_clear_vf_reset_trigger(vf);
-
-               retval = ice_init_vf_vsi_res(vf);
-               if (retval) {
-                       dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
-                               vf->vf_id, retval);
-                       goto teardown;
-               }
-
-               set_bit(ICE_VF_STATE_INIT, vf->vf_states);
-               ice_ena_vf_mappings(vf);
-               wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
-               it_cnt++;
-       }
-
-       ice_flush(hw);
-       return 0;
-
-teardown:
-       ice_for_each_vf(pf, bkt, vf) {
-               if (it_cnt == 0)
-                       break;
-
-               ice_dis_vf_mappings(vf);
-               ice_vf_vsi_release(vf);
-               it_cnt--;
-       }
-
-       return retval;
-}
-
-/**
- * ice_create_vf_entries - Allocate and insert VF entries
- * @pf: pointer to the PF structure
- * @num_vfs: the number of VFs to allocate
- *
- * Allocate new VF entries and insert them into the hash table. Set some
- * basic default fields for initializing the new VFs.
- *
- * After this function exits, the hash table will have num_vfs entries
- * inserted.
- *
- * Returns 0 on success or an integer error code on failure.
- */
-static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
-{
-       struct ice_vfs *vfs = &pf->vfs;
-       struct ice_vf *vf;
-       u16 vf_id;
-       int err;
-
-       lockdep_assert_held(&vfs->table_lock);
-
-       for (vf_id = 0; vf_id < num_vfs; vf_id++) {
-               vf = kzalloc(sizeof(*vf), GFP_KERNEL);
-               if (!vf) {
-                       err = -ENOMEM;
-                       goto err_free_entries;
-               }
-               kref_init(&vf->refcnt);
-
-               vf->pf = pf;
-               vf->vf_id = vf_id;
-
-               vf->vf_sw_id = pf->first_sw;
-               /* assign default capabilities */
-               set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
-               vf->spoofchk = true;
-               vf->num_vf_qs = pf->vfs.num_qps_per;
-               ice_vc_set_default_allowlist(vf);
-
-               /* ctrl_vsi_idx will be set to a valid value only when VF
-                * creates its first fdir rule.
-                */
-               ice_vf_ctrl_invalidate_vsi(vf);
-               ice_vf_fdir_init(vf);
-
-               ice_vc_set_dflt_vf_ops(&vf->vc_ops);
-
-               mutex_init(&vf->cfg_lock);
-
-               hash_add_rcu(vfs->table, &vf->entry, vf_id);
-       }
-
-       return 0;
-
-err_free_entries:
-       ice_free_vf_entries(pf);
-       return err;
-}
-
-/**
- * ice_ena_vfs - enable VFs so they are ready to be used
- * @pf: pointer to the PF structure
- * @num_vfs: number of VFs to enable
- */
-static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
-{
-       struct device *dev = ice_pf_to_dev(pf);
-       struct ice_hw *hw = &pf->hw;
-       int ret;
-
-       /* Disable global interrupt 0 so we don't try to handle the VFLR. */
-       wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
-            ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
-       set_bit(ICE_OICR_INTR_DIS, pf->state);
-       ice_flush(hw);
-
-       ret = pci_enable_sriov(pf->pdev, num_vfs);
-       if (ret)
-               goto err_unroll_intr;
-
-       mutex_lock(&pf->vfs.table_lock);
-
-       if (ice_set_per_vf_res(pf, num_vfs)) {
-               dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n",
-                       num_vfs);
-               ret = -ENOSPC;
-               goto err_unroll_sriov;
-       }
-
-       ret = ice_create_vf_entries(pf, num_vfs);
-       if (ret) {
-               dev_err(dev, "Failed to allocate VF entries for %d VFs\n",
-                       num_vfs);
-               goto err_unroll_sriov;
-       }
-
-       if (ice_start_vfs(pf)) {
-               dev_err(dev, "Failed to start VF(s)\n");
-               ret = -EAGAIN;
-               goto err_unroll_vf_entries;
-       }
-
-       clear_bit(ICE_VF_DIS, pf->state);
-
-       ret = ice_eswitch_configure(pf);
-       if (ret)
-               goto err_unroll_sriov;
-
-       /* rearm global interrupts */
-       if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
-               ice_irq_dynamic_ena(hw, NULL, NULL);
-
-       mutex_unlock(&pf->vfs.table_lock);
-
-       return 0;
-
-err_unroll_vf_entries:
-       ice_free_vf_entries(pf);
-err_unroll_sriov:
-       mutex_unlock(&pf->vfs.table_lock);
-       pci_disable_sriov(pf->pdev);
-err_unroll_intr:
-       /* rearm interrupts here */
-       ice_irq_dynamic_ena(hw, NULL, NULL);
-       clear_bit(ICE_OICR_INTR_DIS, pf->state);
-       return ret;
-}
-
-/**
- * ice_pci_sriov_ena - Enable or change number of VFs
- * @pf: pointer to the PF structure
- * @num_vfs: number of VFs to allocate
- *
- * Returns 0 on success and negative on failure
- */
-static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
-{
-       int pre_existing_vfs = pci_num_vf(pf->pdev);
-       struct device *dev = ice_pf_to_dev(pf);
-       int err;
-
-       if (pre_existing_vfs && pre_existing_vfs != num_vfs)
-               ice_free_vfs(pf);
-       else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
-               return 0;
-
-       if (num_vfs > pf->vfs.num_supported) {
-               dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
-                       num_vfs, pf->vfs.num_supported);
-               return -EOPNOTSUPP;
-       }
-
-       dev_info(dev, "Enabling %d VFs\n", num_vfs);
-       err = ice_ena_vfs(pf, num_vfs);
-       if (err) {
-               dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
-               return err;
-       }
-
-       set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
-       return 0;
-}
-
-/**
- * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
- * @pf: PF to enable SR-IOV on
- */
-static int ice_check_sriov_allowed(struct ice_pf *pf)
-{
-       struct device *dev = ice_pf_to_dev(pf);
-
-       if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
-               dev_err(dev, "This device is not capable of SR-IOV\n");
-               return -EOPNOTSUPP;
-       }
-
-       if (ice_is_safe_mode(pf)) {
-               dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
-               return -EOPNOTSUPP;
-       }
-
-       if (!ice_pf_state_is_nominal(pf)) {
-               dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
-               return -EBUSY;
-       }
-
-       return 0;
-}
-
-/**
- * ice_sriov_configure - Enable or change number of VFs via sysfs
- * @pdev: pointer to a pci_dev structure
- * @num_vfs: number of VFs to allocate or 0 to free VFs
- *
- * This function is called when the user updates the number of VFs in sysfs. On
- * success return whatever num_vfs was set to by the caller. Return negative on
- * failure.
- */
-int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
-{
-       struct ice_pf *pf = pci_get_drvdata(pdev);
-       struct device *dev = ice_pf_to_dev(pf);
-       int err;
-
-       err = ice_check_sriov_allowed(pf);
-       if (err)
-               return err;
-
-       if (!num_vfs) {
-               if (!pci_vfs_assigned(pdev)) {
-                       ice_mbx_deinit_snapshot(&pf->hw);
-                       ice_free_vfs(pf);
-                       if (pf->lag)
-                               ice_enable_lag(pf->lag);
-                       return 0;
-               }
-
-               dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
-               return -EBUSY;
-       }
-
-       err = ice_mbx_init_snapshot(&pf->hw, num_vfs);
-       if (err)
-               return err;
-
-       err = ice_pci_sriov_ena(pf, num_vfs);
-       if (err) {
-               ice_mbx_deinit_snapshot(&pf->hw);
-               return err;
-       }
-
-       if (pf->lag)
-               ice_disable_lag(pf->lag);
-       return num_vfs;
-}
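Note that ice_sriov_configure() is the driver's PCI sriov_configure hook, so num_vfs arrives from the standard sysfs attribute: writing a count to /sys/bus/pci/devices/<PF BDF>/sriov_numvfs creates that many VFs and writing 0 frees them (the path placeholder is illustrative; substitute the PF's PCI address).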
-
-/**
- * ice_process_vflr_event - Free VF resources via IRQ calls
- * @pf: pointer to the PF structure
- *
- * called from the VFLR IRQ handler to
- * free up VF resources and state variables
- */
-void ice_process_vflr_event(struct ice_pf *pf)
-{
-       struct ice_hw *hw = &pf->hw;
-       struct ice_vf *vf;
-       unsigned int bkt;
-       u32 reg;
-
-       if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
-           !ice_has_vfs(pf))
-               return;
-
-       mutex_lock(&pf->vfs.table_lock);
-       ice_for_each_vf(pf, bkt, vf) {
-               u32 reg_idx, bit_idx;
-
-               reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
-               bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
-               /* read GLGEN_VFLRSTAT register to find out the flr VFs */
-               reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
-               if (reg & BIT(bit_idx)) {
-                       /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
-                       mutex_lock(&vf->cfg_lock);
-                       ice_reset_vf(vf, true);
-                       mutex_unlock(&vf->cfg_lock);
-               }
-       }
-       mutex_unlock(&pf->vfs.table_lock);
-}
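The reg_idx/bit_idx math above indexes GLGEN_VFLRSTAT as an array of 32-bit registers with one bit per absolute VF. A self-contained sketch of the same arithmetic, using a made-up vf_base_id rather than the value from the function capabilities:

    #include <stdio.h>

    int main(void)
    {
            /* example values only; vf_base_id really comes from func_caps */
            unsigned int vf_base_id = 64, vf_id = 5;
            unsigned int abs_id = vf_base_id + vf_id;

            printf("VF %u -> GLGEN_VFLRSTAT(%u), bit %u\n",
                   vf_id, abs_id / 32, abs_id % 32);
            return 0; /* prints: VF 5 -> GLGEN_VFLRSTAT(2), bit 5 */
    }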
-
-/**
- * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
- * @vf: pointer to the VF info
- */
-static void ice_vc_reset_vf(struct ice_vf *vf)
-{
-       ice_vc_notify_vf_reset(vf);
-       ice_reset_vf(vf, false);
-}
-
-/**
- * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
- * @pf: PF used to index all VFs
- * @pfq: queue index relative to the PF's function space
- *
- * If no VF is found that owns the pfq then return NULL, otherwise return a
- * pointer to the VF that owns the pfq
- *
- * If this function returns non-NULL, it acquires a reference count of the VF
- * structure. The caller is responsible for calling ice_put_vf() to drop this
- * reference.
- */
-static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
-{
-       struct ice_vf *vf;
-       unsigned int bkt;
-
-       rcu_read_lock();
-       ice_for_each_vf_rcu(pf, bkt, vf) {
-               struct ice_vsi *vsi;
-               u16 rxq_idx;
-
-               vsi = ice_get_vf_vsi(vf);
-
-               ice_for_each_rxq(vsi, rxq_idx)
-                       if (vsi->rxq_map[rxq_idx] == pfq) {
-                               struct ice_vf *found;
-
-                               if (kref_get_unless_zero(&vf->refcnt))
-                                       found = vf;
-                               else
-                                       found = NULL;
-                               rcu_read_unlock();
-                               return found;
-                       }
-       }
-       rcu_read_unlock();
-
-       return NULL;
-}
-
-/**
- * ice_globalq_to_pfq - convert from global queue index to PF space queue index
- * @pf: PF used for conversion
- * @globalq: global queue index used to convert to PF space queue index
- */
-static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
-{
-       return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
-}
-
-/**
- * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
- * @pf: PF that the LAN overflow event happened on
- * @event: structure holding the event information for the LAN overflow event
- *
- * Determine if the LAN overflow event was caused by a VF queue. If it was not
- * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
- * reset on the offending VF.
- */
-void
-ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
-{
-       u32 gldcb_rtctq, queue;
-       struct ice_vf *vf;
-
-       gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
-       dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
-
-       /* event returns device global Rx queue number */
-       queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
-               GLDCB_RTCTQ_RXQNUM_S;
-
-       vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
-       if (!vf)
-               return;
-
-       mutex_lock(&vf->cfg_lock);
-       ice_vc_reset_vf(vf);
-       mutex_unlock(&vf->cfg_lock);
-
-       ice_put_vf(vf);
-}
-
-/**
- * ice_vc_send_msg_to_vf - Send message to VF
- * @vf: pointer to the VF info
- * @v_opcode: virtual channel opcode
- * @v_retval: virtual channel return value
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * send msg to VF
- */
-int
-ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
-                     enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
-{
-       struct device *dev;
-       struct ice_pf *pf;
-       int aq_ret;
-
-       pf = vf->pf;
-       dev = ice_pf_to_dev(pf);
-
-       /* single place to detect unsuccessful return values */
-       if (v_retval) {
-               vf->num_inval_msgs++;
-               dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
-                        v_opcode, v_retval);
-               if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
-                       dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
-                               vf->vf_id);
-                       dev_err(dev, "Use PF Control I/F to enable the VF\n");
-                       set_bit(ICE_VF_STATE_DIS, vf->vf_states);
-                       return -EIO;
-               }
-       } else {
-               vf->num_valid_msgs++;
-               /* reset the invalid counter, if a valid message is received. */
-               vf->num_inval_msgs = 0;
-       }
-
-       aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
-                                      msg, msglen, NULL);
-       if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
-               dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
-                        vf->vf_id, aq_ret,
-                        ice_aq_str(pf->hw.mailboxq.sq_last_status));
-               return -EIO;
-       }
-
-       return 0;
-}
-
-/**
- * ice_vc_get_ver_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to request the API version used by the PF
- */
-static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
-{
-       struct virtchnl_version_info info = {
-               VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
-       };
-
-       vf->vf_ver = *(struct virtchnl_version_info *)msg;
-       /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
-       if (VF_IS_V10(&vf->vf_ver))
-               info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
-
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
-                                    VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
-                                    sizeof(struct virtchnl_version_info));
-}
-
-/**
- * ice_vc_get_max_frame_size - get max frame size allowed for VF
- * @vf: VF used to determine max frame size
- *
- * Max frame size is determined based on the current port's max frame size and
- * whether a port VLAN is configured on this VF. The VF is not aware whether
- * it's in a port VLAN so the PF needs to account for this in max frame size
- * checks and sending the max frame size to the VF.
- */
-static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
-{
-       struct ice_port_info *pi = ice_vf_get_port_info(vf);
-       u16 max_frame_size;
-
-       max_frame_size = pi->phy.link_info.max_frame_size;
-
-       if (ice_vf_is_port_vlan_ena(vf))
-               max_frame_size -= VLAN_HLEN;
-
-       return max_frame_size;
-}
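As a worked example of the accounting above: with a hypothetical port max frame size of 9728 bytes and a port VLAN enabled, the VF is advertised 9728 - VLAN_HLEN (4 bytes) = 9724, so frames the VF builds still fit on the wire once the PF inserts the port VLAN tag.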
-
-/**
- * ice_vc_get_vf_res_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to request its resources
- */
-static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
-{
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct virtchnl_vf_resource *vfres = NULL;
-       struct ice_pf *pf = vf->pf;
-       struct ice_vsi *vsi;
-       int len = 0;
-       int ret;
-
-       if (ice_check_vf_init(pf, vf)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto err;
-       }
-
-       len = sizeof(struct virtchnl_vf_resource);
-
-       vfres = kzalloc(len, GFP_KERNEL);
-       if (!vfres) {
-               v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
-               len = 0;
-               goto err;
-       }
-       if (VF_IS_V11(&vf->vf_ver))
-               vf->driver_caps = *(u32 *)msg;
-       else
-               vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
-                                 VIRTCHNL_VF_OFFLOAD_RSS_REG |
-                                 VIRTCHNL_VF_OFFLOAD_VLAN;
-
-       vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto err;
-       }
-
-       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
-               /* VLAN offloads based on current device configuration */
-               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN_V2;
-       } else if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) {
-               /* allow the VF to negotiate VIRTCHNL_VF_OFFLOAD_VLAN only for
-                * these two conditions, which amount to guest VLAN filtering
-                * and offloads being based on the inner VLAN or the
-                * inner/single VLAN respectively; don't allow the VF to
-                * negotiate VIRTCHNL_VF_OFFLOAD_VLAN in any other case
-                */
-               if (ice_is_dvm_ena(&pf->hw) && ice_vf_is_port_vlan_ena(vf)) {
-                       vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
-               } else if (!ice_is_dvm_ena(&pf->hw) &&
-                          !ice_vf_is_port_vlan_ena(vf)) {
-                       vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
-                       /* configure backward compatible support for VFs that
-                        * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is
-                        * configured in SVM, and no port VLAN is configured
-                        */
-                       ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi);
-               } else if (ice_is_dvm_ena(&pf->hw)) {
-                       /* configure software offloaded VLAN support when DVM
-                        * is enabled, but no port VLAN is enabled
-                        */
-                       ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi);
-               }
-       }
-
-       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
-               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
-       } else {
-               if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
-                       vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
-               else
-                       vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
-       }
-
-       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
-               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
-
-       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
-               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
-
-       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
-               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
-
-       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
-               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
-
-       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
-               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
-
-       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
-               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
-
-       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
-               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
-
-       if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
-               vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
-
-       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
-               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
-
-       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
-               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;
-
-       vfres->num_vsis = 1;
-       /* Tx and Rx queues are equal for the VF */
-       vfres->num_queue_pairs = vsi->num_txq;
-       vfres->max_vectors = pf->vfs.num_msix_per;
-       vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
-       vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
-       vfres->max_mtu = ice_vc_get_max_frame_size(vf);
-
-       vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
-       vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
-       vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
-       ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
-                       vf->hw_lan_addr.addr);
-
-       /* match guest capabilities */
-       vf->driver_caps = vfres->vf_cap_flags;
-
-       ice_vc_set_caps_allowlist(vf);
-       ice_vc_set_working_allowlist(vf);
-
-       set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
-
-err:
-       /* send the response back to the VF */
-       ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
-                                   (u8 *)vfres, len);
-
-       kfree(vfres);
-       return ret;
-}
-
-/**
- * ice_vc_reset_vf_msg
- * @vf: pointer to the VF info
- *
- * called from the VF to reset itself; unlike other virtchnl messages,
- * the PF driver doesn't send a response back to the VF
- */
-static void ice_vc_reset_vf_msg(struct ice_vf *vf)
-{
-       if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
-               ice_reset_vf(vf, false);
-}
-
-/**
- * ice_find_vsi_from_id
- * @pf: the PF structure to search for the VSI
- * @id: ID of the VSI it is searching for
- *
- * searches for the VSI with the given ID
- */
-static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
-{
-       int i;
-
-       ice_for_each_vsi(pf, i)
-               if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
-                       return pf->vsi[i];
-
-       return NULL;
-}
-
-/**
- * ice_vc_isvalid_vsi_id
- * @vf: pointer to the VF info
- * @vsi_id: VF relative VSI ID
- *
- * check for the valid VSI ID
- */
-bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
-{
-       struct ice_pf *pf = vf->pf;
-       struct ice_vsi *vsi;
-
-       vsi = ice_find_vsi_from_id(pf, vsi_id);
-
-       return (vsi && (vsi->vf == vf));
-}
-
-/**
- * ice_vc_isvalid_q_id
- * @vf: pointer to the VF info
- * @vsi_id: VSI ID
- * @qid: VSI relative queue ID
- *
- * check for the valid queue ID
- */
-static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
-{
-       struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
-       /* allocated Tx and Rx queues should always be equal for a VF VSI */
-       return (vsi && (qid < vsi->alloc_txq));
-}
-
-/**
- * ice_vc_isvalid_ring_len
- * @ring_len: length of ring
- *
- * check that the ring count is either zero or a multiple of
- * ICE_REQ_DESC_MULTIPLE within the supported descriptor range
- */
-static bool ice_vc_isvalid_ring_len(u16 ring_len)
-{
-       return ring_len == 0 ||
-              (ring_len >= ICE_MIN_NUM_DESC &&
-               ring_len <= ICE_MAX_NUM_DESC &&
-               !(ring_len % ICE_REQ_DESC_MULTIPLE));
-}
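A self-contained sketch of the same validation, with the descriptor bounds assumed for illustration (the real values come from the ice headers):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* illustrative values only -- the real bounds come from the ice headers */
    #define MIN_NUM_DESC   64
    #define MAX_NUM_DESC   8160
    #define REQ_DESC_MULT  32

    static bool ring_len_valid(uint16_t len)
    {
            return len == 0 ||
                   (len >= MIN_NUM_DESC && len <= MAX_NUM_DESC &&
                    !(len % REQ_DESC_MULT));
    }

    int main(void)
    {
            uint16_t tests[] = { 0, 512, 100, 9000 };

            for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
                    printf("%u -> %s\n", tests[i],
                           ring_len_valid(tests[i]) ? "valid" : "invalid");
            /* 0 and 512 are valid; 100 (not a multiple) and 9000 (too big) are not */
            return 0;
    }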
-
-/**
- * ice_vc_validate_pattern
- * @vf: pointer to the VF info
- * @proto: virtchnl protocol headers
- *
- * validate the pattern is supported or not.
- *
- * Return: true on success, false on error.
- */
-bool
-ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
-{
-       bool is_ipv4 = false;
-       bool is_ipv6 = false;
-       bool is_udp = false;
-       u16 ptype = -1;
-       int i = 0;
-
-       while (i < proto->count &&
-              proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
-               switch (proto->proto_hdr[i].type) {
-               case VIRTCHNL_PROTO_HDR_ETH:
-                       ptype = ICE_PTYPE_MAC_PAY;
-                       break;
-               case VIRTCHNL_PROTO_HDR_IPV4:
-                       ptype = ICE_PTYPE_IPV4_PAY;
-                       is_ipv4 = true;
-                       break;
-               case VIRTCHNL_PROTO_HDR_IPV6:
-                       ptype = ICE_PTYPE_IPV6_PAY;
-                       is_ipv6 = true;
-                       break;
-               case VIRTCHNL_PROTO_HDR_UDP:
-                       if (is_ipv4)
-                               ptype = ICE_PTYPE_IPV4_UDP_PAY;
-                       else if (is_ipv6)
-                               ptype = ICE_PTYPE_IPV6_UDP_PAY;
-                       is_udp = true;
-                       break;
-               case VIRTCHNL_PROTO_HDR_TCP:
-                       if (is_ipv4)
-                               ptype = ICE_PTYPE_IPV4_TCP_PAY;
-                       else if (is_ipv6)
-                               ptype = ICE_PTYPE_IPV6_TCP_PAY;
-                       break;
-               case VIRTCHNL_PROTO_HDR_SCTP:
-                       if (is_ipv4)
-                               ptype = ICE_PTYPE_IPV4_SCTP_PAY;
-                       else if (is_ipv6)
-                               ptype = ICE_PTYPE_IPV6_SCTP_PAY;
-                       break;
-               case VIRTCHNL_PROTO_HDR_GTPU_IP:
-               case VIRTCHNL_PROTO_HDR_GTPU_EH:
-                       if (is_ipv4)
-                               ptype = ICE_MAC_IPV4_GTPU;
-                       else if (is_ipv6)
-                               ptype = ICE_MAC_IPV6_GTPU;
-                       goto out;
-               case VIRTCHNL_PROTO_HDR_L2TPV3:
-                       if (is_ipv4)
-                               ptype = ICE_MAC_IPV4_L2TPV3;
-                       else if (is_ipv6)
-                               ptype = ICE_MAC_IPV6_L2TPV3;
-                       goto out;
-               case VIRTCHNL_PROTO_HDR_ESP:
-                       if (is_ipv4)
-                               ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
-                                               ICE_MAC_IPV4_ESP;
-                       else if (is_ipv6)
-                               ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
-                                               ICE_MAC_IPV6_ESP;
-                       goto out;
-               case VIRTCHNL_PROTO_HDR_AH:
-                       if (is_ipv4)
-                               ptype = ICE_MAC_IPV4_AH;
-                       else if (is_ipv6)
-                               ptype = ICE_MAC_IPV6_AH;
-                       goto out;
-               case VIRTCHNL_PROTO_HDR_PFCP:
-                       if (is_ipv4)
-                               ptype = ICE_MAC_IPV4_PFCP_SESSION;
-                       else if (is_ipv6)
-                               ptype = ICE_MAC_IPV6_PFCP_SESSION;
-                       goto out;
-               default:
-                       break;
-               }
-               i++;
-       }
-
-out:
-       return ice_hw_ptype_ena(&vf->pf->hw, ptype);
-}
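For example, a pattern of VIRTCHNL_PROTO_HDR_ETH, VIRTCHNL_PROTO_HDR_IPV4, VIRTCHNL_PROTO_HDR_UDP walks the switch to ICE_PTYPE_IPV4_UDP_PAY; the final ice_hw_ptype_ena() call then decides whether that packet type is actually enabled in the hardware's current package.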
-
-/**
- * ice_vc_parse_rss_cfg - parses hash fields and headers from
- * a specific virtchnl RSS cfg
- * @hw: pointer to the hardware
- * @rss_cfg: pointer to the virtchnl RSS cfg
- * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
- * to configure
- * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
- *
- * Return true if all the protocol header and hash fields in the RSS cfg could
- * be parsed, else return false
- *
- * This function parses the virtchnl RSS cfg into the intended hash fields
- * and protocol headers for RSS configuration
- */
-static bool
-ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
-                    u32 *addl_hdrs, u64 *hash_flds)
-{
-       const struct ice_vc_hash_field_match_type *hf_list;
-       const struct ice_vc_hdr_match_type *hdr_list;
-       int i, hf_list_len, hdr_list_len;
-
-       hf_list = ice_vc_hash_field_list;
-       hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
-       hdr_list = ice_vc_hdr_list;
-       hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
-
-       for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
-               struct virtchnl_proto_hdr *proto_hdr =
-                                       &rss_cfg->proto_hdrs.proto_hdr[i];
-               bool hdr_found = false;
-               int j;
-
-               /* Find matched ice headers according to virtchnl headers. */
-               for (j = 0; j < hdr_list_len; j++) {
-                       struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
-
-                       if (proto_hdr->type == hdr_map.vc_hdr) {
-                               *addl_hdrs |= hdr_map.ice_hdr;
-                               hdr_found = true;
-                       }
-               }
-
-               if (!hdr_found)
-                       return false;
-
-               /* Find matched ice hash fields according to
-                * virtchnl hash fields.
-                */
-               for (j = 0; j < hf_list_len; j++) {
-                       struct ice_vc_hash_field_match_type hf_map = hf_list[j];
-
-                       if (proto_hdr->type == hf_map.vc_hdr &&
-                           proto_hdr->field_selector == hf_map.vc_hash_field) {
-                               *hash_flds |= hf_map.ice_hash_field;
-                               break;
-                       }
-               }
-       }
-
-       return true;
-}
-
-/**
- * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
- * RSS offloads
- * @caps: VF driver negotiated capabilities
- *
- * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
- * else return false
- */
-static bool ice_vf_adv_rss_offload_ena(u32 caps)
-{
-       return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
-}
-
-/**
- * ice_vc_handle_rss_cfg
- * @vf: pointer to the VF info
- * @msg: pointer to the message buffer
- * @add: add an RSS config if true, otherwise delete an RSS config
- *
- * This function adds/deletes an RSS config
- */
-static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
-{
-       u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
-       struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct device *dev = ice_pf_to_dev(vf->pf);
-       struct ice_hw *hw = &vf->pf->hw;
-       struct ice_vsi *vsi;
-
-       if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
-               dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
-                       vf->vf_id);
-               v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
-               goto error_param;
-       }
-
-       if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
-               dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
-                       vf->vf_id);
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
-           rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
-           rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
-               dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
-                       vf->vf_id);
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
-               struct ice_vsi_ctx *ctx;
-               u8 lut_type, hash_type;
-               int status;
-
-               lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
-               hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
-                               ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
-
-               ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-               if (!ctx) {
-                       v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
-                       goto error_param;
-               }
-
-               ctx->info.q_opt_rss = ((lut_type <<
-                                       ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
-                                      ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
-                                      (hash_type &
-                                       ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
-
-               /* Preserve existing queueing option setting */
-               ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
-                                         ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
-               ctx->info.q_opt_tc = vsi->info.q_opt_tc;
-               ctx->info.q_opt_flags = vsi->info.q_opt_rss;
-
-               ctx->info.valid_sections =
-                               cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
-
-               status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
-               if (status) {
-                       dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
-                               status, ice_aq_str(hw->adminq.sq_last_status));
-                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               } else {
-                       vsi->info.q_opt_rss = ctx->info.q_opt_rss;
-               }
-
-               kfree(ctx);
-       } else {
-               u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
-               u64 hash_flds = ICE_HASH_INVALID;
-
-               if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
-                                         &hash_flds)) {
-                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                       goto error_param;
-               }
-
-               if (add) {
-                       if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
-                                           addl_hdrs)) {
-                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                               dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
-                                       vsi->vsi_num, v_ret);
-                       }
-               } else {
-                       int status;
-
-                       status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
-                                                addl_hdrs);
-                       /* We just ignore -ENOENT, because if two configurations
-                        * share the same profile, removing one of them actually
-                        * removes both, since the profile is deleted.
-                        */
-                       if (status && status != -ENOENT) {
-                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                               dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
-                                       vf->vf_id, status);
-                       }
-               }
-       }
-
-error_param:
-       return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_key
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS key
- */
-static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
-{
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct virtchnl_rss_key *vrk =
-               (struct virtchnl_rss_key *)msg;
-       struct ice_vsi *vsi;
-
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (ice_set_rss_key(vsi, vrk->key))
-               v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
-                                    NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_lut
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS LUT
- */
-static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
-{
-       struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct ice_vsi *vsi;
-
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
-               v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
-                                    NULL, 0);
-}
-
-/**
- * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
- * @vf: the VF being reset
- *
- * The max poll time is ~800 ms, which is about the maximum time it takes
- * for a VF to be reset and/or a VF driver to be removed.
- */
-static void ice_wait_on_vf_reset(struct ice_vf *vf)
-{
-       int i;
-
-       for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
-               if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
-                       break;
-               msleep(ICE_MAX_VF_RESET_SLEEP_MS);
-       }
-}
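The ~800 ms ceiling cited above is simply the loop bound times the per-iteration sleep, i.e. ICE_MAX_VF_RESET_TRIES * ICE_MAX_VF_RESET_SLEEP_MS; assuming the driver's definitions of 40 tries and 20 ms, that is 40 * 20 ms = 800 ms.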
-
-/**
- * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
- * @vf: VF to check if it's ready to be configured/queried
- *
- * The purpose of this function is to make sure the VF is not in reset, not
- * disabled, and initialized so it can be configured and/or queried by a host
- * administrator.
- */
-int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
-{
-       struct ice_pf *pf;
-
-       ice_wait_on_vf_reset(vf);
-
-       if (ice_is_vf_disabled(vf))
-               return -EINVAL;
-
-       pf = vf->pf;
-       if (ice_check_vf_init(pf, vf))
-               return -EBUSY;
-
-       return 0;
-}
-
-/**
- * ice_set_vf_spoofchk
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @ena: flag to enable or disable feature
- *
- * Enable or disable VF spoof checking
- */
-int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
-{
-       struct ice_netdev_priv *np = netdev_priv(netdev);
-       struct ice_pf *pf = np->vsi->back;
-       struct ice_vsi *vf_vsi;
-       struct device *dev;
-       struct ice_vf *vf;
-       int ret;
-
-       dev = ice_pf_to_dev(pf);
-
-       vf = ice_get_vf_by_id(pf, vf_id);
-       if (!vf)
-               return -EINVAL;
-
-       ret = ice_check_vf_ready_for_cfg(vf);
-       if (ret)
-               goto out_put_vf;
-
-       vf_vsi = ice_get_vf_vsi(vf);
-       if (!vf_vsi) {
-               netdev_err(netdev, "VSI %d for VF %d is null\n",
-                          vf->lan_vsi_idx, vf->vf_id);
-               ret = -EINVAL;
-               goto out_put_vf;
-       }
-
-       if (vf_vsi->type != ICE_VSI_VF) {
-               netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
-                          vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
-               ret = -ENODEV;
-               goto out_put_vf;
-       }
-
-       if (ena == vf->spoofchk) {
-               dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
-               ret = 0;
-               goto out_put_vf;
-       }
-
-       if (ena)
-               ret = ice_vsi_ena_spoofchk(vf_vsi);
-       else
-               ret = ice_vsi_dis_spoofchk(vf_vsi);
-       if (ret)
-               dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d\n error %d\n",
-                       ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret);
-       else
-               vf->spoofchk = ena;
-
-out_put_vf:
-       ice_put_vf(vf);
-       return ret;
-}
-
-/**
- * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
- * @pf: PF structure for accessing VF(s)
- *
- * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
- * else return true
- */
-bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
-{
-       bool is_vf_promisc = false;
-       struct ice_vf *vf;
-       unsigned int bkt;
-
-       rcu_read_lock();
-       ice_for_each_vf_rcu(pf, bkt, vf) {
-               /* found a VF that has promiscuous mode configured */
-               if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
-                   test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
-                       is_vf_promisc = true;
-                       break;
-               }
-       }
-       rcu_read_unlock();
-
-       return is_vf_promisc;
-}
-
-/**
- * ice_vc_cfg_promiscuous_mode_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to configure VF VSIs promiscuous mode
- */
-static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
-{
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       bool rm_promisc, alluni = false, allmulti = false;
-       struct virtchnl_promisc_info *info =
-           (struct virtchnl_promisc_info *)msg;
-       struct ice_vsi_vlan_ops *vlan_ops;
-       int mcast_err = 0, ucast_err = 0;
-       struct ice_pf *pf = vf->pf;
-       struct ice_vsi *vsi;
-       struct device *dev;
-       int ret = 0;
-
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       dev = ice_pf_to_dev(pf);
-       if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
-               dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
-                       vf->vf_id);
-               /* Leave v_ret alone, lie to the VF on purpose. */
-               goto error_param;
-       }
-
-       if (info->flags & FLAG_VF_UNICAST_PROMISC)
-               alluni = true;
-
-       if (info->flags & FLAG_VF_MULTICAST_PROMISC)
-               allmulti = true;
-
-       rm_promisc = !allmulti && !alluni;
-
-       vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
-       if (rm_promisc)
-               ret = vlan_ops->ena_rx_filtering(vsi);
-       else
-               ret = vlan_ops->dis_rx_filtering(vsi);
-       if (ret) {
-               dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
-               bool set_dflt_vsi = alluni || allmulti;
-
-               if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
-                       /* only attempt to set the default forwarding VSI if
-                        * it's not currently set
-                        */
-                       ret = ice_set_dflt_vsi(pf->first_sw, vsi);
-               else if (!set_dflt_vsi &&
-                        ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
-                       /* only attempt to free the default forwarding VSI if we
-                        * are the owner
-                        */
-                       ret = ice_clear_dflt_vsi(pf->first_sw);
-
-               if (ret) {
-                       dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
-                               set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
-                       v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-                       goto error_param;
-               }
-       } else {
-               u8 mcast_m, ucast_m;
-
-               if (ice_vf_is_port_vlan_ena(vf) ||
-                   ice_vsi_has_non_zero_vlans(vsi)) {
-                       mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
-                       ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
-               } else {
-                       mcast_m = ICE_MCAST_PROMISC_BITS;
-                       ucast_m = ICE_UCAST_PROMISC_BITS;
-               }
-
-               if (alluni)
-                       ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
-               else
-                       ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
-
-               if (allmulti)
-                       mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
-               else
-                       mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
-
-               if (ucast_err || mcast_err)
-                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-       }
-
-       if (!mcast_err) {
-               if (allmulti &&
-                   !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
-                       dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
-                                vf->vf_id);
-               else if (!allmulti && test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
-                       dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
-                                vf->vf_id);
-       }
-
-       if (!ucast_err) {
-               if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
-                       dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
-                                vf->vf_id);
-               else if (!alluni && test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
-                       dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
-                                vf->vf_id);
-       }
-
-error_param:
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
-                                    v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_get_stats_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to get VSI stats
- */
-static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
-{
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct virtchnl_queue_select *vqs =
-               (struct virtchnl_queue_select *)msg;
-       struct ice_eth_stats stats = { 0 };
-       struct ice_vsi *vsi;
-
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       ice_update_eth_stats(vsi);
-
-       stats = vsi->eth_stats;
-
-error_param:
-       /* send the response to the VF */
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
-                                    (u8 *)&stats, sizeof(stats));
-}
-
-/**
- * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
- * @vqs: virtchnl_queue_select structure containing bitmaps to validate
- *
- * Return true on successful validation, else false
- */
-static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
-{
-       if ((!vqs->rx_queues && !vqs->tx_queues) ||
-           vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
-           vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
-               return false;
-
-       return true;
-}
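The rule encoded above: at least one queue must be selected, and no bitmap may have a bit set at or above ICE_MAX_RSS_QS_PER_VF, i.e. each value must be below BIT(ICE_MAX_RSS_QS_PER_VF). A self-contained sketch with an assumed per-VF queue limit of 16:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_QS_PER_VF 16 /* stand-in for ICE_MAX_RSS_QS_PER_VF; assumed */

    /* Valid when at least one queue is selected and no bit at or above
     * MAX_QS_PER_VF is set, i.e. each value is below BIT(MAX_QS_PER_VF).
     */
    static bool vqs_valid(uint32_t rxq, uint32_t txq)
    {
            if ((!rxq && !txq) ||
                rxq >= (1u << MAX_QS_PER_VF) || txq >= (1u << MAX_QS_PER_VF))
                    return false;
            return true;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   vqs_valid(0x000f, 0x000f),   /* 1: queues 0-3 on both */
                   vqs_valid(0, 0),             /* 0: nothing selected */
                   vqs_valid(0x10000, 0x0001)); /* 0: Rx bit 16 out of range */
            return 0;
    }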
-
-/**
- * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
- * @vsi: VSI of the VF to configure
- * @q_idx: VF queue index used to determine the queue in the PF's space
- */
-static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
-{
-       struct ice_hw *hw = &vsi->back->hw;
-       u32 pfq = vsi->txq_map[q_idx];
-       u32 reg;
-
-       reg = rd32(hw, QINT_TQCTL(pfq));
-
-       /* MSI-X index 0 in the VF's space is always for the OICR, which means
-        * this is most likely a poll mode VF driver, so don't enable an
-        * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
-        */
-       if (!(reg & QINT_TQCTL_MSIX_INDX_M))
-               return;
-
-       wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
-}
-
-/**
- * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
- * @vsi: VSI of the VF to configure
- * @q_idx: VF queue index used to determine the queue in the PF's space
- */
-static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
-{
-       struct ice_hw *hw = &vsi->back->hw;
-       u32 pfq = vsi->rxq_map[q_idx];
-       u32 reg;
-
-       reg = rd32(hw, QINT_RQCTL(pfq));
-
-       /* MSI-X index 0 in the VF's space is always for the OICR, which means
-        * this is most likely a poll mode VF driver, so don't enable an
-        * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
-        */
-       if (!(reg & QINT_RQCTL_MSIX_INDX_M))
-               return;
-
-       wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
-}
-
-/**
- * ice_vc_ena_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to enable all or specific queue(s)
- */
-static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
-{
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct virtchnl_queue_select *vqs =
-           (struct virtchnl_queue_select *)msg;
-       struct ice_vsi *vsi;
-       unsigned long q_map;
-       u16 vf_q_id;
-
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (!ice_vc_validate_vqs_bitmaps(vqs)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       /* Enable only Rx rings; Tx rings were enabled by the FW when the
-        * Tx queue group list was configured and the context bits were
-        * programmed using ice_vsi_cfg_txqs
-        */
-       q_map = vqs->rx_queues;
-       for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
-               if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
-                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                       goto error_param;
-               }
-
-               /* Skip queue if enabled */
-               if (test_bit(vf_q_id, vf->rxq_ena))
-                       continue;
-
-               if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
-                       dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
-                               vf_q_id, vsi->vsi_num);
-                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                       goto error_param;
-               }
-
-               ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
-               set_bit(vf_q_id, vf->rxq_ena);
-       }
-
-       q_map = vqs->tx_queues;
-       for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
-               if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
-                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                       goto error_param;
-               }
-
-               /* Skip queue if enabled */
-               if (test_bit(vf_q_id, vf->txq_ena))
-                       continue;
-
-               ice_vf_ena_txq_interrupt(vsi, vf_q_id);
-               set_bit(vf_q_id, vf->txq_ena);
-       }
-
-       /* Set flag to indicate that queues are enabled */
-       if (v_ret == VIRTCHNL_STATUS_SUCCESS)
-               set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-
-error_param:
-       /* send the response to the VF */
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
-                                    NULL, 0);
-}
-
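Both loops in ice_vc_ena_qs_msg walk the VF-supplied bitmap one set bit at a time and skip queues already marked enabled, so the message is idempotent. A userspace approximation of that for_each_set_bit()/test_bit() pattern, using a plain unsigned long instead of the kernel bitmap helpers:

#include <stdio.h>

#define MAX_QS 16   /* stand-in for ICE_MAX_RSS_QS_PER_VF */

int main(void)
{
        unsigned long q_map = 0x0bUL;  /* VF requested queues 0, 1 and 3 */
        unsigned long ena   = 0x01UL;  /* queue 0 is already enabled */
        unsigned int q;

        for (q = 0; q < MAX_QS; q++) {
                if (!(q_map & (1UL << q)))
                        continue;          /* bit clear: not requested */
                if (ena & (1UL << q))
                        continue;          /* already enabled: skip */
                ena |= 1UL << q;           /* enable and record it */
                printf("enabled queue %u\n", q);
        }
        return 0;
}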
-/**
- * ice_vc_dis_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to disable all or specific queue(s)
- */
-static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
-{
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct virtchnl_queue_select *vqs =
-           (struct virtchnl_queue_select *)msg;
-       struct ice_vsi *vsi;
-       unsigned long q_map;
-       u16 vf_q_id;
-
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
-           !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (!ice_vc_validate_vqs_bitmaps(vqs)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (vqs->tx_queues) {
-               q_map = vqs->tx_queues;
-
-               for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
-                       struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
-                       struct ice_txq_meta txq_meta = { 0 };
-
-                       if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
-                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                               goto error_param;
-                       }
-
-                       /* Skip queue if not enabled */
-                       if (!test_bit(vf_q_id, vf->txq_ena))
-                               continue;
-
-                       ice_fill_txq_meta(vsi, ring, &txq_meta);
-
-                       if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
-                                                ring, &txq_meta)) {
-                               dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
-                                       vf_q_id, vsi->vsi_num);
-                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                               goto error_param;
-                       }
-
-                       /* Clear enabled queues flag */
-                       clear_bit(vf_q_id, vf->txq_ena);
-               }
-       }
-
-       q_map = vqs->rx_queues;
-       /* speed up Rx queue disable by batching them if possible */
-       if (q_map &&
-           bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
-               if (ice_vsi_stop_all_rx_rings(vsi)) {
-                       dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
-                               vsi->vsi_num);
-                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                       goto error_param;
-               }
-
-               bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
-       } else if (q_map) {
-               for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
-                       if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
-                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                               goto error_param;
-                       }
-
-                       /* Skip queue if not enabled */
-                       if (!test_bit(vf_q_id, vf->rxq_ena))
-                               continue;
-
-                       if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
-                                                    true)) {
-                               dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
-                                       vf_q_id, vsi->vsi_num);
-                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                               goto error_param;
-                       }
-
-                       /* Clear enabled queues flag */
-                       clear_bit(vf_q_id, vf->rxq_ena);
-               }
-       }
-
-       /* Clear enabled queues flag */
-       if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
-               clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-
-error_param:
-       /* send the response to the VF */
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
-                                    NULL, 0);
-}
-
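The Rx half of ice_vc_dis_qs_msg has a fast path: when the bitmap the VF wants disabled exactly equals the set of currently enabled Rx queues, all rings are stopped with one batched call instead of one teardown per queue. A sketch of that decision, with userspace stand-ins for bitmap_equal() and the stop helpers:

#include <stdio.h>

static void stop_all(void)       { puts("batched: stop all Rx rings"); }
static void stop_one(unsigned q) { printf("stop Rx ring %u\n", q); }

int main(void)
{
        unsigned long rxq_ena = 0x0fUL;  /* queues 0-3 enabled */
        unsigned long q_map   = 0x0fUL;  /* VF asks to disable 0-3 */
        unsigned int q;

        if (q_map && q_map == rxq_ena) { /* bitmap_equal() stand-in */
                stop_all();              /* one call for the lot */
                rxq_ena = 0;
        } else {
                for (q = 0; q < 16; q++) {
                        if (!(q_map & rxq_ena & (1UL << q)))
                                continue;
                        stop_one(q);
                        rxq_ena &= ~(1UL << q);
                }
        }
        return 0;
}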
-/**
- * ice_cfg_interrupt
- * @vf: pointer to the VF info
- * @vsi: the VSI being configured
- * @vector_id: vector ID
- * @map: vector map for mapping vectors to queues
- * @q_vector: structure for interrupt vector
- *
- * configure the IRQ to queue map
- */
-static int
-ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
-                 struct virtchnl_vector_map *map,
-                 struct ice_q_vector *q_vector)
-{
-       u16 vsi_q_id, vsi_q_id_idx;
-       unsigned long qmap;
-
-       q_vector->num_ring_rx = 0;
-       q_vector->num_ring_tx = 0;
-
-       qmap = map->rxq_map;
-       for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
-               vsi_q_id = vsi_q_id_idx;
-
-               if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
-                       return VIRTCHNL_STATUS_ERR_PARAM;
-
-               q_vector->num_ring_rx++;
-               q_vector->rx.itr_idx = map->rxitr_idx;
-               vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
-               ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
-                                     q_vector->rx.itr_idx);
-       }
-
-       qmap = map->txq_map;
-       for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
-               vsi_q_id = vsi_q_id_idx;
-
-               if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
-                       return VIRTCHNL_STATUS_ERR_PARAM;
-
-               q_vector->num_ring_tx++;
-               q_vector->tx.itr_idx = map->txitr_idx;
-               vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
-               ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
-                                     q_vector->tx.itr_idx);
-       }
-
-       return VIRTCHNL_STATUS_SUCCESS;
-}
-
-/**
- * ice_vc_cfg_irq_map_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to configure the IRQ to queue map
- */
-static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
-{
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       u16 num_q_vectors_mapped, vsi_id, vector_id;
-       struct virtchnl_irq_map_info *irqmap_info;
-       struct virtchnl_vector_map *map;
-       struct ice_pf *pf = vf->pf;
-       struct ice_vsi *vsi;
-       int i;
-
-       irqmap_info = (struct virtchnl_irq_map_info *)msg;
-       num_q_vectors_mapped = irqmap_info->num_vectors;
-
-       /* Check to make sure number of VF vectors mapped is not greater than
-        * number of VF vectors originally allocated, and check that
-        * there is actually at least a single VF queue vector mapped
-        */
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
-           pf->vfs.num_msix_per < num_q_vectors_mapped ||
-           !num_q_vectors_mapped) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       for (i = 0; i < num_q_vectors_mapped; i++) {
-               struct ice_q_vector *q_vector;
-
-               map = &irqmap_info->vecmap[i];
-
-               vector_id = map->vector_id;
-               vsi_id = map->vsi_id;
-               /* vector_id is always 0-based for each VF, and can never be
-                * larger than or equal to the max allowed interrupts per VF
-                */
-               if (!(vector_id < pf->vfs.num_msix_per) ||
-                   !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
-                   (!vector_id && (map->rxq_map || map->txq_map))) {
-                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                       goto error_param;
-               }
-
-               /* No need to map VF miscellaneous or rogue vector */
-               if (!vector_id)
-                       continue;
-
-               /* Subtract the non-queue vector count from the vector_id
-                * passed by the VF to get the VSI queue vector array index
-                */
-               q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
-               if (!q_vector) {
-                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                       goto error_param;
-               }
-
-               /* look out for an invalid queue index */
-               v_ret = (enum virtchnl_status_code)
-                       ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
-               if (v_ret)
-                       goto error_param;
-       }
-
-error_param:
-       /* send the response to the VF */
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
-                                    NULL, 0);
-}
-
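Vector 0 in the VF's numbering is the miscellaneous/OICR interrupt, so queue vectors start at 1 and the q_vectors[] lookup subtracts the non-queue vector count. Assuming one non-queue vector per VF, the translation looks like this (illustrative only):

#include <stdio.h>

#define NONQ_VECS 1   /* stand-in for ICE_NONQ_VECS_VF */

int main(void)
{
        unsigned int vector_id;

        for (vector_id = 0; vector_id < 4; vector_id++) {
                if (!vector_id) {   /* OICR: nothing to map */
                        puts("vector 0 -> misc/OICR, skipped");
                        continue;
                }
                printf("vector %u -> q_vectors[%u]\n",
                       vector_id, vector_id - NONQ_VECS);
        }
        return 0;
}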
-/**
- * ice_vc_cfg_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to configure the Rx/Tx queues
- */
-static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
-{
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct virtchnl_vsi_queue_config_info *qci =
-           (struct virtchnl_vsi_queue_config_info *)msg;
-       struct virtchnl_queue_pair_info *qpi;
-       struct ice_pf *pf = vf->pf;
-       struct ice_vsi *vsi;
-       int i, q_idx;
-
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
-           qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
-               dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
-                       vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       for (i = 0; i < qci->num_queue_pairs; i++) {
-               qpi = &qci->qpair[i];
-               if (qpi->txq.vsi_id != qci->vsi_id ||
-                   qpi->rxq.vsi_id != qci->vsi_id ||
-                   qpi->rxq.queue_id != qpi->txq.queue_id ||
-                   qpi->txq.headwb_enabled ||
-                   !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
-                   !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
-                   !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
-                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                       goto error_param;
-               }
-
-               q_idx = qpi->rxq.queue_id;
-
-               /* make sure selected "q_idx" is in valid range of queues
-                * for selected "vsi"
-                */
-               if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
-                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                       goto error_param;
-               }
-
-               /* copy Tx queue info from VF into VSI */
-               if (qpi->txq.ring_len > 0) {
-                       vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
-                       vsi->tx_rings[i]->count = qpi->txq.ring_len;
-                       if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
-                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                               goto error_param;
-                       }
-               }
-
-               /* copy Rx queue info from VF into VSI */
-               if (qpi->rxq.ring_len > 0) {
-                       u16 max_frame_size = ice_vc_get_max_frame_size(vf);
-
-                       vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
-                       vsi->rx_rings[i]->count = qpi->rxq.ring_len;
-
-                       if (qpi->rxq.databuffer_size != 0 &&
-                           (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
-                            qpi->rxq.databuffer_size < 1024)) {
-                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                               goto error_param;
-                       }
-                       vsi->rx_buf_len = qpi->rxq.databuffer_size;
-                       vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
-                       if (qpi->rxq.max_pkt_size > max_frame_size ||
-                           qpi->rxq.max_pkt_size < 64) {
-                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                               goto error_param;
-                       }
-
-                       vsi->max_frame = qpi->rxq.max_pkt_size;
-                       /* add space for the port VLAN since the VF driver is not
-                        * expected to account for it in the MTU calculation
-                        */
-                       if (ice_vf_is_port_vlan_ena(vf))
-                               vsi->max_frame += VLAN_HLEN;
-
-                       if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
-                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                               goto error_param;
-                       }
-               }
-       }
-
-error_param:
-       /* send the response to the VF */
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
-                                    NULL, 0);
-}
-
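The Rx checks in ice_vc_cfg_qs_msg pin the VF-supplied sizes to fixed windows: a non-zero data buffer size must fall within [1024, 16*1024 - 128] bytes and the max packet size within [64, max_frame_size]. A compact restatement of just the range logic; the 9728-byte ceiling is an assumed max frame size for the example:

#include <stdbool.h>
#include <stdio.h>

static bool rx_sizes_ok(unsigned buf, unsigned pkt, unsigned max_frame)
{
        if (buf != 0 && (buf < 1024 || buf > 16 * 1024 - 128))
                return false;   /* data buffer out of range */
        if (pkt < 64 || pkt > max_frame)
                return false;   /* packet size out of range */
        return true;
}

int main(void)
{
        printf("%d\n", rx_sizes_ok(2048, 1518, 9728)); /* 1: valid */
        printf("%d\n", rx_sizes_ok(512, 1518, 9728));  /* 0: buffer too small */
        return 0;
}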
-/**
- * ice_is_vf_trusted
- * @vf: pointer to the VF info
- */
-static bool ice_is_vf_trusted(struct ice_vf *vf)
-{
-       return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
-}
-
-/**
- * ice_can_vf_change_mac
- * @vf: pointer to the VF info
- *
- * Return true if the VF is allowed to change its MAC filters, false otherwise
- */
-static bool ice_can_vf_change_mac(struct ice_vf *vf)
-{
-       /* If the VF MAC address has been set administratively (via the
-        * ndo_set_vf_mac command), then deny permission to the VF to
-        * add/delete unicast MAC addresses, unless the VF is trusted
-        */
-       if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
-               return false;
-
-       return true;
-}
-
-/**
- * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
- * @vc_ether_addr: used to extract the type
- */
-static u8
-ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
-{
-       return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
-}
-
-/**
- * ice_is_vc_addr_legacy - check if the MAC address is from an older VF
- * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
- */
-static bool
-ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
-{
-       u8 type = ice_vc_ether_addr_type(vc_ether_addr);
-
-       return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
-}
-
-/**
- * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC
- * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
- *
- * This function should only be called when the MAC address in
- * virtchnl_ether_addr is a valid unicast MAC
- */
-static bool
-ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
-{
-       u8 type = ice_vc_ether_addr_type(vc_ether_addr);
-
-       return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
-}
-
-/**
- * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
- * @vf: VF to update
- * @vc_ether_addr: structure from VIRTCHNL with MAC to add
- */
-static void
-ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
-{
-       u8 *mac_addr = vc_ether_addr->addr;
-
-       if (!is_valid_ether_addr(mac_addr))
-               return;
-
-       /* only allow legacy VF drivers to set the device and hardware MAC if it
-        * is zero and allow new VF drivers to set the hardware MAC if the type
-        * was correctly specified over VIRTCHNL
-        */
-       if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
-            is_zero_ether_addr(vf->hw_lan_addr.addr)) ||
-           ice_is_vc_addr_primary(vc_ether_addr)) {
-               ether_addr_copy(vf->dev_lan_addr.addr, mac_addr);
-               ether_addr_copy(vf->hw_lan_addr.addr, mac_addr);
-       }
-
-       /* hardware and device MACs are already set, but it's possible that the
-        * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
-        * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
-        * away for the legacy VF driver case as it will be updated in the
-        * delete flow for this case
-        */
-       if (ice_is_vc_addr_legacy(vc_ether_addr)) {
-               ether_addr_copy(vf->legacy_last_added_umac.addr,
-                               mac_addr);
-               vf->legacy_last_added_umac.time_modified = jiffies;
-       }
-}
-
-/**
- * ice_vc_add_mac_addr - attempt to add the MAC address passed in
- * @vf: pointer to the VF info
- * @vsi: pointer to the VF's VSI
- * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC
- */
-static int
-ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
-                   struct virtchnl_ether_addr *vc_ether_addr)
-{
-       struct device *dev = ice_pf_to_dev(vf->pf);
-       u8 *mac_addr = vc_ether_addr->addr;
-       int ret;
-
-       /* device MAC already added */
-       if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
-               return 0;
-
-       if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
-               dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
-               return -EPERM;
-       }
-
-       ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
-       if (ret == -EEXIST) {
-               dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
-                       vf->vf_id);
-               /* don't return since we might need to update
-                * the primary MAC in ice_vfhw_mac_add() below
-                */
-       } else if (ret) {
-               dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
-                       mac_addr, vf->vf_id, ret);
-               return ret;
-       } else {
-               vf->num_mac++;
-       }
-
-       ice_vfhw_mac_add(vf, vc_ether_addr);
-
-       return ret;
-}
-
-/**
- * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired
- * @last_added_umac: structure used to check expiration
- */
-static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
-{
-#define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME   msecs_to_jiffies(3000)
-       return time_is_before_jiffies(last_added_umac->time_modified +
-                                     ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
-}
-
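The expiry check above is the standard jiffies idiom: stamp the modification time, then ask whether now is past stamp + window (3000 ms here). The same shape in portable C using wall-clock seconds in place of jiffies, purely as an illustration:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define EXPIRE_SECS 3   /* mirrors the 3000 ms window above */

static bool legacy_umac_expired(time_t time_modified)
{
        return time(NULL) > time_modified + EXPIRE_SECS;
}

int main(void)
{
        time_t stamp = time(NULL) - 5;  /* modified 5 s ago */

        printf("expired: %d\n", legacy_umac_expired(stamp)); /* 1 */
        return 0;
}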
-/**
- * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF
- * @vf: VF to update
- * @vc_ether_addr: structure from VIRTCHNL with MAC to check
- *
- * only update cached hardware MAC for legacy VF drivers on delete
- * because we cannot guarantee order/type of MAC from the VF driver
- */
-static void
-ice_update_legacy_cached_mac(struct ice_vf *vf,
-                            struct virtchnl_ether_addr *vc_ether_addr)
-{
-       if (!ice_is_vc_addr_legacy(vc_ether_addr) ||
-           ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
-               return;
-
-       ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr);
-       ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr);
-}
-
-/**
- * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
- * @vf: VF to update
- * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
- */
-static void
-ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
-{
-       u8 *mac_addr = vc_ether_addr->addr;
-
-       if (!is_valid_ether_addr(mac_addr) ||
-           !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
-               return;
-
-       /* allow the device MAC to be repopulated in the add flow and don't
-        * clear the hardware MAC (i.e. hw_lan_addr.addr) here as that is meant
-        * to be persistent on VM reboot and across driver unload/load, which
-        * won't work if we clear the hardware MAC here
-        */
-       eth_zero_addr(vf->dev_lan_addr.addr);
-
-       ice_update_legacy_cached_mac(vf, vc_ether_addr);
-}
-
-/**
- * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
- * @vf: pointer to the VF info
- * @vsi: pointer to the VF's VSI
- * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC
- */
-static int
-ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
-                   struct virtchnl_ether_addr *vc_ether_addr)
-{
-       struct device *dev = ice_pf_to_dev(vf->pf);
-       u8 *mac_addr = vc_ether_addr->addr;
-       int status;
-
-       if (!ice_can_vf_change_mac(vf) &&
-           ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
-               return 0;
-
-       status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
-       if (status == -ENOENT) {
-               dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
-                       vf->vf_id);
-               return -ENOENT;
-       } else if (status) {
-               dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
-                       mac_addr, vf->vf_id, status);
-               return -EIO;
-       }
-
-       ice_vfhw_mac_del(vf, vc_ether_addr);
-
-       vf->num_mac--;
-
-       return 0;
-}
-
-/**
- * ice_vc_handle_mac_addr_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @set: true if MAC filters are being set, false otherwise
- *
- * add or remove guest MAC address filters
- */
-static int
-ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
-{
-       int (*ice_vc_cfg_mac)
-               (struct ice_vf *vf, struct ice_vsi *vsi,
-                struct virtchnl_ether_addr *virtchnl_ether_addr);
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct virtchnl_ether_addr_list *al =
-           (struct virtchnl_ether_addr_list *)msg;
-       struct ice_pf *pf = vf->pf;
-       enum virtchnl_ops vc_op;
-       struct ice_vsi *vsi;
-       int i;
-
-       if (set) {
-               vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
-               ice_vc_cfg_mac = ice_vc_add_mac_addr;
-       } else {
-               vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
-               ice_vc_cfg_mac = ice_vc_del_mac_addr;
-       }
-
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
-           !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto handle_mac_exit;
-       }
-
-       /* If this VF is not privileged, then we can't add more than a
-        * limited number of addresses. Check to make sure that the
-        * additions do not push us over the limit.
-        */
-       if (set && !ice_is_vf_trusted(vf) &&
-           (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
-               dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
-                       vf->vf_id);
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto handle_mac_exit;
-       }
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto handle_mac_exit;
-       }
-
-       for (i = 0; i < al->num_elements; i++) {
-               u8 *mac_addr = al->list[i].addr;
-               int result;
-
-               if (is_broadcast_ether_addr(mac_addr) ||
-                   is_zero_ether_addr(mac_addr))
-                       continue;
-
-               result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
-               if (result == -EEXIST || result == -ENOENT) {
-                       continue;
-               } else if (result) {
-                       v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-                       goto handle_mac_exit;
-               }
-       }
-
-handle_mac_exit:
-       /* send the response to the VF */
-       return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
-}
-
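Add and delete share one walker by binding a function pointer to the per-address handler up front, keeping the state checks and the loop in a single place. Stripped to its skeleton, with hypothetical handler names:

#include <stdio.h>

static int add_addr(const char *a) { printf("add %s\n", a); return 0; }
static int del_addr(const char *a) { printf("del %s\n", a); return 0; }

static int handle_addrs(const char **list, int n, int set)
{
        /* pick the handler once, then share the loop */
        int (*cfg)(const char *) = set ? add_addr : del_addr;
        int i;

        for (i = 0; i < n; i++) {
                int ret = cfg(list[i]);

                if (ret)
                        return ret;
        }
        return 0;
}

int main(void)
{
        const char *macs[] = { "aa:bb:cc:dd:ee:01", "aa:bb:cc:dd:ee:02" };

        return handle_addrs(macs, 2, 1);
}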
-/**
- * ice_vc_add_mac_addr_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * add guest MAC address filter
- */
-static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
-{
-       return ice_vc_handle_mac_addr_msg(vf, msg, true);
-}
-
-/**
- * ice_vc_del_mac_addr_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * remove guest MAC address filter
- */
-static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
-{
-       return ice_vc_handle_mac_addr_msg(vf, msg, false);
-}
-
-/**
- * ice_vc_request_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * VFs get a default number of queues but can use this message to request a
- * different number. If the request is successful, the PF will reset the VF and
- * return 0. If unsuccessful, the PF will send a virtchnl response informing the
- * VF of the number of available queue pairs.
- */
-static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
-{
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct virtchnl_vf_res_request *vfres =
-               (struct virtchnl_vf_res_request *)msg;
-       u16 req_queues = vfres->num_queue_pairs;
-       struct ice_pf *pf = vf->pf;
-       u16 max_allowed_vf_queues;
-       u16 tx_rx_queue_left;
-       struct device *dev;
-       u16 cur_queues;
-
-       dev = ice_pf_to_dev(pf);
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       cur_queues = vf->num_vf_qs;
-       tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
-                                ice_get_avail_rxq_count(pf));
-       max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
-       if (!req_queues) {
-               dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
-                       vf->vf_id);
-       } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
-               dev_err(dev, "VF %d tried to request more than %d queues.\n",
-                       vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
-               vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
-       } else if (req_queues > cur_queues &&
-                  req_queues - cur_queues > tx_rx_queue_left) {
-               dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
-                        vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
-               vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
-                                              ICE_MAX_RSS_QS_PER_VF);
-       } else {
-               /* request can be granted, so reset the VF */
-               vf->num_req_qs = req_queues;
-               ice_vc_reset_vf(vf);
-               dev_info(dev, "VF %d granted request of %u queues.\n",
-                        vf->vf_id, req_queues);
-               return 0;
-       }
-
-error_param:
-       /* send the response to the VF */
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
-                                    v_ret, (u8 *)vfres, sizeof(*vfres));
-}
-
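The grant logic above clamps a request three ways: zero requests are only logged, anything above ICE_MAX_RSS_QS_PER_VF is reported back as that cap, and a request needing more than the free queues is reported back as current + available (also capped). A numeric sketch of the reported queue-pair count, with zero-request handling simplified to "keep the current count"; the constants are examples, not the driver's:

#include <stdio.h>

#define MAX_QS_PER_VF 16   /* illustrative hard cap */

static unsigned reply(unsigned req, unsigned cur, unsigned left)
{
        unsigned max_allowed = left + cur;

        if (!req)
                return cur;             /* zero request: keep current */
        if (req > MAX_QS_PER_VF)
                return MAX_QS_PER_VF;   /* clamp to the hard cap */
        if (req > cur && req - cur > left)
                return max_allowed < MAX_QS_PER_VF ?
                       max_allowed : MAX_QS_PER_VF; /* not enough free */
        return req;                     /* granted as asked */
}

int main(void)
{
        printf("%u\n", reply(32, 4, 8)); /* 16: over the cap */
        printf("%u\n", reply(14, 4, 8)); /* 12: only 8 queues left */
        printf("%u\n", reply(8, 4, 8));  /*  8: granted */
        return 0;
}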
-/**
- * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported
- * @hw: hardware structure used to check the VLAN mode
- * @vlan_proto: VLAN TPID being checked
- *
- * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q
- * and ETH_P_8021AD are supported. If the device is configured in Single VLAN
- * Mode (SVM), then only ETH_P_8021Q is supported.
- */
-static bool
-ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto)
-{
-       bool is_supported = false;
-
-       switch (vlan_proto) {
-       case ETH_P_8021Q:
-               is_supported = true;
-               break;
-       case ETH_P_8021AD:
-               if (ice_is_dvm_ena(hw))
-                       is_supported = true;
-               break;
-       }
-
-       return is_supported;
-}
-
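Net effect: in Double VLAN Mode (DVM) both the 0x8100 and 0x88a8 TPIDs are accepted for a port VLAN, while Single VLAN Mode (SVM) accepts only 0x8100. The same truth table as a runnable check, with the DVM state passed in by the caller in place of ice_is_dvm_ena():

#include <stdbool.h>
#include <stdio.h>

#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88A8

static bool port_vlan_proto_ok(bool dvm, unsigned proto)
{
        switch (proto) {
        case ETH_P_8021Q:
                return true;    /* supported in SVM and DVM */
        case ETH_P_8021AD:
                return dvm;     /* outer tag needs DVM */
        default:
                return false;
        }
}

int main(void)
{
        printf("%d %d\n",
               port_vlan_proto_ok(false, ETH_P_8021AD),  /* 0: SVM */
               port_vlan_proto_ok(true, ETH_P_8021AD));  /* 1: DVM */
        return 0;
}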
-/**
- * ice_set_vf_port_vlan
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @vlan_id: VLAN ID being set
- * @qos: priority setting
- * @vlan_proto: VLAN protocol
- *
- * program VF Port VLAN ID and/or QoS
- */
-int
-ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
-                    __be16 vlan_proto)
-{
-       struct ice_pf *pf = ice_netdev_to_pf(netdev);
-       u16 local_vlan_proto = ntohs(vlan_proto);
-       struct device *dev;
-       struct ice_vf *vf;
-       int ret;
-
-       dev = ice_pf_to_dev(pf);
-
-       if (vlan_id >= VLAN_N_VID || qos > 7) {
-               dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
-                       vf_id, vlan_id, qos);
-               return -EINVAL;
-       }
-
-       if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
-               dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n",
-                       local_vlan_proto);
-               return -EPROTONOSUPPORT;
-       }
-
-       vf = ice_get_vf_by_id(pf, vf_id);
-       if (!vf)
-               return -EINVAL;
-
-       ret = ice_check_vf_ready_for_cfg(vf);
-       if (ret)
-               goto out_put_vf;
-
-       if (ice_vf_get_port_vlan_prio(vf) == qos &&
-           ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto &&
-           ice_vf_get_port_vlan_id(vf) == vlan_id) {
-               /* duplicate request, so just return success */
-               dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n",
-                       vlan_id, qos, local_vlan_proto);
-               ret = 0;
-               goto out_put_vf;
-       }
-
-       mutex_lock(&vf->cfg_lock);
-
-       vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos);
-       if (ice_vf_is_port_vlan_ena(vf))
-               dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n",
-                        vlan_id, qos, local_vlan_proto, vf_id);
-       else
-               dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
-
-       ice_vc_reset_vf(vf);
-       mutex_unlock(&vf->cfg_lock);
-
-out_put_vf:
-       ice_put_vf(vf);
-       return ret;
-}
-
-/**
- * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
- * @caps: VF driver negotiated capabilities
- *
- * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
- */
-static bool ice_vf_vlan_offload_ena(u32 caps)
-{
-       return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
-}
-
-/**
- * ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
- * @vf: VF used to determine if VLAN promiscuous config is allowed
- */
-static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
-{
-       if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
-            test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
-           test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, vf->pf->flags))
-               return true;
-
-       return false;
-}
-
-/**
- * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
- * @vsi: VF's VSI used to enable VLAN promiscuous mode
- * @vlan: VLAN used to enable VLAN promiscuous
- *
- * This function should only be called if VLAN promiscuous mode is allowed,
- * which can be determined via ice_is_vlan_promisc_allowed().
- */
-static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
-{
-       u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
-       int status;
-
-       status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
-                                         vlan->vid);
-       if (status && status != -EEXIST)
-               return status;
-
-       return 0;
-}
-
-/**
- * ice_vf_dis_vlan_promisc - Disable Tx/Rx VLAN promiscuous for the VLAN
- * @vsi: VF's VSI used to disable VLAN promiscuous mode for
- * @vlan: VLAN used to disable VLAN promiscuous
- *
- * This function should only be called if VLAN promiscuous mode is allowed,
- * which can be determined via ice_is_vlan_promisc_allowed().
- */
-static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
-{
-       u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
-       int status;
-
-       status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
-                                           vlan->vid);
-       if (status && status != -ENOENT)
-               return status;
-
-       return 0;
-}
-
-/**
- * ice_vf_has_max_vlans - check if VF already has the max allowed VLAN filters
- * @vf: VF to check against
- * @vsi: VF's VSI
- *
- * If the VF is trusted then the VF is allowed to add as many VLANs as it
- * wants to, so return false.
- *
- * When the VF is untrusted compare the number of non-zero VLANs + 1 to the max
- * allowed VLANs for an untrusted VF. Return the result of this comparison.
- */
-static bool ice_vf_has_max_vlans(struct ice_vf *vf, struct ice_vsi *vsi)
-{
-       if (ice_is_vf_trusted(vf))
-               return false;
-
-#define ICE_VF_ADDED_VLAN_ZERO_FLTRS   1
-       return ((ice_vsi_num_non_zero_vlans(vsi) +
-               ICE_VF_ADDED_VLAN_ZERO_FLTRS) >= ICE_MAX_VLAN_PER_VF);
-}
-
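Note the accounting: VLAN 0 is always installed for each VF (for Tx anti-spoof), so an untrusted VF is at its limit once non-zero VLANs + 1 reaches ICE_MAX_VLAN_PER_VF. With an illustrative cap of 8, the eighth non-zero VLAN is the one refused:

#include <stdbool.h>
#include <stdio.h>

#define MAX_VLAN_PER_VF 8   /* illustrative cap */

static bool has_max_vlans(bool trusted, unsigned non_zero_vlans)
{
        if (trusted)
                return false;   /* trusted VFs are not capped */

        /* +1 accounts for the always-present VLAN 0 filter */
        return non_zero_vlans + 1 >= MAX_VLAN_PER_VF;
}

int main(void)
{
        printf("%d\n", has_max_vlans(false, 6)); /* 0: room for one more */
        printf("%d\n", has_max_vlans(false, 7)); /* 1: at the cap */
        return 0;
}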
-/**
- * ice_vc_process_vlan_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @add_v: Add VLAN if true, otherwise delete VLAN
- *
- * Process virtchnl op to add or remove programmed guest VLAN ID
- */
-static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
-{
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct virtchnl_vlan_filter_list *vfl =
-           (struct virtchnl_vlan_filter_list *)msg;
-       struct ice_pf *pf = vf->pf;
-       bool vlan_promisc = false;
-       struct ice_vsi *vsi;
-       struct device *dev;
-       int status = 0;
-       int i;
-
-       dev = ice_pf_to_dev(pf);
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       for (i = 0; i < vfl->num_elements; i++) {
-               if (vfl->vlan_id[i] >= VLAN_N_VID) {
-                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                       dev_err(dev, "invalid VF VLAN id %d\n",
-                               vfl->vlan_id[i]);
-                       goto error_param;
-               }
-       }
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (add_v && ice_vf_has_max_vlans(vf, vsi)) {
-               dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
-                        vf->vf_id);
-               /* There is no need to let VF know about being not trusted,
-                * so we can just return success message here
-                */
-               goto error_param;
-       }
-
-       /* in DVM a VF can add/delete inner VLAN filters when
-        * VIRTCHNL_VF_OFFLOAD_VLAN is negotiated, so only reject in SVM
-        */
-       if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&pf->hw)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       /* in DVM VLAN promiscuous is based on the outer VLAN, which would be
-        * the port VLAN if VIRTCHNL_VF_OFFLOAD_VLAN was negotiated, so only
-        * allow vlan_promisc = true in SVM and if no port VLAN is configured
-        */
-       vlan_promisc = ice_is_vlan_promisc_allowed(vf) &&
-               !ice_is_dvm_ena(&pf->hw) &&
-               !ice_vf_is_port_vlan_ena(vf);
-
-       if (add_v) {
-               for (i = 0; i < vfl->num_elements; i++) {
-                       u16 vid = vfl->vlan_id[i];
-                       struct ice_vlan vlan;
-
-                       if (ice_vf_has_max_vlans(vf, vsi)) {
-                               dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
-                                        vf->vf_id);
-                               /* There is no need to let VF know about being
-                                * not trusted, so we can just return success
-                                * message here as well.
-                                */
-                               goto error_param;
-                       }
-
-                       /* we add VLAN 0 by default for each VF so we can enable
-                        * Tx VLAN anti-spoof without triggering MDD events so
-                        * we don't need to add it again here
-                        */
-                       if (!vid)
-                               continue;
-
-                       vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
-                       status = vsi->inner_vlan_ops.add_vlan(vsi, &vlan);
-                       if (status) {
-                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                               goto error_param;
-                       }
-
-                       /* Enable VLAN filtering on first non-zero VLAN */
-                       if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
-                               status = vsi->inner_vlan_ops.ena_rx_filtering(vsi);
-                               if (status) {
-                                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                                       dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
-                                               vid, status);
-                                       goto error_param;
-                               }
-                       } else if (vlan_promisc) {
-                               status = ice_vf_ena_vlan_promisc(vsi, &vlan);
-                               if (status) {
-                                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                                       dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
-                                               vid, status);
-                               }
-                       }
-               }
-       } else {
-               /* For an untrusted VF, the number of VLAN elements passed to
-                * the PF for removal might be greater than the number of VLAN
-                * filters programmed for that VF, so use the actual number of
-                * VLANs added earlier with the add VLAN opcode. This avoids
-                * removing a VLAN that doesn't exist, which would result in
-                * sending an erroneous failure message back to the VF.
-                */
-               int num_vf_vlan;
-
-               num_vf_vlan = vsi->num_vlan;
-               for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
-                       u16 vid = vfl->vlan_id[i];
-                       struct ice_vlan vlan;
-
-                       /* we add VLAN 0 by default for each VF so we can enable
-                        * Tx VLAN anti-spoof without triggering MDD events so
-                        * we don't want a VIRTCHNL request to remove it
-                        */
-                       if (!vid)
-                               continue;
-
-                       vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
-                       status = vsi->inner_vlan_ops.del_vlan(vsi, &vlan);
-                       if (status) {
-                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                               goto error_param;
-                       }
-
-                       /* Disable VLAN filtering when only VLAN 0 is left */
-                       if (!ice_vsi_has_non_zero_vlans(vsi))
-                               vsi->inner_vlan_ops.dis_rx_filtering(vsi);
-
-                       if (vlan_promisc)
-                               ice_vf_dis_vlan_promisc(vsi, &vlan);
-               }
-       }
-
-error_param:
-       /* send the response to the VF */
-       if (add_v)
-               return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
-                                            NULL, 0);
-       else
-               return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
-                                            NULL, 0);
-}
-
-/**
- * ice_vc_add_vlan_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Add and program guest VLAN ID
- */
-static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
-{
-       return ice_vc_process_vlan_msg(vf, msg, true);
-}
-
-/**
- * ice_vc_remove_vlan_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * remove programmed guest VLAN ID
- */
-static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
-{
-       return ice_vc_process_vlan_msg(vf, msg, false);
-}
-
-/**
- * ice_vc_ena_vlan_stripping
- * @vf: pointer to the VF info
- *
- * Enable VLAN header stripping for a given VF
- */
-static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
-{
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct ice_vsi *vsi;
-
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-
-error_param:
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
-                                    v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_dis_vlan_stripping
- * @vf: pointer to the VF info
- *
- * Disable VLAN header stripping for a given VF
- */
-static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
-{
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct ice_vsi *vsi;
-
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto error_param;
-       }
-
-       if (vsi->inner_vlan_ops.dis_stripping(vsi))
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-
-error_param:
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
-                                    v_ret, NULL, 0);
-}
-
-/**
- * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
- * @vf: VF to enable/disable VLAN stripping for on initialization
- *
- * Set the default for VLAN stripping based on whether a port VLAN is configured
- * and the current VLAN mode of the device.
- */
-static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
-{
-       struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
-       if (!vsi)
-               return -EINVAL;
-
-       /* don't modify stripping if port VLAN is configured in SVM since the
-        * port VLAN is based on the inner/single VLAN in SVM
-        */
-       if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw))
-               return 0;
-
-       if (ice_vf_vlan_offload_ena(vf->driver_caps))
-               return vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
-       else
-               return vsi->inner_vlan_ops.dis_stripping(vsi);
-}
-
-static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf)
-{
-       if (vf->trusted)
-               return VLAN_N_VID;
-       else
-               return ICE_MAX_VLAN_PER_VF;
-}
-
-/**
- * ice_vf_outer_vlan_not_allowed - check if the outer VLAN can be used when the device is in DVM
- * @vf: VF being checked
- */
-static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf)
-{
-       if (ice_vf_is_port_vlan_ena(vf))
-               return true;
-
-       return false;
-}
-
-/**
- * ice_vc_set_dvm_caps - set VLAN capabilities when the device is in DVM
- * @vf: VF that capabilities are being set for
- * @caps: VLAN capabilities to populate
- *
- * Determine VLAN capabilities support based on whether a port VLAN is
- * configured. If a port VLAN is configured then the VF should use the inner
- * filtering/offload capabilities since the port VLAN is using the outer VLAN
- * capabilities.
- */
-static void
-ice_vc_set_dvm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
-{
-       struct virtchnl_vlan_supported_caps *supported_caps;
-
-       if (ice_vf_outer_vlan_not_allowed(vf)) {
-               /* until support for inner VLAN filtering is added when a port
-                * VLAN is configured, only support software offloaded inner
-                * VLANs when a port VLAN is configured in DVM
-                */
-               supported_caps = &caps->filtering.filtering_support;
-               supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
-
-               supported_caps = &caps->offloads.stripping_support;
-               supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
-                                       VIRTCHNL_VLAN_TOGGLE |
-                                       VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
-               supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
-
-               supported_caps = &caps->offloads.insertion_support;
-               supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
-                                       VIRTCHNL_VLAN_TOGGLE |
-                                       VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
-               supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
-
-               caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
-               caps->offloads.ethertype_match =
-                       VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
-       } else {
-               supported_caps = &caps->filtering.filtering_support;
-               supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
-               supported_caps->outer = VIRTCHNL_VLAN_ETHERTYPE_8100 |
-                                       VIRTCHNL_VLAN_ETHERTYPE_88A8 |
-                                       VIRTCHNL_VLAN_ETHERTYPE_9100 |
-                                       VIRTCHNL_VLAN_ETHERTYPE_AND;
-               caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100 |
-                                                VIRTCHNL_VLAN_ETHERTYPE_88A8 |
-                                                VIRTCHNL_VLAN_ETHERTYPE_9100;
-
-               supported_caps = &caps->offloads.stripping_support;
-               supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
-                                       VIRTCHNL_VLAN_ETHERTYPE_8100 |
-                                       VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
-               supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
-                                       VIRTCHNL_VLAN_ETHERTYPE_8100 |
-                                       VIRTCHNL_VLAN_ETHERTYPE_88A8 |
-                                       VIRTCHNL_VLAN_ETHERTYPE_9100 |
-                                       VIRTCHNL_VLAN_ETHERTYPE_XOR |
-                                       VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2;
-
-               supported_caps = &caps->offloads.insertion_support;
-               supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
-                                       VIRTCHNL_VLAN_ETHERTYPE_8100 |
-                                       VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
-               supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
-                                       VIRTCHNL_VLAN_ETHERTYPE_8100 |
-                                       VIRTCHNL_VLAN_ETHERTYPE_88A8 |
-                                       VIRTCHNL_VLAN_ETHERTYPE_9100 |
-                                       VIRTCHNL_VLAN_ETHERTYPE_XOR |
-                                       VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2;
-
-               caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
-
-               caps->offloads.ethertype_match =
-                       VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
-       }
-
-       caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
-}
-
-/**
- * ice_vc_set_svm_caps - set VLAN capabilities when the device is in SVM
- * @vf: VF that capabilities are being set for
- * @caps: VLAN capabilities to populate
- *
- * Determine VLAN capabilities support based on whether a port VLAN is
- * configured. If a port VLAN is configured then the VF does not have any VLAN
- * filtering or offload capabilities since the port VLAN is using the inner VLAN
- * capabilities in single VLAN mode (SVM). Otherwise allow the VF to use inner
- * VLAN filtering and offload capabilities.
- */
-static void
-ice_vc_set_svm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
-{
-       struct virtchnl_vlan_supported_caps *supported_caps;
-
-       if (ice_vf_is_port_vlan_ena(vf)) {
-               supported_caps = &caps->filtering.filtering_support;
-               supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
-               supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
-
-               supported_caps = &caps->offloads.stripping_support;
-               supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
-               supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
-
-               supported_caps = &caps->offloads.insertion_support;
-               supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
-               supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
-
-               caps->offloads.ethertype_init = VIRTCHNL_VLAN_UNSUPPORTED;
-               caps->offloads.ethertype_match = VIRTCHNL_VLAN_UNSUPPORTED;
-               caps->filtering.max_filters = 0;
-       } else {
-               supported_caps = &caps->filtering.filtering_support;
-               supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100;
-               supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
-               caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
-
-               supported_caps = &caps->offloads.stripping_support;
-               supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
-                                       VIRTCHNL_VLAN_TOGGLE |
-                                       VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
-               supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
-
-               supported_caps = &caps->offloads.insertion_support;
-               supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
-                                       VIRTCHNL_VLAN_TOGGLE |
-                                       VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
-               supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
-
-               caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
-               caps->offloads.ethertype_match =
-                       VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
-               caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
-       }
-}
-
-/**
- * ice_vc_get_offload_vlan_v2_caps - determine VF's VLAN capabilities
- * @vf: VF to determine VLAN capabilities for
- *
- * This will only be called if the VF and PF successfully negotiated
- * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
- *
- * Set VLAN capabilities based on the current VLAN mode and whether a port VLAN
- * is configured or not.
- */
-static int ice_vc_get_offload_vlan_v2_caps(struct ice_vf *vf)
-{
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct virtchnl_vlan_caps *caps = NULL;
-       int err, len = 0;
-
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       caps = kzalloc(sizeof(*caps), GFP_KERNEL);
-       if (!caps) {
-               v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
-               goto out;
-       }
-       len = sizeof(*caps);
-
-       if (ice_is_dvm_ena(&vf->pf->hw))
-               ice_vc_set_dvm_caps(vf, caps);
-       else
-               ice_vc_set_svm_caps(vf, caps);
-
-       /* store negotiated caps to prevent invalid VF messages */
-       memcpy(&vf->vlan_v2_caps, caps, sizeof(*caps));
-
-out:
-       err = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
-                                   v_ret, (u8 *)caps, len);
-       kfree(caps);
-       return err;
-}
-
-/**
- * ice_vc_validate_vlan_tpid - validate VLAN TPID
- * @filtering_caps: negotiated/supported VLAN filtering capabilities
- * @tpid: VLAN TPID used for validation
- *
- * Convert the VLAN TPID to a VIRTCHNL_VLAN_ETHERTYPE_* and then compare against
- * the negotiated/supported filtering caps to see if the VLAN TPID is valid.
- */
-static bool ice_vc_validate_vlan_tpid(u16 filtering_caps, u16 tpid)
-{
-       enum virtchnl_vlan_support vlan_ethertype = VIRTCHNL_VLAN_UNSUPPORTED;
-
-       switch (tpid) {
-       case ETH_P_8021Q:
-               vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
-               break;
-       case ETH_P_8021AD:
-               vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_88A8;
-               break;
-       case ETH_P_QINQ1:
-               vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_9100;
-               break;
-       }
-
-       if (!(filtering_caps & vlan_ethertype))
-               return false;
-
-       return true;
-}
-
-/**
- * ice_vc_is_valid_vlan - validate the virtchnl_vlan
- * @vc_vlan: virtchnl_vlan to validate
- *
- * If the VLAN TCI or the VLAN TPID is 0, then this filter is invalid, so return
- * false. Otherwise return true.
- */
-static bool ice_vc_is_valid_vlan(struct virtchnl_vlan *vc_vlan)
-{
-       if (!vc_vlan->tci || !vc_vlan->tpid)
-               return false;
-
-       return true;
-}
-
-/**
- * ice_vc_validate_vlan_filter_list - validate the filter list from the VF
- * @vfc: negotiated/supported VLAN filtering capabilities
- * @vfl: VLAN filter list from VF to validate
- *
- * Validate all of the filters in the VLAN filter list from the VF. If any of
- * the checks fail then return false. Otherwise return true.
- */
-static bool
-ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc,
-                                struct virtchnl_vlan_filter_list_v2 *vfl)
-{
-       u16 i;
-
-       if (!vfl->num_elements)
-               return false;
-
-       for (i = 0; i < vfl->num_elements; i++) {
-               struct virtchnl_vlan_supported_caps *filtering_support =
-                       &vfc->filtering_support;
-               struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
-               struct virtchnl_vlan *outer = &vlan_fltr->outer;
-               struct virtchnl_vlan *inner = &vlan_fltr->inner;
-
-               if ((ice_vc_is_valid_vlan(outer) &&
-                    filtering_support->outer == VIRTCHNL_VLAN_UNSUPPORTED) ||
-                   (ice_vc_is_valid_vlan(inner) &&
-                    filtering_support->inner == VIRTCHNL_VLAN_UNSUPPORTED))
-                       return false;
-
-               if ((outer->tci_mask &&
-                    !(filtering_support->outer & VIRTCHNL_VLAN_FILTER_MASK)) ||
-                   (inner->tci_mask &&
-                    !(filtering_support->inner & VIRTCHNL_VLAN_FILTER_MASK)))
-                       return false;
-
-               if (((outer->tci & VLAN_PRIO_MASK) &&
-                    !(filtering_support->outer & VIRTCHNL_VLAN_PRIO)) ||
-                   ((inner->tci & VLAN_PRIO_MASK) &&
-                    !(filtering_support->inner & VIRTCHNL_VLAN_PRIO)))
-                       return false;
-
-               if ((ice_vc_is_valid_vlan(outer) &&
-                    !ice_vc_validate_vlan_tpid(filtering_support->outer, outer->tpid)) ||
-                   (ice_vc_is_valid_vlan(inner) &&
-                    !ice_vc_validate_vlan_tpid(filtering_support->inner, inner->tpid)))
-                       return false;
-       }
-
-       return true;
-}
-
-/**
- * ice_vc_to_vlan - transform from struct virtchnl_vlan to struct ice_vlan
- * @vc_vlan: struct virtchnl_vlan to transform
- */
-static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
-{
-       struct ice_vlan vlan = { 0 };
-
-       vlan.prio = (vc_vlan->tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
-       vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
-       vlan.tpid = vc_vlan->tpid;
-
-       return vlan;
-}
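
The TCI decomposition follows the 802.1Q tag layout: the top 3 bits carry the
priority (PCP) and the low 12 bits the VLAN ID. A worked example with an
illustrative TCI value:

        /* tci = 0xa064: prio = (0xa064 & 0xe000) >> 13 = 5, vid = 0x064 = 100 */
        struct virtchnl_vlan vc = { .tci = 0xa064, .tpid = ETH_P_8021Q };
        struct ice_vlan vlan = ice_vc_to_vlan(&vc);
        /* vlan.prio == 5, vlan.vid == 100, vlan.tpid == ETH_P_8021Q */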
-
-/**
- * ice_vc_vlan_action - action to perform on the virtchnl_vlan
- * @vsi: VF's VSI used to perform the action
- * @vlan_action: function to perform the action with (i.e. add/del)
- * @vlan: VLAN filter to perform the action with
- */
-static int
-ice_vc_vlan_action(struct ice_vsi *vsi,
-                  int (*vlan_action)(struct ice_vsi *, struct ice_vlan *),
-                  struct ice_vlan *vlan)
-{
-       int err;
-
-       err = vlan_action(vsi, vlan);
-       if (err)
-               return err;
-
-       return 0;
-}
-
-/**
- * ice_vc_del_vlans - delete VLAN(s) from the virtchnl filter list
- * @vf: VF used to delete the VLAN(s)
- * @vsi: VF's VSI used to delete the VLAN(s)
- * @vfl: virtchnl filter list used to delete the filters
- */
-static int
-ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
-                struct virtchnl_vlan_filter_list_v2 *vfl)
-{
-       bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
-       int err;
-       u16 i;
-
-       for (i = 0; i < vfl->num_elements; i++) {
-               struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
-               struct virtchnl_vlan *vc_vlan;
-
-               vc_vlan = &vlan_fltr->outer;
-               if (ice_vc_is_valid_vlan(vc_vlan)) {
-                       struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
-
-                       err = ice_vc_vlan_action(vsi,
-                                                vsi->outer_vlan_ops.del_vlan,
-                                                &vlan);
-                       if (err)
-                               return err;
-
-                       if (vlan_promisc)
-                               ice_vf_dis_vlan_promisc(vsi, &vlan);
-               }
-
-               vc_vlan = &vlan_fltr->inner;
-               if (ice_vc_is_valid_vlan(vc_vlan)) {
-                       struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
-
-                       err = ice_vc_vlan_action(vsi,
-                                                vsi->inner_vlan_ops.del_vlan,
-                                                &vlan);
-                       if (err)
-                               return err;
-
-                       /* no support for VLAN promiscuous on inner VLAN unless
-                        * we are in Single VLAN Mode (SVM)
-                        */
-                       if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc)
-                               ice_vf_dis_vlan_promisc(vsi, &vlan);
-               }
-       }
-
-       return 0;
-}
-
-/**
- * ice_vc_remove_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_DEL_VLAN_V2
- * @vf: VF the message was received from
- * @msg: message received from the VF
- */
-static int ice_vc_remove_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
-{
-       struct virtchnl_vlan_filter_list_v2 *vfl =
-               (struct virtchnl_vlan_filter_list_v2 *)msg;
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct ice_vsi *vsi;
-
-       if (!ice_vc_validate_vlan_filter_list(&vf->vlan_v2_caps.filtering,
-                                             vfl)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       if (ice_vc_del_vlans(vf, vsi, vfl))
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-
-out:
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN_V2, v_ret, NULL,
-                                    0);
-}
-
-/**
- * ice_vc_add_vlans - add VLAN(s) from the virtchnl filter list
- * @vf: VF used to add the VLAN(s)
- * @vsi: VF's VSI used to add the VLAN(s)
- * @vfl: virtchnl filter list used to add the filters
- */
-static int
-ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
-                struct virtchnl_vlan_filter_list_v2 *vfl)
-{
-       bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
-       int err;
-       u16 i;
-
-       for (i = 0; i < vfl->num_elements; i++) {
-               struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
-               struct virtchnl_vlan *vc_vlan;
-
-               vc_vlan = &vlan_fltr->outer;
-               if (ice_vc_is_valid_vlan(vc_vlan)) {
-                       struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
-
-                       err = ice_vc_vlan_action(vsi,
-                                                vsi->outer_vlan_ops.add_vlan,
-                                                &vlan);
-                       if (err)
-                               return err;
-
-                       if (vlan_promisc) {
-                               err = ice_vf_ena_vlan_promisc(vsi, &vlan);
-                               if (err)
-                                       return err;
-                       }
-               }
-
-               vc_vlan = &vlan_fltr->inner;
-               if (ice_vc_is_valid_vlan(vc_vlan)) {
-                       struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
-
-                       err = ice_vc_vlan_action(vsi,
-                                                vsi->inner_vlan_ops.add_vlan,
-                                                &vlan);
-                       if (err)
-                               return err;
-
-                       /* no support for VLAN promiscuous on inner VLAN unless
-                        * we are in Single VLAN Mode (SVM)
-                        */
-                       if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) {
-                               err = ice_vf_ena_vlan_promisc(vsi, &vlan);
-                               if (err)
-                                       return err;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-/**
- * ice_vc_validate_add_vlan_filter_list - validate add filter list from the VF
- * @vsi: VF VSI used to get number of existing VLAN filters
- * @vfc: negotiated/supported VLAN filtering capabilities
- * @vfl: VLAN filter list from VF to validate
- *
- * Validate all of the filters in the VLAN filter list from the VF during the
- * VIRTCHNL_OP_ADD_VLAN_V2 opcode. If any of the checks fail then return false.
- * Otherwise return true.
- */
-static bool
-ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
-                                    struct virtchnl_vlan_filtering_caps *vfc,
-                                    struct virtchnl_vlan_filter_list_v2 *vfl)
-{
-       u16 num_requested_filters = vsi->num_vlan + vfl->num_elements;
-
-       if (num_requested_filters > vfc->max_filters)
-               return false;
-
-       return ice_vc_validate_vlan_filter_list(vfc, vfl);
-}
-
-/**
- * ice_vc_add_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_ADD_VLAN_V2
- * @vf: VF the message was received from
- * @msg: message received from the VF
- */
-static int ice_vc_add_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
-{
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct virtchnl_vlan_filter_list_v2 *vfl =
-               (struct virtchnl_vlan_filter_list_v2 *)msg;
-       struct ice_vsi *vsi;
-
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       if (!ice_vc_validate_add_vlan_filter_list(vsi,
-                                                 &vf->vlan_v2_caps.filtering,
-                                                 vfl)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       if (ice_vc_add_vlans(vf, vsi, vfl))
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-
-out:
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN_V2, v_ret, NULL,
-                                    0);
-}
-
-/**
- * ice_vc_valid_vlan_setting - validate VLAN setting
- * @negotiated_settings: negotiated VLAN settings during VF init
- * @ethertype_setting: ethertype(s) requested for the VLAN setting
- */
-static bool
-ice_vc_valid_vlan_setting(u32 negotiated_settings, u32 ethertype_setting)
-{
-       if (ethertype_setting && !(negotiated_settings & ethertype_setting))
-               return false;
-
-       /* only allow a single VIRTCHNL_VLAN_ETHERTYPE if
-        * VIRTCHNL_VLAN_ETHERTYPE_AND is not negotiated/supported
-        */
-       if (!(negotiated_settings & VIRTCHNL_VLAN_ETHERTYPE_AND) &&
-           hweight32(ethertype_setting) > 1)
-               return false;
-
-       /* ability to modify the VLAN setting was not negotiated */
-       if (!(negotiated_settings & VIRTCHNL_VLAN_TOGGLE))
-               return false;
-
-       return true;
-}
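
The hweight32() population count is what enforces the single-ethertype rule:
asking to toggle two ethertypes in one request is only valid when the AND
capability was negotiated. For example ("negotiated" is a hypothetical value):

        u32 setting = VIRTCHNL_VLAN_ETHERTYPE_8100 | VIRTCHNL_VLAN_ETHERTYPE_88A8;

        /* hweight32(setting) == 2, so this returns false unless negotiated
         * includes VIRTCHNL_VLAN_ETHERTYPE_AND (and VIRTCHNL_VLAN_TOGGLE)
         */
        bool ok = ice_vc_valid_vlan_setting(negotiated, setting);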
-
-/**
- * ice_vc_valid_vlan_setting_msg - validate the VLAN setting message
- * @caps: negotiated VLAN settings during VF init
- * @msg: message to validate
- *
- * Used to validate any VLAN virtchnl message sent as a
- * virtchnl_vlan_setting structure. Validates the message against the
- * negotiated/supported caps during VF driver init.
- */
-static bool
-ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps *caps,
-                             struct virtchnl_vlan_setting *msg)
-{
-       if ((!msg->outer_ethertype_setting &&
-            !msg->inner_ethertype_setting) ||
-           (!caps->outer && !caps->inner))
-               return false;
-
-       if (msg->outer_ethertype_setting &&
-           !ice_vc_valid_vlan_setting(caps->outer,
-                                      msg->outer_ethertype_setting))
-               return false;
-
-       if (msg->inner_ethertype_setting &&
-           !ice_vc_valid_vlan_setting(caps->inner,
-                                      msg->inner_ethertype_setting))
-               return false;
-
-       return true;
-}
-
-/**
- * ice_vc_get_tpid - transform from VIRTCHNL_VLAN_ETHERTYPE_* to VLAN TPID
- * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* used to get VLAN TPID
- * @tpid: VLAN TPID to populate
- */
-static int ice_vc_get_tpid(u32 ethertype_setting, u16 *tpid)
-{
-       switch (ethertype_setting) {
-       case VIRTCHNL_VLAN_ETHERTYPE_8100:
-               *tpid = ETH_P_8021Q;
-               break;
-       case VIRTCHNL_VLAN_ETHERTYPE_88A8:
-               *tpid = ETH_P_8021AD;
-               break;
-       case VIRTCHNL_VLAN_ETHERTYPE_9100:
-               *tpid = ETH_P_QINQ1;
-               break;
-       default:
-               *tpid = 0;
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-/**
- * ice_vc_ena_vlan_offload - enable VLAN offload based on the ethertype_setting
- * @vsi: VF's VSI used to enable the VLAN offload
- * @ena_offload: function used to enable the VLAN offload
- * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* to enable offloads for
- */
-static int
-ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
-                       int (*ena_offload)(struct ice_vsi *vsi, u16 tpid),
-                       u32 ethertype_setting)
-{
-       u16 tpid;
-       int err;
-
-       err = ice_vc_get_tpid(ethertype_setting, &tpid);
-       if (err)
-               return err;
-
-       err = ena_offload(vsi, tpid);
-       if (err)
-               return err;
-
-       return 0;
-}
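
This wrapper lets one helper serve both outer and inner offloads: the caller
selects the callback from the VSI's VLAN ops table, and the helper performs
the VIRTCHNL_VLAN_ETHERTYPE_* to TPID translation before invoking it, as the
stripping handler below does:

        err = ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_stripping,
                                      strip_msg->outer_ethertype_setting);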
-
-#define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3
-#define ICE_L2TSEL_BIT_OFFSET          23
-enum ice_l2tsel {
-       ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
-       ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
-};
-
-/**
- * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
- * @vsi: VSI used to update l2tsel on
- * @l2tsel: l2tsel setting requested
- *
- * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
- * This will modify which descriptor field the first offloaded VLAN will be
- * stripped into.
- */
-static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
-{
-       struct ice_hw *hw = &vsi->back->hw;
-       u32 l2tsel_bit;
-       int i;
-
-       if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
-               l2tsel_bit = 0;
-       else
-               l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
-
-       for (i = 0; i < vsi->alloc_rxq; i++) {
-               u16 pfq = vsi->rxq_map[i];
-               u32 qrx_context_offset;
-               u32 regval;
-
-               qrx_context_offset =
-                       QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
-
-               regval = rd32(hw, qrx_context_offset);
-               regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
-               regval |= l2tsel_bit;
-               wr32(hw, qrx_context_offset, regval);
-       }
-}
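
Each iteration is a read-modify-write of a single bit: only bit 23 of the
third QRX_CONTEXT register changes and every other bit is preserved. The same
pattern in isolation (a generic sketch, not an additional driver API):

        u32 val = rd32(hw, reg);
        val &= ~BIT(ICE_L2TSEL_BIT_OFFSET);  /* clear l2tsel */
        val |= l2tsel_bit;                   /* 0 selects L2TAG2_2ND, BIT(23) selects L2TAG1 */
        wr32(hw, reg, val);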
-
-/**
- * ice_vc_ena_vlan_stripping_v2_msg
- * @vf: VF the message was received from
- * @msg: message received from the VF
- *
- * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
- */
-static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
-{
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct virtchnl_vlan_supported_caps *stripping_support;
-       struct virtchnl_vlan_setting *strip_msg =
-               (struct virtchnl_vlan_setting *)msg;
-       u32 ethertype_setting;
-       struct ice_vsi *vsi;
-
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
-       if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       ethertype_setting = strip_msg->outer_ethertype_setting;
-       if (ethertype_setting) {
-               if (ice_vc_ena_vlan_offload(vsi,
-                                           vsi->outer_vlan_ops.ena_stripping,
-                                           ethertype_setting)) {
-                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                       goto out;
-               } else {
-                       enum ice_l2tsel l2tsel =
-                               ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND;
-
-                       /* PF tells the VF that the outer VLAN tag is always
-                        * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
-                        * inner is always extracted to
-                        * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
-                        * support outer stripping so the first tag always ends
-                        * up in L2TAG2_2ND and the second/inner tag, if
-                        * enabled, is extracted in L2TAG1.
-                        */
-                       ice_vsi_update_l2tsel(vsi, l2tsel);
-               }
-       }
-
-       ethertype_setting = strip_msg->inner_ethertype_setting;
-       if (ethertype_setting &&
-           ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping,
-                                   ethertype_setting)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-out:
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2, v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_dis_vlan_stripping_v2_msg
- * @vf: VF the message was received from
- * @msg: message received from the VF
- *
- * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
- */
-static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
-{
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct virtchnl_vlan_supported_caps *stripping_support;
-       struct virtchnl_vlan_setting *strip_msg =
-               (struct virtchnl_vlan_setting *)msg;
-       u32 ethertype_setting;
-       struct ice_vsi *vsi;
-
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
-       if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       ethertype_setting = strip_msg->outer_ethertype_setting;
-       if (ethertype_setting) {
-               if (vsi->outer_vlan_ops.dis_stripping(vsi)) {
-                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-                       goto out;
-               } else {
-                       enum ice_l2tsel l2tsel =
-                               ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;
-
-                       /* PF tells the VF that the outer VLAN tag is always
-                        * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
-                        * inner is always extracted to
-                        * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
-                        * support inner stripping while outer stripping is
-                        * disabled so that the first and only tag is extracted
-                        * in L2TAG1.
-                        */
-                       ice_vsi_update_l2tsel(vsi, l2tsel);
-               }
-       }
-
-       ethertype_setting = strip_msg->inner_ethertype_setting;
-       if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-out:
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2, v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_ena_vlan_insertion_v2_msg
- * @vf: VF the message was received from
- * @msg: message received from the VF
- *
- * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
- */
-static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
-{
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct virtchnl_vlan_supported_caps *insertion_support;
-       struct virtchnl_vlan_setting *insertion_msg =
-               (struct virtchnl_vlan_setting *)msg;
-       u32 ethertype_setting;
-       struct ice_vsi *vsi;
-
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
-       if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       ethertype_setting = insertion_msg->outer_ethertype_setting;
-       if (ethertype_setting &&
-           ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion,
-                                   ethertype_setting)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       ethertype_setting = insertion_msg->inner_ethertype_setting;
-       if (ethertype_setting &&
-           ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion,
-                                   ethertype_setting)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-out:
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2, v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_dis_vlan_insertion_v2_msg
- * @vf: VF the message was received from
- * @msg: message received from the VF
- *
- * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
- */
-static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
-{
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct virtchnl_vlan_supported_caps *insertion_support;
-       struct virtchnl_vlan_setting *insertion_msg =
-               (struct virtchnl_vlan_setting *)msg;
-       u32 ethertype_setting;
-       struct ice_vsi *vsi;
-
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
-       if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       ethertype_setting = insertion_msg->outer_ethertype_setting;
-       if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-       ethertype_setting = insertion_msg->inner_ethertype_setting;
-       if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto out;
-       }
-
-out:
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2, v_ret, NULL, 0);
-}
-
-static struct ice_vc_vf_ops ice_vc_vf_dflt_ops = {
-       .get_ver_msg = ice_vc_get_ver_msg,
-       .get_vf_res_msg = ice_vc_get_vf_res_msg,
-       .reset_vf = ice_vc_reset_vf_msg,
-       .add_mac_addr_msg = ice_vc_add_mac_addr_msg,
-       .del_mac_addr_msg = ice_vc_del_mac_addr_msg,
-       .cfg_qs_msg = ice_vc_cfg_qs_msg,
-       .ena_qs_msg = ice_vc_ena_qs_msg,
-       .dis_qs_msg = ice_vc_dis_qs_msg,
-       .request_qs_msg = ice_vc_request_qs_msg,
-       .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
-       .config_rss_key = ice_vc_config_rss_key,
-       .config_rss_lut = ice_vc_config_rss_lut,
-       .get_stats_msg = ice_vc_get_stats_msg,
-       .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
-       .add_vlan_msg = ice_vc_add_vlan_msg,
-       .remove_vlan_msg = ice_vc_remove_vlan_msg,
-       .ena_vlan_stripping = ice_vc_ena_vlan_stripping,
-       .dis_vlan_stripping = ice_vc_dis_vlan_stripping,
-       .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
-       .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
-       .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
-       .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
-       .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
-       .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
-       .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
-       .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
-       .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
-       .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
-};
-
-void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops)
-{
-       *ops = ice_vc_vf_dflt_ops;
-}
-
-/**
- * ice_vc_repr_add_mac
- * @vf: pointer to VF
- * @msg: virtchannel message
- *
- * When port representors are created, we do not add the MAC rule
- * to firmware; instead we store it so that the PF can report the
- * same MAC as the VF.
- */
-static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
-{
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-       struct virtchnl_ether_addr_list *al =
-           (struct virtchnl_ether_addr_list *)msg;
-       struct ice_vsi *vsi;
-       struct ice_pf *pf;
-       int i;
-
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
-           !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto handle_mac_exit;
-       }
-
-       pf = vf->pf;
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-               goto handle_mac_exit;
-       }
-
-       for (i = 0; i < al->num_elements; i++) {
-               u8 *mac_addr = al->list[i].addr;
-               int result;
-
-               if (!is_unicast_ether_addr(mac_addr) ||
-                   ether_addr_equal(mac_addr, vf->hw_lan_addr.addr))
-                       continue;
-
-               if (vf->pf_set_mac) {
-                       dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
-                       v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
-                       goto handle_mac_exit;
-               }
-
-               result = ice_eswitch_add_vf_mac_rule(pf, vf, mac_addr);
-               if (result) {
-                       dev_err(ice_pf_to_dev(pf), "Failed to add MAC %pM for VF %d\n, error %d\n",
-                               mac_addr, vf->vf_id, result);
-                       goto handle_mac_exit;
-               }
-
-               ice_vfhw_mac_add(vf, &al->list[i]);
-               vf->num_mac++;
-               break;
-       }
-
-handle_mac_exit:
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
-                                    v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_repr_del_mac - response with success for deleting MAC
- * @vf: pointer to VF
- * @msg: virtchannel message
- *
- * Respond with success so as not to break the normal VF flow.
- * For legacy VF drivers, try to update the cached MAC address.
- */
-static int
-ice_vc_repr_del_mac(struct ice_vf *vf, u8 *msg)
-{
-       struct virtchnl_ether_addr_list *al =
-               (struct virtchnl_ether_addr_list *)msg;
-
-       ice_update_legacy_cached_mac(vf, &al->list[0]);
-
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
-                                    VIRTCHNL_STATUS_SUCCESS, NULL, 0);
-}
-
-static int ice_vc_repr_add_vlan(struct ice_vf *vf, u8 __always_unused *msg)
-{
-       dev_dbg(ice_pf_to_dev(vf->pf),
-               "Can't add VLAN in switchdev mode for VF %d\n", vf->vf_id);
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
-                                    VIRTCHNL_STATUS_SUCCESS, NULL, 0);
-}
-
-static int ice_vc_repr_del_vlan(struct ice_vf *vf, u8 __always_unused *msg)
-{
-       dev_dbg(ice_pf_to_dev(vf->pf),
-               "Can't delete VLAN in switchdev mode for VF %d\n", vf->vf_id);
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
-                                    VIRTCHNL_STATUS_SUCCESS, NULL, 0);
-}
-
-static int ice_vc_repr_ena_vlan_stripping(struct ice_vf *vf)
-{
-       dev_dbg(ice_pf_to_dev(vf->pf),
-               "Can't enable VLAN stripping in switchdev mode for VF %d\n",
-               vf->vf_id);
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
-                                    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
-                                    NULL, 0);
-}
-
-static int ice_vc_repr_dis_vlan_stripping(struct ice_vf *vf)
-{
-       dev_dbg(ice_pf_to_dev(vf->pf),
-               "Can't disable VLAN stripping in switchdev mode for VF %d\n",
-               vf->vf_id);
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
-                                    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
-                                    NULL, 0);
-}
-
-static int
-ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
-{
-       dev_dbg(ice_pf_to_dev(vf->pf),
-               "Can't config promiscuous mode in switchdev mode for VF %d\n",
-               vf->vf_id);
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
-                                    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
-                                    NULL, 0);
-}
-
-void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops)
-{
-       ops->add_mac_addr_msg = ice_vc_repr_add_mac;
-       ops->del_mac_addr_msg = ice_vc_repr_del_mac;
-       ops->add_vlan_msg = ice_vc_repr_add_vlan;
-       ops->remove_vlan_msg = ice_vc_repr_del_vlan;
-       ops->ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping;
-       ops->dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping;
-       ops->cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode;
-}
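
Since the default and representor handlers share one ops layout, moving a VF
between legacy and switchdev behavior is just repopulating its ops struct;
only the handlers that must differ are overridden. A sketch of the expected
sequence (the surrounding call sites live elsewhere in the driver):

        ice_vc_set_dflt_vf_ops(&vf->vc_ops);    /* legacy mode */
        ...
        ice_vc_change_ops_to_repr(&vf->vc_ops); /* switchdev mode */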
-
-/**
- * ice_vc_process_vf_msg - Process request from VF
- * @pf: pointer to the PF structure
- * @event: pointer to the AQ event
- *
- * Called from the common ASQ/ARQ handler to
- * process a request from a VF
- */
-void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
-{
-       u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
-       s16 vf_id = le16_to_cpu(event->desc.retval);
-       u16 msglen = event->msg_len;
-       struct ice_vc_vf_ops *ops;
-       u8 *msg = event->msg_buf;
-       struct ice_vf *vf = NULL;
-       struct device *dev;
-       int err = 0;
-
-       dev = ice_pf_to_dev(pf);
-
-       vf = ice_get_vf_by_id(pf, vf_id);
-       if (!vf) {
-               dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n",
-                       vf_id, v_opcode, msglen);
-               return;
-       }
-
-       /* Check if VF is disabled. */
-       if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
-               err = -EPERM;
-               goto error_handler;
-       }
-
-       ops = &vf->vc_ops;
-
-       /* Perform basic checks on the msg */
-       err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
-       if (err) {
-               if (err == VIRTCHNL_STATUS_ERR_PARAM)
-                       err = -EPERM;
-               else
-                       err = -EINVAL;
-       }
-
-       if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
-               ice_vc_send_msg_to_vf(vf, v_opcode,
-                                     VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
-                                     0);
-               ice_put_vf(vf);
-               return;
-       }
-
-error_handler:
-       if (err) {
-               ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
-                                     NULL, 0);
-               dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
-                       vf_id, v_opcode, msglen, err);
-               ice_put_vf(vf);
-               return;
-       }
-
-       /* VF is being configured in another context that triggers a VFR, so no
-        * need to process this message
-        */
-       if (!mutex_trylock(&vf->cfg_lock)) {
-               dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n",
-                        vf->vf_id);
-               ice_put_vf(vf);
-               return;
-       }
-
-       switch (v_opcode) {
-       case VIRTCHNL_OP_VERSION:
-               err = ops->get_ver_msg(vf, msg);
-               break;
-       case VIRTCHNL_OP_GET_VF_RESOURCES:
-               err = ops->get_vf_res_msg(vf, msg);
-               if (ice_vf_init_vlan_stripping(vf))
-                       dev_dbg(dev, "Failed to initialize VLAN stripping for VF %d\n",
-                               vf->vf_id);
-               ice_vc_notify_vf_link_state(vf);
-               break;
-       case VIRTCHNL_OP_RESET_VF:
-               ops->reset_vf(vf);
-               break;
-       case VIRTCHNL_OP_ADD_ETH_ADDR:
-               err = ops->add_mac_addr_msg(vf, msg);
-               break;
-       case VIRTCHNL_OP_DEL_ETH_ADDR:
-               err = ops->del_mac_addr_msg(vf, msg);
-               break;
-       case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
-               err = ops->cfg_qs_msg(vf, msg);
-               break;
-       case VIRTCHNL_OP_ENABLE_QUEUES:
-               err = ops->ena_qs_msg(vf, msg);
-               ice_vc_notify_vf_link_state(vf);
-               break;
-       case VIRTCHNL_OP_DISABLE_QUEUES:
-               err = ops->dis_qs_msg(vf, msg);
-               break;
-       case VIRTCHNL_OP_REQUEST_QUEUES:
-               err = ops->request_qs_msg(vf, msg);
-               break;
-       case VIRTCHNL_OP_CONFIG_IRQ_MAP:
-               err = ops->cfg_irq_map_msg(vf, msg);
-               break;
-       case VIRTCHNL_OP_CONFIG_RSS_KEY:
-               err = ops->config_rss_key(vf, msg);
-               break;
-       case VIRTCHNL_OP_CONFIG_RSS_LUT:
-               err = ops->config_rss_lut(vf, msg);
-               break;
-       case VIRTCHNL_OP_GET_STATS:
-               err = ops->get_stats_msg(vf, msg);
-               break;
-       case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
-               err = ops->cfg_promiscuous_mode_msg(vf, msg);
-               break;
-       case VIRTCHNL_OP_ADD_VLAN:
-               err = ops->add_vlan_msg(vf, msg);
-               break;
-       case VIRTCHNL_OP_DEL_VLAN:
-               err = ops->remove_vlan_msg(vf, msg);
-               break;
-       case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
-               err = ops->ena_vlan_stripping(vf);
-               break;
-       case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
-               err = ops->dis_vlan_stripping(vf);
-               break;
-       case VIRTCHNL_OP_ADD_FDIR_FILTER:
-               err = ops->add_fdir_fltr_msg(vf, msg);
-               break;
-       case VIRTCHNL_OP_DEL_FDIR_FILTER:
-               err = ops->del_fdir_fltr_msg(vf, msg);
-               break;
-       case VIRTCHNL_OP_ADD_RSS_CFG:
-               err = ops->handle_rss_cfg_msg(vf, msg, true);
-               break;
-       case VIRTCHNL_OP_DEL_RSS_CFG:
-               err = ops->handle_rss_cfg_msg(vf, msg, false);
-               break;
-       case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
-               err = ops->get_offload_vlan_v2_caps(vf);
-               break;
-       case VIRTCHNL_OP_ADD_VLAN_V2:
-               err = ops->add_vlan_v2_msg(vf, msg);
-               break;
-       case VIRTCHNL_OP_DEL_VLAN_V2:
-               err = ops->remove_vlan_v2_msg(vf, msg);
-               break;
-       case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
-               err = ops->ena_vlan_stripping_v2_msg(vf, msg);
-               break;
-       case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
-               err = ops->dis_vlan_stripping_v2_msg(vf, msg);
-               break;
-       case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
-               err = ops->ena_vlan_insertion_v2_msg(vf, msg);
-               break;
-       case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
-               err = ops->dis_vlan_insertion_v2_msg(vf, msg);
-               break;
-       case VIRTCHNL_OP_UNKNOWN:
-       default:
-               dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
-                       vf_id);
-               err = ice_vc_send_msg_to_vf(vf, v_opcode,
-                                           VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
-                                           NULL, 0);
-               break;
-       }
-       if (err) {
-               /* The helper function cares less about error return values
-                * here as it is busy with pending work.
-                */
-               dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
-                        vf_id, v_opcode, err);
-       }
-
-       mutex_unlock(&vf->cfg_lock);
-       ice_put_vf(vf);
-}
-
-/**
- * ice_get_vf_cfg
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @ivi: VF configuration structure
- *
- * return VF configuration
- */
-int
-ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
-{
-       struct ice_pf *pf = ice_netdev_to_pf(netdev);
-       struct ice_vf *vf;
-       int ret;
-
-       vf = ice_get_vf_by_id(pf, vf_id);
-       if (!vf)
-               return -EINVAL;
-
-       ret = ice_check_vf_ready_for_cfg(vf);
-       if (ret)
-               goto out_put_vf;
-
-       ivi->vf = vf_id;
-       ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);
-
-       /* VF configuration for VLAN and applicable QoS */
-       ivi->vlan = ice_vf_get_port_vlan_id(vf);
-       ivi->qos = ice_vf_get_port_vlan_prio(vf);
-       if (ice_vf_is_port_vlan_ena(vf))
-               ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf));
-
-       ivi->trusted = vf->trusted;
-       ivi->spoofchk = vf->spoofchk;
-       if (!vf->link_forced)
-               ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
-       else if (vf->link_up)
-               ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
-       else
-               ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
-       ivi->max_tx_rate = vf->max_tx_rate;
-       ivi->min_tx_rate = vf->min_tx_rate;
-
-out_put_vf:
-       ice_put_vf(vf);
-       return ret;
-}
-
-/**
- * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
- * @pf: PF used to reference the switch's rules
- * @umac: unicast MAC to compare against existing switch rules
- *
- * Return true on the first/any match, else return false
- */
-static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
-{
-       struct ice_sw_recipe *mac_recipe_list =
-               &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
-       struct ice_fltr_mgmt_list_entry *list_itr;
-       struct list_head *rule_head;
-       struct mutex *rule_lock; /* protect MAC filter list access */
-
-       rule_head = &mac_recipe_list->filt_rules;
-       rule_lock = &mac_recipe_list->filt_rule_lock;
-
-       mutex_lock(rule_lock);
-       list_for_each_entry(list_itr, rule_head, list_entry) {
-               u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
-
-               if (ether_addr_equal(existing_mac, umac)) {
-                       mutex_unlock(rule_lock);
-                       return true;
-               }
-       }
-
-       mutex_unlock(rule_lock);
-
-       return false;
-}
-
-/**
- * ice_set_vf_mac
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @mac: MAC address
- *
- * program VF MAC address
- */
-int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
-{
-       struct ice_pf *pf = ice_netdev_to_pf(netdev);
-       struct ice_vf *vf;
-       int ret;
-
-       if (is_multicast_ether_addr(mac)) {
-               netdev_err(netdev, "%pM not a valid unicast address\n", mac);
-               return -EINVAL;
-       }
-
-       vf = ice_get_vf_by_id(pf, vf_id);
-       if (!vf)
-               return -EINVAL;
-
-       /* nothing left to do, unicast MAC already set */
-       if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
-           ether_addr_equal(vf->hw_lan_addr.addr, mac)) {
-               ret = 0;
-               goto out_put_vf;
-       }
-
-       ret = ice_check_vf_ready_for_cfg(vf);
-       if (ret)
-               goto out_put_vf;
-
-       if (ice_unicast_mac_exists(pf, mac)) {
-               netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
-                          mac, vf_id, mac);
-               ret = -EINVAL;
-               goto out_put_vf;
-       }
-
-       mutex_lock(&vf->cfg_lock);
-
-       /* VF is notified of its new MAC via the PF's response to the
-        * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
-        */
-       ether_addr_copy(vf->dev_lan_addr.addr, mac);
-       ether_addr_copy(vf->hw_lan_addr.addr, mac);
-       if (is_zero_ether_addr(mac)) {
-               /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
-               vf->pf_set_mac = false;
-               netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
-                           vf->vf_id);
-       } else {
-               /* PF will add MAC rule for the VF */
-               vf->pf_set_mac = true;
-               netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
-                           mac, vf_id);
-       }
-
-       ice_vc_reset_vf(vf);
-       mutex_unlock(&vf->cfg_lock);
-
-out_put_vf:
-       ice_put_vf(vf);
-       return ret;
-}
-
-/**
- * ice_set_vf_trust
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @trusted: Boolean value to enable/disable trusted VF
- *
- * Enable or disable a given VF as trusted
- */
-int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
-{
-       struct ice_pf *pf = ice_netdev_to_pf(netdev);
-       struct ice_vf *vf;
-       int ret;
-
-       if (ice_is_eswitch_mode_switchdev(pf)) {
-               dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
-               return -EOPNOTSUPP;
-       }
-
-       vf = ice_get_vf_by_id(pf, vf_id);
-       if (!vf)
-               return -EINVAL;
-
-       ret = ice_check_vf_ready_for_cfg(vf);
-       if (ret)
-               goto out_put_vf;
-
-       /* Check if already trusted */
-       if (trusted == vf->trusted) {
-               ret = 0;
-               goto out_put_vf;
-       }
-
-       mutex_lock(&vf->cfg_lock);
-
-       vf->trusted = trusted;
-       ice_vc_reset_vf(vf);
-       dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
-                vf_id, trusted ? "" : "un");
-
-       mutex_unlock(&vf->cfg_lock);
-
-out_put_vf:
-       ice_put_vf(vf);
-       return ret;
-}
-
-/**
- * ice_set_vf_link_state
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @link_state: required link state
- *
- * Set VF's link state, irrespective of physical link state status
- */
-int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
-{
-       struct ice_pf *pf = ice_netdev_to_pf(netdev);
-       struct ice_vf *vf;
-       int ret;
-
-       vf = ice_get_vf_by_id(pf, vf_id);
-       if (!vf)
-               return -EINVAL;
-
-       ret = ice_check_vf_ready_for_cfg(vf);
-       if (ret)
-               goto out_put_vf;
-
-       switch (link_state) {
-       case IFLA_VF_LINK_STATE_AUTO:
-               vf->link_forced = false;
-               break;
-       case IFLA_VF_LINK_STATE_ENABLE:
-               vf->link_forced = true;
-               vf->link_up = true;
-               break;
-       case IFLA_VF_LINK_STATE_DISABLE:
-               vf->link_forced = true;
-               vf->link_up = false;
-               break;
-       default:
-               ret = -EINVAL;
-               goto out_put_vf;
-       }
-
-       ice_vc_notify_vf_link_state(vf);
-
-out_put_vf:
-       ice_put_vf(vf);
-       return ret;
-}
-
-/**
- * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
- * @pf: PF associated with VFs
- */
-static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
-{
-       struct ice_vf *vf;
-       unsigned int bkt;
-       int rate = 0;
-
-       rcu_read_lock();
-       ice_for_each_vf_rcu(pf, bkt, vf)
-               rate += vf->min_tx_rate;
-       rcu_read_unlock();
-
-       return rate;
-}
-
-/**
- * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
- * @vf: VF trying to configure min_tx_rate
- * @min_tx_rate: min Tx rate in Mbps
- *
- * Check if the min_tx_rate being passed in will cause oversubscription of the
- * total min_tx_rate based on the current link speed and all other VFs'
- * configured min_tx_rate
- *
- * Return true if the passed min_tx_rate would cause oversubscription, else
- * return false
- */
-static bool
-ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
-{
-       int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf));
-       int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
-
-       /* this VF's previous rate is being overwritten */
-       all_vfs_min_tx_rate -= vf->min_tx_rate;
-
-       if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
-               dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
-                       min_tx_rate, vf->vf_id,
-                       all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
-                       link_speed_mbps);
-               return true;
-       }
-
-       return false;
-}
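
The oversubscription test is a simple budget check against link speed. With
illustrative numbers: on a 10000 Mbps link where all VFs together already
reserve 8000 Mbps and this VF's old reservation of 1000 Mbps is being
replaced, requesting 3500 Mbps fails because (8000 - 1000) + 3500 = 10500
exceeds 10000, while 3000 Mbps would just fit.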
-
-/**
- * ice_set_vf_bw - set min/max VF bandwidth
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @min_tx_rate: Minimum Tx rate in Mbps
- * @max_tx_rate: Maximum Tx rate in Mbps
- */
-int
-ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
-             int max_tx_rate)
-{
-       struct ice_pf *pf = ice_netdev_to_pf(netdev);
-       struct ice_vsi *vsi;
-       struct device *dev;
-       struct ice_vf *vf;
-       int ret;
-
-       dev = ice_pf_to_dev(pf);
-
-       vf = ice_get_vf_by_id(pf, vf_id);
-       if (!vf)
-               return -EINVAL;
-
-       ret = ice_check_vf_ready_for_cfg(vf);
-       if (ret)
-               goto out_put_vf;
-
-       vsi = ice_get_vf_vsi(vf);
-
-       /* when max_tx_rate is zero that means no max Tx rate limiting, so only
-        * check if max_tx_rate is non-zero
-        */
-       if (max_tx_rate && min_tx_rate > max_tx_rate) {
-               dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n",
-                       min_tx_rate, max_tx_rate);
-               ret = -EINVAL;
-               goto out_put_vf;
-       }
-
-       if (min_tx_rate && ice_is_dcb_active(pf)) {
-               dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
-               ret = -EOPNOTSUPP;
-               goto out_put_vf;
-       }
-
-       if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) {
-               ret = -EINVAL;
-               goto out_put_vf;
-       }
-
-       if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
-               ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
-               if (ret) {
-                       dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
-                               vf->vf_id);
-                       goto out_put_vf;
-               }
-
-               vf->min_tx_rate = min_tx_rate;
-       }
-
-       if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
-               ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
-               if (ret) {
-                       dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
-                               vf->vf_id);
-                       goto out_put_vf;
-               }
-
-               vf->max_tx_rate = max_tx_rate;
-       }
-
-out_put_vf:
-       ice_put_vf(vf);
-       return ret;
-}
-
-/**
- * ice_get_vf_stats - populate some stats for the VF
- * @netdev: the netdev of the PF
- * @vf_id: the host OS identifier (0-255)
- * @vf_stats: pointer to the OS memory to be initialized
- */
-int ice_get_vf_stats(struct net_device *netdev, int vf_id,
-                    struct ifla_vf_stats *vf_stats)
-{
-       struct ice_pf *pf = ice_netdev_to_pf(netdev);
-       struct ice_eth_stats *stats;
-       struct ice_vsi *vsi;
-       struct ice_vf *vf;
-       int ret;
-
-       vf = ice_get_vf_by_id(pf, vf_id);
-       if (!vf)
-               return -EINVAL;
-
-       ret = ice_check_vf_ready_for_cfg(vf);
-       if (ret)
-               goto out_put_vf;
-
-       vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               ret = -EINVAL;
-               goto out_put_vf;
-       }
-
-       ice_update_eth_stats(vsi);
-       stats = &vsi->eth_stats;
-
-       memset(vf_stats, 0, sizeof(*vf_stats));
-
-       vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
-               stats->rx_multicast;
-       vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
-               stats->tx_multicast;
-       vf_stats->rx_bytes   = stats->rx_bytes;
-       vf_stats->tx_bytes   = stats->tx_bytes;
-       vf_stats->broadcast  = stats->rx_broadcast;
-       vf_stats->multicast  = stats->rx_multicast;
-       vf_stats->rx_dropped = stats->rx_discards;
-       vf_stats->tx_dropped = stats->tx_discards;
-
-out_put_vf:
-       ice_put_vf(vf);
-       return ret;
-}
-
-/**
- * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
- * @vf: pointer to the VF structure
- */
-void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
-{
-       struct ice_pf *pf = vf->pf;
-       struct device *dev;
-
-       dev = ice_pf_to_dev(pf);
-
-       dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
-                vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
-                vf->dev_lan_addr.addr,
-                test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
-                         ? "on" : "off");
-}
-
-/**
- * ice_print_vfs_mdd_events - print VFs' malicious driver detect events
- * @pf: pointer to the PF structure
- *
- * Called from ice_handle_mdd_event to rate limit and print VFs' MDD events.
- */
-void ice_print_vfs_mdd_events(struct ice_pf *pf)
-{
-       struct device *dev = ice_pf_to_dev(pf);
-       struct ice_hw *hw = &pf->hw;
-       struct ice_vf *vf;
-       unsigned int bkt;
-
-       /* check that there are pending MDD events to print */
-       if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
-               return;
-
-       /* VF MDD event logs are rate limited to one second intervals */
-       if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
-               return;
-
-       pf->vfs.last_printed_mdd_jiffies = jiffies;
-
-       mutex_lock(&pf->vfs.table_lock);
-       ice_for_each_vf(pf, bkt, vf) {
-               /* only print Rx MDD event message if there are new events */
-               if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
-                       vf->mdd_rx_events.last_printed =
-                                                       vf->mdd_rx_events.count;
-                       ice_print_vf_rx_mdd_event(vf);
-               }
-
-               /* only print Tx MDD event message if there are new events */
-               if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
-                       vf->mdd_tx_events.last_printed =
-                                                       vf->mdd_tx_events.count;
-
-                       dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
-                                vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
-                                vf->dev_lan_addr.addr);
-               }
-       }
-       mutex_unlock(&pf->vfs.table_lock);
-}
-
-/**
- * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
- * @pdev: pointer to a pci_dev structure
- *
- * Called when recovering from a PF FLR to restore interrupt capability to
- * the VFs.
- */
-void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
-{
-       u16 vf_id;
-       int pos;
-
-       if (!pci_num_vf(pdev))
-               return;
-
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
-       if (pos) {
-               struct pci_dev *vfdev;
-
-               pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
-                                    &vf_id);
-               vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
-               while (vfdev) {
-                       if (vfdev->is_virtfn && vfdev->physfn == pdev)
-                               pci_restore_msi_state(vfdev);
-                       vfdev = pci_get_device(pdev->vendor, vf_id,
-                                              vfdev);
-               }
-       }
-}
-
-/**
- * ice_is_malicious_vf - helper function to detect a malicious VF
- * @pf: ptr to struct ice_pf
- * @event: pointer to the AQ event
- * @num_msg_proc: the number of messages processed so far
- * @num_msg_pending: the number of messages pending in the admin queue
- */
-bool
-ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
-                   u16 num_msg_proc, u16 num_msg_pending)
-{
-       s16 vf_id = le16_to_cpu(event->desc.retval);
-       struct device *dev = ice_pf_to_dev(pf);
-       struct ice_mbx_data mbxdata;
-       bool malvf = false;
-       struct ice_vf *vf;
-       int status;
-
-       vf = ice_get_vf_by_id(pf, vf_id);
-       if (!vf)
-               return false;
-
-       if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
-               goto out_put_vf;
-
-       mbxdata.num_msg_proc = num_msg_proc;
-       mbxdata.num_pending_arq = num_msg_pending;
-       mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
-#define ICE_MBX_OVERFLOW_WATERMARK 64
-       mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
-
-       /* check to see if we have a malicious VF */
-       status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
-       if (status)
-               goto out_put_vf;
-
-       if (malvf) {
-               bool report_vf = false;
-
-               /* if the VF is malicious and we haven't let the user
-                * know about it, then let them know now
-                */
-               status = ice_mbx_report_malvf(&pf->hw, pf->vfs.malvfs,
-                                             ICE_MAX_VF_COUNT, vf_id,
-                                             &report_vf);
-               if (status)
-                       dev_dbg(dev, "Error reporting malicious VF\n");
-
-               if (report_vf) {
-                       struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
-
-                       if (pf_vsi)
-                               dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
-                                        &vf->dev_lan_addr.addr[0],
-                                        pf_vsi->netdev->dev_addr);
-               }
-       }
-
-out_put_vf:
-       ice_put_vf(vf);
-       return malvf;
-}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
deleted file mode 100644 (file)
index 02e3d30..0000000
+++ /dev/null
@@ -1,440 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Intel Corporation. */
-
-#ifndef _ICE_VIRTCHNL_PF_H_
-#define _ICE_VIRTCHNL_PF_H_
-#include "ice.h"
-#include "ice_virtchnl_fdir.h"
-#include "ice_vsi_vlan_ops.h"
-
-/* Restrict the number of MAC addresses and VLANs that a non-trusted VF can program */
-#define ICE_MAX_VLAN_PER_VF            8
-/* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for
- * broadcast, and 16 for additional unicast/multicast filters
- */
-#define ICE_MAX_MACADDR_PER_VF         18
-
-/* Malicious Driver Detection */
-#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED                10
-#define ICE_MDD_EVENTS_THRESHOLD               30
-
-/* Static VF transaction/status register def */
-#define VF_DEVICE_STATUS               0xAA
-#define VF_TRANS_PENDING_M             0x20
-
-/* wait defines for polling PF_PCI_CIAD register status */
-#define ICE_PCI_CIAD_WAIT_COUNT                100
-#define ICE_PCI_CIAD_WAIT_DELAY_US     1
-
-/* VF resource constraints */
-#define ICE_MAX_VF_COUNT               256
-#define ICE_MIN_QS_PER_VF              1
-#define ICE_NONQ_VECS_VF               1
-#define ICE_MAX_SCATTER_QS_PER_VF      16
-#define ICE_MAX_RSS_QS_PER_VF          16
-#define ICE_NUM_VF_MSIX_MED            17
-#define ICE_NUM_VF_MSIX_SMALL          5
-#define ICE_NUM_VF_MSIX_MULTIQ_MIN     3
-#define ICE_MIN_INTR_PER_VF            (ICE_MIN_QS_PER_VF + 1)
-#define ICE_MAX_VF_RESET_TRIES         40
-#define ICE_MAX_VF_RESET_SLEEP_MS      20
-
-/* VF Hash Table access functions
- *
- * These functions provide abstraction for interacting with the VF hash table.
- * In general, direct access to the hash table should be avoided outside of
- * these functions where possible.
- *
- * The VF entries in the hash table are protected by reference counting to
- * track lifetime of accesses from the table. The ice_get_vf_by_id() function
- * obtains a reference to the VF structure which must be dropped by using
- * ice_put_vf().
- */
-
-/**
- * ice_for_each_vf - Iterate over each VF entry
- * @pf: pointer to the PF private structure
- * @bkt: bucket index used for iteration
- * @vf: pointer to the VF entry currently being processed in the loop.
- *
- * The bkt variable is an unsigned integer iterator used to traverse the VF
- * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
- * Use vf->vf_id to get the id number if needed.
- *
- * The caller is expected to be under the table_lock mutex for the entire
- * loop. Use this iterator if your loop is long or if it might sleep.
- */
-#define ice_for_each_vf(pf, bkt, vf) \
-       hash_for_each((pf)->vfs.table, (bkt), (vf), entry)
-
-/**
- * ice_for_each_vf_rcu - Iterate over each VF entry protected by RCU
- * @pf: pointer to the PF private structure
- * @bkt: bucket index used for iteration
- * @vf: pointer to the VF entry currently being processed in the loop.
- *
- * The bkt variable is an unsigned integer iterator used to traverse the VF
- * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
- * Use vf->vf_id to get the id number if needed.
- *
- * The caller is expected to be under rcu_read_lock() for the entire loop.
- * Only use this iterator if your loop is short and you can guarantee it does
- * not sleep.
- */
-#define ice_for_each_vf_rcu(pf, bkt, vf) \
-       hash_for_each_rcu((pf)->vfs.table, (bkt), (vf), entry)
-
-/* Specific VF states */
-enum ice_vf_states {
-       ICE_VF_STATE_INIT = 0,          /* PF is initializing VF */
-       ICE_VF_STATE_ACTIVE,            /* VF resources are allocated for use */
-       ICE_VF_STATE_QS_ENA,            /* VF queue(s) enabled */
-       ICE_VF_STATE_DIS,
-       ICE_VF_STATE_MC_PROMISC,
-       ICE_VF_STATE_UC_PROMISC,
-       ICE_VF_STATES_NBITS
-};
-
-/* VF capabilities */
-enum ice_virtchnl_cap {
-       ICE_VIRTCHNL_VF_CAP_L2 = 0,
-       ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
-};
-
-struct ice_time_mac {
-       unsigned long time_modified;
-       u8 addr[ETH_ALEN];
-};
-
-/* VF MDD events print structure */
-struct ice_mdd_vf_events {
-       u16 count;                      /* total count of Rx|Tx events */
-       /* count number of the last printed event */
-       u16 last_printed;
-};
-
-struct ice_vf;
-
-struct ice_vc_vf_ops {
-       int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
-       int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
-       void (*reset_vf)(struct ice_vf *vf);
-       int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
-       int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
-       int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg);
-       int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg);
-       int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg);
-       int (*request_qs_msg)(struct ice_vf *vf, u8 *msg);
-       int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
-       int (*config_rss_key)(struct ice_vf *vf, u8 *msg);
-       int (*config_rss_lut)(struct ice_vf *vf, u8 *msg);
-       int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
-       int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
-       int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
-       int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
-       int (*ena_vlan_stripping)(struct ice_vf *vf);
-       int (*dis_vlan_stripping)(struct ice_vf *vf);
-       int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
-       int (*add_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
-       int (*del_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
-       int (*get_offload_vlan_v2_caps)(struct ice_vf *vf);
-       int (*add_vlan_v2_msg)(struct ice_vf *vf, u8 *msg);
-       int (*remove_vlan_v2_msg)(struct ice_vf *vf, u8 *msg);
-       int (*ena_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg);
-       int (*dis_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg);
-       int (*ena_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg);
-       int (*dis_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg);
-};
-
-/* Virtchnl/SR-IOV config info */
-struct ice_vfs {
-       DECLARE_HASHTABLE(table, 8);    /* table of VF entries */
-       struct mutex table_lock;        /* Lock for protecting the hash table */
-       u16 num_supported;              /* max supported VFs on this PF */
-       u16 num_qps_per;                /* number of queue pairs per VF */
-       u16 num_msix_per;               /* number of MSI-X vectors per VF */
-       unsigned long last_printed_mdd_jiffies; /* MDD message rate limit */
-       DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT); /* malicious VF indicator */
-};
-
-/* VF information structure */
-struct ice_vf {
-       struct hlist_node entry;
-       struct rcu_head rcu;
-       struct kref refcnt;
-       struct ice_pf *pf;
-
-       /* Used during virtchnl message handling and NDO ops against the VF
-        * that will trigger a VFR
-        */
-       struct mutex cfg_lock;
-
-       u16 vf_id;                      /* VF ID in the PF space */
-       u16 lan_vsi_idx;                /* index into PF struct */
-       u16 ctrl_vsi_idx;
-       struct ice_vf_fdir fdir;
-       /* first vector index of this VF in the PF space */
-       int first_vector_idx;
-       struct ice_sw *vf_sw_id;        /* switch ID the VF VSIs connect to */
-       struct virtchnl_version_info vf_ver;
-       u32 driver_caps;                /* reported by VF driver */
-       struct virtchnl_ether_addr dev_lan_addr;
-       struct virtchnl_ether_addr hw_lan_addr;
-       struct ice_time_mac legacy_last_added_umac;
-       DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
-       DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
-       struct ice_vlan port_vlan_info; /* Port VLAN ID, QoS, and TPID */
-       struct virtchnl_vlan_caps vlan_v2_caps;
-       u8 pf_set_mac:1;                /* VF MAC address set by VMM admin */
-       u8 trusted:1;
-       u8 spoofchk:1;
-       u8 link_forced:1;
-       u8 link_up:1;                   /* only valid if VF link is forced */
-       /* VSI indices - actual VSI pointers are maintained in the PF structure
-        * When assigned, these will be non-zero, because VSI 0 is always
-        * the main LAN VSI for the PF.
-        */
-       u16 lan_vsi_num;                /* ID as used by firmware */
-       unsigned int min_tx_rate;       /* Minimum Tx bandwidth limit in Mbps */
-       unsigned int max_tx_rate;       /* Maximum Tx bandwidth limit in Mbps */
-       DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
-
-       u64 num_inval_msgs;             /* number of continuous invalid msgs */
-       u64 num_valid_msgs;             /* number of valid msgs detected */
-       unsigned long vf_caps;          /* VF's adv. capabilities */
-       u8 num_req_qs;                  /* num of queue pairs requested by VF */
-       u16 num_mac;
-       u16 num_vf_qs;                  /* num of queue configured per VF */
-       struct ice_mdd_vf_events mdd_rx_events;
-       struct ice_mdd_vf_events mdd_tx_events;
-       DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
-
-       struct ice_repr *repr;
-
-       struct ice_vc_vf_ops vc_ops;
-
-       /* devlink port data */
-       struct devlink_port devlink_port;
-};
-
-#ifdef CONFIG_PCI_IOV
-struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id);
-void ice_put_vf(struct ice_vf *vf);
-bool ice_has_vfs(struct ice_pf *pf);
-u16 ice_get_num_vfs(struct ice_pf *pf);
-struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
-void ice_process_vflr_event(struct ice_pf *pf);
-int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
-int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
-int
-ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
-
-void ice_free_vfs(struct ice_pf *pf);
-void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
-void ice_vc_notify_link_state(struct ice_pf *pf);
-void ice_vc_notify_reset(struct ice_pf *pf);
-void ice_vc_notify_vf_link_state(struct ice_vf *vf);
-void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops);
-void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops);
-bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
-bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
-void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
-bool
-ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
-                   u16 num_msg_proc, u16 num_msg_pending);
-
-int
-ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
-                    __be16 vlan_proto);
-
-int
-ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
-             int max_tx_rate);
-
-int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
-
-int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
-
-int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
-
-bool ice_is_vf_disabled(struct ice_vf *vf);
-
-int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
-
-int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
-
-void ice_set_vf_state_qs_dis(struct ice_vf *vf);
-int
-ice_get_vf_stats(struct net_device *netdev, int vf_id,
-                struct ifla_vf_stats *vf_stats);
-bool ice_is_any_vf_in_promisc(struct ice_pf *pf);
-void
-ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
-void ice_print_vfs_mdd_events(struct ice_pf *pf);
-void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
-bool
-ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
-struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
-int
-ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
-                     enum virtchnl_status_code v_retval, u8 *msg, u16 msglen);
-bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
-bool ice_vf_is_port_vlan_ena(struct ice_vf *vf);
-#else /* CONFIG_PCI_IOV */
-static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
-{
-       return NULL;
-}
-
-static inline void ice_put_vf(struct ice_vf *vf)
-{
-}
-
-static inline bool ice_has_vfs(struct ice_pf *pf)
-{
-       return false;
-}
-
-static inline u16 ice_get_num_vfs(struct ice_pf *pf)
-{
-       return 0;
-}
-
-static inline void ice_process_vflr_event(struct ice_pf *pf) { }
-static inline void ice_free_vfs(struct ice_pf *pf) { }
-static inline
-void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { }
-static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
-static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
-static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { }
-static inline void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops) { }
-static inline void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops) { }
-static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf) { }
-static inline
-void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
-static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
-static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
-static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
-
-static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
-{
-       return -EOPNOTSUPP;
-}
-
-static inline bool ice_is_vf_disabled(struct ice_vf *vf)
-{
-       return true;
-}
-
-static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
-{
-       return NULL;
-}
-
-static inline bool
-ice_is_malicious_vf(struct ice_pf __always_unused *pf,
-                   struct ice_rq_event_info __always_unused *event,
-                   u16 __always_unused num_msg_proc,
-                   u16 __always_unused num_msg_pending)
-{
-       return false;
-}
-
-static inline bool
-ice_reset_all_vfs(struct ice_pf __always_unused *pf,
-                 bool __always_unused is_vflr)
-{
-       return true;
-}
-
-static inline bool
-ice_reset_vf(struct ice_vf __always_unused *vf, bool __always_unused is_vflr)
-{
-       return true;
-}
-
-static inline int
-ice_sriov_configure(struct pci_dev __always_unused *pdev,
-                   int __always_unused num_vfs)
-{
-       return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_mac(struct net_device __always_unused *netdev,
-              int __always_unused vf_id, u8 __always_unused *mac)
-{
-       return -EOPNOTSUPP;
-}
-
-static inline int
-ice_get_vf_cfg(struct net_device __always_unused *netdev,
-              int __always_unused vf_id,
-              struct ifla_vf_info __always_unused *ivi)
-{
-       return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_trust(struct net_device __always_unused *netdev,
-                int __always_unused vf_id, bool __always_unused trusted)
-{
-       return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_port_vlan(struct net_device __always_unused *netdev,
-                    int __always_unused vf_id, u16 __always_unused vid,
-                    u8 __always_unused qos, __be16 __always_unused v_proto)
-{
-       return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_spoofchk(struct net_device __always_unused *netdev,
-                   int __always_unused vf_id, bool __always_unused ena)
-{
-       return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_link_state(struct net_device __always_unused *netdev,
-                     int __always_unused vf_id, int __always_unused link_state)
-{
-       return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_bw(struct net_device __always_unused *netdev,
-             int __always_unused vf_id, int __always_unused min_tx_rate,
-             int __always_unused max_tx_rate)
-{
-       return -EOPNOTSUPP;
-}
-
-static inline int
-ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
-                   struct ice_q_vector __always_unused *q_vector)
-{
-       return 0;
-}
-
-static inline int
-ice_get_vf_stats(struct net_device __always_unused *netdev,
-                int __always_unused vf_id,
-                struct ifla_vf_stats __always_unused *vf_stats)
-{
-       return -EOPNOTSUPP;
-}
-
-static inline bool ice_is_any_vf_in_promisc(struct ice_pf __always_unused *pf)
-{
-       return false;
-}
-
-static inline bool ice_vf_is_port_vlan_ena(struct ice_vf __always_unused *vf)
-{
-       return false;
-}
-#endif /* CONFIG_PCI_IOV */
-#endif /* _ICE_VIRTCHNL_PF_H_ */
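
A note on the removed header: it documents the VF hash table's
reference-counting contract, where every successful ice_get_vf_by_id()
must be balanced by ice_put_vf(). A minimal sketch of that contract,
with a hypothetical caller name:

        /* Hypothetical caller illustrating the get/put contract. */
        static void example_handle_vf_event(struct ice_pf *pf, u16 vf_id)
        {
                struct ice_vf *vf;

                vf = ice_get_vf_by_id(pf, vf_id);       /* takes a reference */
                if (!vf)
                        return;         /* VF was removed or never existed */

                /* ... use vf while the reference is held ... */

                ice_put_vf(vf);         /* drops the reference */
        }
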
index 0cbb579..21faec8 100644 (file)
@@ -4,13 +4,12 @@
 #ifndef _ICE_XSK_H_
 #define _ICE_XSK_H_
 #include "ice_txrx.h"
-#include "ice.h"
 
 #define PKTS_PER_BATCH 8
 
 #ifdef __clang__
 #define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for
-#elif __GNUC__ >= 4
+#elif __GNUC__ >= 8
 #define loop_unrolled_for _Pragma("GCC unroll 8") for
 #else
 #define loop_unrolled_for for
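
The compiler check above is tightened because "#pragma GCC unroll" only
exists from GCC 8 onward; compilers matching the old __GNUC__ >= 4 test
would reject the pragma. Usage is unchanged, e.g. (the loop body helper
here is hypothetical):

        /* Unrolled 8x on clang and GCC >= 8, a plain loop elsewhere. */
        loop_unrolled_for (i = 0; i < PKTS_PER_BATCH; i++)
                xsk_process_desc(xdp_ring, i);  /* hypothetical helper */
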
index 6580fcd..02fec94 100644 (file)
@@ -165,23 +165,21 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
        unsigned long flags;
        u64 ns;
 
+       memset(hwtstamps, 0, sizeof(*hwtstamps));
+
        switch (adapter->hw.mac.type) {
        case e1000_82576:
        case e1000_82580:
        case e1000_i354:
        case e1000_i350:
                spin_lock_irqsave(&adapter->tmreg_lock, flags);
-
                ns = timecounter_cyc2time(&adapter->tc, systim);
-
                spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
 
-               memset(hwtstamps, 0, sizeof(*hwtstamps));
                hwtstamps->hwtstamp = ns_to_ktime(ns);
                break;
        case e1000_i210:
        case e1000_i211:
-               memset(hwtstamps, 0, sizeof(*hwtstamps));
                /* Upper 32 bits contain s, lower 32 bits contain ns. */
                hwtstamps->hwtstamp = ktime_set(systim >> 32,
                                                systim & 0xFFFFFFFF);
index 4a69823..921a4d9 100644 (file)
@@ -177,11 +177,14 @@ struct vf_data_storage {
        u16 pf_vlan; /* When set, guest VLAN config not allowed. */
        u16 pf_qos;
        u16 tx_rate;
+       int link_enable;
+       int link_state;
        u8 spoofchk_enabled;
        bool rss_query_enabled;
        u8 trusted;
        int xcast_mode;
        unsigned int vf_api;
+       u8 primary_abort_count;
 };
 
 enum ixgbevf_xcast_modes {
@@ -556,6 +559,8 @@ struct ixgbe_mac_addr {
 #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
 #define IXGBE_SFP_POLL_JIFFIES (2 * HZ)        /* SFP poll every 2 seconds */
 
+#define IXGBE_PRIMARY_ABORT_LIMIT      5
+
 /* board specific private data structure */
 struct ixgbe_adapter {
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -614,6 +619,7 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG2_RX_LEGACY                  BIT(16)
 #define IXGBE_FLAG2_IPSEC_ENABLED              BIT(17)
 #define IXGBE_FLAG2_VF_IPSEC_ENABLED           BIT(18)
+#define IXGBE_FLAG2_AUTO_DISABLE_VF            BIT(19)
 
        /* Tx fast path data */
        int num_tx_queues;
index f70967c..628d0eb 100644 (file)
@@ -138,6 +138,8 @@ static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
        "legacy-rx",
 #define IXGBE_PRIV_FLAGS_VF_IPSEC_EN   BIT(1)
        "vf-ipsec",
+#define IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF       BIT(2)
+       "mdd-disable-vf",
 };
 
 #define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
@@ -3510,6 +3512,9 @@ static u32 ixgbe_get_priv_flags(struct net_device *netdev)
        if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)
                priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN;
 
+       if (adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF)
+               priv_flags |= IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF;
+
        return priv_flags;
 }
 
@@ -3517,6 +3522,7 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        unsigned int flags2 = adapter->flags2;
+       unsigned int i;
 
        flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
        if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
@@ -3526,6 +3532,21 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
        if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN)
                flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED;
 
+       flags2 &= ~IXGBE_FLAG2_AUTO_DISABLE_VF;
+       if (priv_flags & IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF) {
+               if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+                       /* Reset primary abort counter */
+                       for (i = 0; i < adapter->num_vfs; i++)
+                               adapter->vfinfo[i].primary_abort_count = 0;
+
+                       flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
+               } else {
+                       e_info(probe,
+                              "Cannot set private flags: Operation not supported\n");
+                       return -EOPNOTSUPP;
+               }
+       }
+
        if (flags2 != adapter->flags2) {
                adapter->flags2 = flags2;
 
index 60a72af..c4a4954 100644 (file)
@@ -5687,6 +5687,9 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+       /* update Rx/Tx enable settings for all active VFs */
+       ixgbe_set_all_vfs(adapter);
 }
 
 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
@@ -6144,11 +6147,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
                for (i = 0 ; i < adapter->num_vfs; i++)
                        adapter->vfinfo[i].clear_to_send = false;
 
-               /* ping all the active vfs to let them know we are going down */
-               ixgbe_ping_all_vfs(adapter);
-
-               /* Disable all VFTE/VFRE TX/RX */
-               ixgbe_disable_tx_rx(adapter);
+               /* update Rx/Tx enable settings for all active VFs */
+               ixgbe_set_all_vfs(adapter);
        }
 
        /* disable transmits in the hardware now that interrupts are off */
@@ -7613,6 +7613,27 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
 }
 
 #ifdef CONFIG_PCI_IOV
+static void ixgbe_bad_vf_abort(struct ixgbe_adapter *adapter, u32 vf)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
+           adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF) {
+               adapter->vfinfo[vf].primary_abort_count++;
+               if (adapter->vfinfo[vf].primary_abort_count ==
+                   IXGBE_PRIMARY_ABORT_LIMIT) {
+                       ixgbe_set_vf_link_state(adapter, vf,
+                                               IFLA_VF_LINK_STATE_DISABLE);
+                       adapter->vfinfo[vf].primary_abort_count = 0;
+
+                       e_info(drv,
+                              "Malicious Driver Detection event detected on PF %d VF %d MAC: %pM mdd-disable-vf=on",
+                              hw->bus.func, vf,
+                              adapter->vfinfo[vf].vf_mac_addresses);
+               }
+       }
+}
+
 static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
@@ -7644,8 +7665,10 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
                        continue;
                pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
                if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
-                   status_reg & PCI_STATUS_REC_MASTER_ABORT)
+                   status_reg & PCI_STATUS_REC_MASTER_ABORT) {
+                       ixgbe_bad_vf_abort(adapter, vf);
                        pcie_flr(vfdev);
+               }
        }
 }
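
For context, the loop above (only partly visible in this hunk) polls
each VF's PCI_STATUS configuration word; a latched Received Master
Abort bit means the VF issued a request the host aborted. A standalone
sketch of the same probe, assuming the caller already holds a valid
pci_dev reference:

        /* True if the device has latched a received master abort. */
        static bool vf_saw_master_abort(struct pci_dev *vfdev)
        {
                u16 status;

                pci_read_config_word(vfdev, PCI_STATUS, &status);
                /* 0xffff means the config read itself failed */
                return status != 0xffff &&
                       (status & PCI_STATUS_REC_MASTER_ABORT);
        }

With the new IXGBE_FLAG2_AUTO_DISABLE_VF path, gated by the
mdd-disable-vf private flag, five such events in a row
(IXGBE_PRIMARY_ABORT_LIMIT) administratively disable the VF's link
before the function-level reset is issued.
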
 
@@ -10284,6 +10307,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_set_vf_vlan        = ixgbe_ndo_set_vf_vlan,
        .ndo_set_vf_rate        = ixgbe_ndo_set_vf_bw,
        .ndo_set_vf_spoofchk    = ixgbe_ndo_set_vf_spoofchk,
+       .ndo_set_vf_link_state  = ixgbe_ndo_set_vf_link_state,
        .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
        .ndo_set_vf_trust       = ixgbe_ndo_set_vf_trust,
        .ndo_get_vf_config      = ixgbe_ndo_get_vf_config,
@@ -10745,6 +10769,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto err_sw_init;
 
+       if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+               adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
+
        switch (adapter->hw.mac.type) {
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
index a148534..8f4316b 100644 (file)
@@ -85,6 +85,8 @@ enum ixgbe_pfvf_api_rev {
 #define IXGBE_VF_IPSEC_ADD     0x0d
 #define IXGBE_VF_IPSEC_DEL     0x0e
 
+#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+
 /* length of permanent address message returned from PF */
 #define IXGBE_VF_PERMADDR_MSG_LEN 4
 /* word in permanent address message with the current multicast type */
index 214a38d..7f11c0a 100644 (file)
@@ -96,6 +96,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
        for (i = 0; i < num_vfs; i++) {
                /* enable spoof checking for all VFs */
                adapter->vfinfo[i].spoofchk_enabled = true;
+               adapter->vfinfo[i].link_enable = true;
 
                /* We support VF RSS querying only for 82599 and x540
                 * devices at the moment. These devices share RSS
@@ -820,6 +821,57 @@ static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
        }
 }
 
+/**
+ * ixgbe_set_vf_rx_tx - Set VF Rx/Tx enable state
+ * @adapter: Pointer to adapter struct
+ * @vf: VF identifier
+ *
+ * Set or clear the transmit and receive enable bits for the given VF
+ **/
+static void ixgbe_set_vf_rx_tx(struct ixgbe_adapter *adapter, int vf)
+{
+       u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 reg_offset, vf_shift;
+
+       vf_shift = vf % 32;
+       reg_offset = vf / 32;
+
+       reg_cur_tx = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
+       reg_cur_rx = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+
+       if (adapter->vfinfo[vf].link_enable) {
+               reg_req_tx = reg_cur_tx | 1 << vf_shift;
+               reg_req_rx = reg_cur_rx | 1 << vf_shift;
+       } else {
+               reg_req_tx = reg_cur_tx & ~(1 << vf_shift);
+               reg_req_rx = reg_cur_rx & ~(1 << vf_shift);
+       }
+
+       /* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
+        * For more info take a look at ixgbe_set_vf_lpe
+        */
+       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+               struct net_device *dev = adapter->netdev;
+               int pf_max_frame = dev->mtu + ETH_HLEN;
+
+#if IS_ENABLED(CONFIG_FCOE)
+               if (dev->features & NETIF_F_FCOE_MTU)
+                       pf_max_frame = max_t(int, pf_max_frame,
+                                            IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif /* CONFIG_FCOE */
+
+               if (pf_max_frame > ETH_FRAME_LEN)
+                       reg_req_rx = reg_cur_rx & ~(1 << vf_shift);
+       }
+
+       /* Enable/Disable particular VF */
+       if (reg_cur_tx != reg_req_tx)
+               IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg_req_tx);
+       if (reg_cur_rx != reg_req_rx)
+               IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg_req_rx);
+}
+
 static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 {
        struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
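
ixgbe_set_vf_rx_tx() above centralizes the VFRE/VFTE bit arithmetic
that was previously open-coded in the reset path: each enable register
covers 32 VFs, so vf / 32 selects the register and vf % 32 selects the
bit within it. The same indexing in isolation, over a plain array
standing in for the register bank:

        /* Set or clear VF n's enable bit in a 32-VFs-per-register bank. */
        static void set_vf_enable_bit(u32 *bank, unsigned int vf, bool ena)
        {
                u32 reg = bank[vf / 32];        /* register holding this VF */

                if (ena)
                        reg |= 1U << (vf % 32);
                else
                        reg &= ~(1U << (vf % 32));
                bank[vf / 32] = reg;
        }
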
@@ -845,11 +897,6 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
        vf_shift = vf % 32;
        reg_offset = vf / 32;
 
-       /* enable transmit for vf */
-       reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
-       reg |= BIT(vf_shift);
-       IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
-
        /* force drop enable for all VF Rx queues */
        reg = IXGBE_QDE_ENABLE;
        if (adapter->vfinfo[vf].pf_vlan)
@@ -857,27 +904,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 
        ixgbe_write_qde(adapter, vf, reg);
 
-       /* enable receive for vf */
-       reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
-       reg |= BIT(vf_shift);
-       /*
-        * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
-        * For more info take a look at ixgbe_set_vf_lpe
-        */
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-               struct net_device *dev = adapter->netdev;
-               int pf_max_frame = dev->mtu + ETH_HLEN;
-
-#ifdef CONFIG_FCOE
-               if (dev->features & NETIF_F_FCOE_MTU)
-                       pf_max_frame = max_t(int, pf_max_frame,
-                                            IXGBE_FCOE_JUMBO_FRAME_SIZE);
-
-#endif /* CONFIG_FCOE */
-               if (pf_max_frame > ETH_FRAME_LEN)
-                       reg &= ~BIT(vf_shift);
-       }
-       IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+       ixgbe_set_vf_rx_tx(adapter, vf);
 
        /* enable VF mailbox for further messages */
        adapter->vfinfo[vf].clear_to_send = true;
@@ -1202,6 +1229,26 @@ out:
        return 0;
 }
 
+static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter,
+                                  u32 *msgbuf, u32 vf)
+{
+       u32 *link_state = &msgbuf[1];
+
+       /* verify the PF is supporting the correct API */
+       switch (adapter->vfinfo[vf].vf_api) {
+       case ixgbe_mbox_api_12:
+       case ixgbe_mbox_api_13:
+       case ixgbe_mbox_api_14:
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       *link_state = adapter->vfinfo[vf].link_enable;
+
+       return 0;
+}
+
 static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 {
        u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
@@ -1267,6 +1314,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
        case IXGBE_VF_UPDATE_XCAST_MODE:
                retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
                break;
+       case IXGBE_VF_GET_LINK_STATE:
+               retval = ixgbe_get_vf_link_state(adapter, msgbuf, vf);
+               break;
        case IXGBE_VF_IPSEC_ADD:
                retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf);
                break;
@@ -1322,18 +1372,6 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter)
        }
 }
 
-void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-
-       /* disable transmit and receive for all vfs */
-       IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
-       IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
-
-       IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
-       IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
-}
-
 static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
 {
        struct ixgbe_hw *hw = &adapter->hw;
@@ -1359,6 +1397,21 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
        }
 }
 
+/**
+ * ixgbe_set_all_vfs - update VF queues
+ * @adapter: Pointer to adapter struct
+ *
+ * Update the transmit and receive enable settings for all active VFs
+ **/
+void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter)
+{
+       int i;
+
+       for (i = 0 ; i < adapter->num_vfs; i++)
+               ixgbe_set_vf_link_state(adapter, i,
+                                       adapter->vfinfo[i].link_state);
+}
+
 int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -1656,6 +1709,84 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
        return 0;
 }
 
+/**
+ * ixgbe_set_vf_link_state - Set link state
+ * @adapter: Pointer to adapter struct
+ * @vf: VF identifier
+ * @state: required link state
+ *
+ * Force the link state on or off for a single VF
+ **/
+void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state)
+{
+       adapter->vfinfo[vf].link_state = state;
+
+       switch (state) {
+       case IFLA_VF_LINK_STATE_AUTO:
+               if (test_bit(__IXGBE_DOWN, &adapter->state))
+                       adapter->vfinfo[vf].link_enable = false;
+               else
+                       adapter->vfinfo[vf].link_enable = true;
+               break;
+       case IFLA_VF_LINK_STATE_ENABLE:
+               adapter->vfinfo[vf].link_enable = true;
+               break;
+       case IFLA_VF_LINK_STATE_DISABLE:
+               adapter->vfinfo[vf].link_enable = false;
+               break;
+       }
+
+       ixgbe_set_vf_rx_tx(adapter, vf);
+
+       /* restart the VF */
+       adapter->vfinfo[vf].clear_to_send = false;
+       ixgbe_ping_vf(adapter, vf);
+}
+
+/**
+ * ixgbe_ndo_set_vf_link_state - Set link state
+ * @netdev: network interface device structure
+ * @vf: VF identifier
+ * @state: required link state
+ *
+ * Set the link state of a specified VF, regardless of physical link state
+ **/
+int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       int ret = 0;
+
+       if (vf < 0 || vf >= adapter->num_vfs) {
+               dev_err(&adapter->pdev->dev,
+                       "NDO set VF link - invalid VF identifier %d\n", vf);
+               return -EINVAL;
+       }
+
+       switch (state) {
+       case IFLA_VF_LINK_STATE_ENABLE:
+               dev_info(&adapter->pdev->dev,
+                        "NDO set VF %d link state %d - not supported\n",
+                       vf, state);
+               break;
+       case IFLA_VF_LINK_STATE_DISABLE:
+               dev_info(&adapter->pdev->dev,
+                        "NDO set VF %d link state disable\n", vf);
+               ixgbe_set_vf_link_state(adapter, vf, state);
+               break;
+       case IFLA_VF_LINK_STATE_AUTO:
+               dev_info(&adapter->pdev->dev,
+                        "NDO set VF %d link state auto\n", vf);
+               ixgbe_set_vf_link_state(adapter, vf, state);
+               break;
+       default:
+               dev_err(&adapter->pdev->dev,
+                       "NDO set VF %d - invalid link state %d\n", vf, state);
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
 int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
                                  bool setting)
 {
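
The new .ndo_set_vf_link_state hook wired up earlier is driven from
userspace over rtnetlink, i.e. something like
"ip link set <pf> vf <n> state disable" (or "auto"); as the switch
above shows, the "enable" state is merely logged as unsupported on
this hardware.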
index 3ec2192..0690ecb 100644 (file)
@@ -17,8 +17,8 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
 #endif
 void ixgbe_msg_task(struct ixgbe_adapter *adapter);
 int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
-void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
 void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
+void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter);
 int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
 int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
                           u8 qos, __be16 vlan_proto);
@@ -31,7 +31,9 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
 int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
 int ixgbe_ndo_get_vf_config(struct net_device *netdev,
                            int vf, struct ifla_vf_info *ivi);
+int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state);
 void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
+void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state);
 int ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
 #ifdef CONFIG_PCI_IOV
 void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs);
index e257390..149c733 100644 (file)
@@ -387,6 +387,8 @@ struct ixgbevf_adapter {
        u32 *rss_key;
        u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
        u32 flags;
+       bool link_state;
+
 #define IXGBEVF_FLAGS_LEGACY_RX                BIT(1)
 
 #ifdef CONFIG_XFRM
index 84222ec..55b87bc 100644 (file)
@@ -2298,7 +2298,9 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       struct pci_dev *pdev = adapter->pdev;
        struct ixgbe_hw *hw = &adapter->hw;
+       bool state;
 
        ixgbevf_configure_msix(adapter);
 
@@ -2311,6 +2313,11 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 
        spin_unlock_bh(&adapter->mbx_lock);
 
+       state = adapter->link_state;
+       hw->mac.ops.get_link_state(hw, &adapter->link_state);
+       if (state && state != adapter->link_state)
+               dev_info(&pdev->dev, "VF is administratively disabled\n");
+
        smp_mb__before_atomic();
        clear_bit(__IXGBEVF_DOWN, &adapter->state);
        ixgbevf_napi_enable_all(adapter);
@@ -3081,6 +3088,8 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
        adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
        adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
 
+       adapter->link_state = true;
+
        set_bit(__IXGBEVF_DOWN, &adapter->state);
        return 0;
 
@@ -3313,7 +3322,7 @@ static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
 
        ixgbevf_watchdog_update_link(adapter);
 
-       if (adapter->link_up)
+       if (adapter->link_up && adapter->link_state)
                ixgbevf_watchdog_link_is_up(adapter);
        else
                ixgbevf_watchdog_link_is_down(adapter);
index 7346ccf..835bbcc 100644 (file)
@@ -100,6 +100,8 @@ enum ixgbe_pfvf_api_rev {
 #define IXGBE_VF_IPSEC_ADD     0x0d
 #define IXGBE_VF_IPSEC_DEL     0x0e
 
+#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+
 /* length of permanent address message returned from PF */
 #define IXGBE_VF_PERMADDR_MSG_LEN      4
 /* word in permanent address message with the current multicast type */
index 61d8970..68fc32e 100644 (file)
@@ -584,6 +584,46 @@ static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
        return -EOPNOTSUPP;
 }
 
+/**
+ * ixgbevf_get_link_state_vf - Get VF link state from PF
+ * @hw: pointer to the HW structure
+ * @link_state: link state storage
+ *
+ * Returns 0 on success or IXGBE_ERR_MBX on mailbox failure.
+ */
+static s32 ixgbevf_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
+{
+       u32 msgbuf[2];
+       s32 ret_val;
+       s32 err;
+
+       msgbuf[0] = IXGBE_VF_GET_LINK_STATE;
+       msgbuf[1] = 0x0;
+
+       err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+
+       if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
+               ret_val = IXGBE_ERR_MBX;
+       } else {
+               ret_val = 0;
+               *link_state = msgbuf[1];
+       }
+
+       return ret_val;
+}
+
+/**
+ * ixgbevf_hv_get_link_state_vf - Hyper-V variant; just a stub.
+ * @hw: unused
+ * @link_state: unused
+ *
+ * Hyper-V variant; there is no mailbox communication.
+ */
+static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
+{
+       return -EOPNOTSUPP;
+}
+
 /**
  *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
  *  @hw: pointer to the HW structure
@@ -968,6 +1008,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
        .set_rar                = ixgbevf_set_rar_vf,
        .update_mc_addr_list    = ixgbevf_update_mc_addr_list_vf,
        .update_xcast_mode      = ixgbevf_update_xcast_mode,
+       .get_link_state         = ixgbevf_get_link_state_vf,
        .set_uc_addr            = ixgbevf_set_uc_addr_vf,
        .set_vfta               = ixgbevf_set_vfta_vf,
        .set_rlpml              = ixgbevf_set_rlpml_vf,
@@ -985,6 +1026,7 @@ static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
        .set_rar                = ixgbevf_hv_set_rar_vf,
        .update_mc_addr_list    = ixgbevf_hv_update_mc_addr_list_vf,
        .update_xcast_mode      = ixgbevf_hv_update_xcast_mode,
+       .get_link_state         = ixgbevf_hv_get_link_state_vf,
        .set_uc_addr            = ixgbevf_hv_set_uc_addr_vf,
        .set_vfta               = ixgbevf_hv_set_vfta_vf,
        .set_rlpml              = ixgbevf_hv_set_rlpml_vf,
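
On the VF side, the new get_link_state op pairs with the PF handler
added earlier: the VF sends IXGBE_VF_GET_LINK_STATE and the PF replies
with its link_enable value in msgbuf[1]. A minimal sketch of a caller
refreshing its cached state through the ops table (the adapter field
mirrors its use elsewhere in this series):

        bool link_state = true;

        /* Ask the PF whether this VF's link is administratively up. */
        if (hw->mac.ops.get_link_state &&
            !hw->mac.ops.get_link_state(hw, &link_state))
                adapter->link_state = link_state;
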
index 54158da..b4eef5b 100644 (file)
@@ -39,6 +39,7 @@ struct ixgbe_mac_operations {
        s32 (*init_rx_addrs)(struct ixgbe_hw *);
        s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
        s32 (*update_xcast_mode)(struct ixgbe_hw *, int);
+       s32 (*get_link_state)(struct ixgbe_hw *hw, bool *link_state);
        s32 (*enable_mc)(struct ixgbe_hw *);
        s32 (*disable_mc)(struct ixgbe_hw *);
        s32 (*clear_vfta)(struct ixgbe_hw *);
index 41d1113..5712c3e 100644 (file)
@@ -260,9 +260,9 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
 
        if (ctl & LTQ_DMA_EOP) {
                ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
-               netif_receive_skb(ch->skb_head);
                net_dev->stats.rx_packets++;
                net_dev->stats.rx_bytes += ch->skb_head->len;
+               netif_receive_skb(ch->skb_head);
                ch->skb_head = NULL;
                ch->skb_tail = NULL;
                ret = XRX200_DMA_PACKET_COMPLETE;
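
The reordering above matters because netif_receive_skb() hands the skb
to the stack, which may consume and free it immediately; reading
ch->skb_head->len afterwards for the byte counter would risk a
use-after-free, so the statistics are now updated before the skb is
passed up.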
index c31cbba..5f9ab18 100644 (file)
@@ -3092,8 +3092,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
        struct mv643xx_eth_private *mp;
        struct net_device *dev;
        struct phy_device *phydev = NULL;
-       struct resource *res;
-       int err;
+       int err, irq;
 
        pd = dev_get_platdata(&pdev->dev);
        if (pd == NULL) {
@@ -3189,9 +3188,12 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
        timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
 
 
-       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       BUG_ON(!res);
-       dev->irq = res->start;
+       irq = platform_get_irq(pdev, 0);
+       if (WARN_ON(irq < 0)) {
+               err = irq;
+               goto out;
+       }
+       dev->irq = irq;
 
        dev->netdev_ops = &mv643xx_eth_netdev_ops;
 
index f1335a1..934f6dd 100644 (file)
@@ -76,6 +76,8 @@
 #define MVNETA_WIN_SIZE(w)                      (0x2204 + ((w) << 3))
 #define MVNETA_WIN_REMAP(w)                     (0x2280 + ((w) << 2))
 #define MVNETA_BASE_ADDR_ENABLE                 0x2290
+#define      MVNETA_AC5_CNM_DDR_TARGET         0x2
+#define      MVNETA_AC5_CNM_DDR_ATTR           0xb
 #define MVNETA_ACCESS_PROTECT_ENABLE            0x2294
 #define MVNETA_PORT_CONFIG                      0x2400
 #define      MVNETA_UNI_PROMISC_MODE            BIT(0)
@@ -544,6 +546,7 @@ struct mvneta_port {
 
        /* Flags for special SoC configurations */
        bool neta_armada3700;
+       bool neta_ac5;
        u16 rx_offset_correction;
        const struct mbus_dram_target_info *dram_target_info;
 };
@@ -5324,6 +5327,10 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
                        win_protect |= 3 << (2 * i);
                }
        } else {
+               if (pp->neta_ac5)
+                       mvreg_write(pp, MVNETA_WIN_BASE(0),
+                                   (MVNETA_AC5_CNM_DDR_ATTR << 8) |
+                                   MVNETA_AC5_CNM_DDR_TARGET);
                /* For Armada3700 open default 4GB Mbus window, leaving
                 * arbitration of target/attribute to a different layer
                 * of configuration.
@@ -5409,6 +5416,10 @@ static int mvneta_probe(struct platform_device *pdev)
        /* Get special SoC configurations */
        if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
                pp->neta_armada3700 = true;
+       if (of_device_is_compatible(dn, "marvell,armada-ac5-neta")) {
+               pp->neta_armada3700 = true;
+               pp->neta_ac5 = true;
+       }
 
        dev->irq = irq_of_parse_and_map(dn, 0);
        if (dev->irq == 0)
@@ -5769,6 +5780,7 @@ static const struct of_device_id mvneta_match[] = {
        { .compatible = "marvell,armada-370-neta" },
        { .compatible = "marvell,armada-xp-neta" },
        { .compatible = "marvell,armada-3700-neta" },
+       { .compatible = "marvell,armada-ac5-neta" },
        { }
 };
 MODULE_DEVICE_TABLE(of, mvneta_match);
index e4af8a5..47c899c 100644 (file)
@@ -91,7 +91,7 @@ static const struct rhashtable_params __prestera_acl_rule_entry_ht_params = {
 
 int prestera_acl_chain_to_client(u32 chain_index, u32 *client)
 {
-       u32 client_map[] = {
+       static const u32 client_map[] = {
                PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0,
                PRESTERA_HW_COUNTER_CLIENT_LOOKUP_1,
                PRESTERA_HW_COUNTER_CLIENT_LOOKUP_2
index a180b68..1402c78 100644 (file)
@@ -560,6 +560,7 @@ static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
                dev_info(prestera_dev(sw), "using random base mac address\n");
        }
        of_node_put(base_mac_np);
+       of_node_put(np);
 
        return prestera_hw_switch_mac_set(sw, sw->base_mac);
 }
index 8cfc649..8f762fc 100644 (file)
@@ -1067,7 +1067,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
        struct mlx4_qp_context *context;
        int err = 0;
 
-       context = kmalloc(sizeof(*context), GFP_KERNEL);
+       context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return -ENOMEM;
 
@@ -1078,7 +1078,6 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
        }
        qp->event = mlx4_en_sqp_event;
 
-       memset(context, 0, sizeof(*context));
        mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
                                qpn, ring->cqn, -1, context);
        context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
index 817f415..f777151 100644 (file)
@@ -42,7 +42,6 @@
 #include <linux/tcp.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
-#include <linux/moduleparam.h>
 #include <linux/indirect_call_wrapper.h>
 
 #include "mlx4_en.h"
index a7170ab..4bc6667 100644 (file)
@@ -55,7 +55,11 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT)     += en/tc/act/act.o en/tc/act/drop.o en/tc/a
                                        en/tc/act/ct.o en/tc/act/sample.o en/tc/act/ptype.o \
                                        en/tc/act/redirect_ingress.o
 
-mlx5_core-$(CONFIG_MLX5_TC_CT)      += en/tc_ct.o
+ifneq ($(CONFIG_MLX5_TC_CT),)
+       mlx5_core-y                          += en/tc_ct.o en/tc/ct_fs_dmfs.o
+       mlx5_core-$(CONFIG_MLX5_SW_STEERING) += en/tc/ct_fs_smfs.o
+endif
+
 mlx5_core-$(CONFIG_MLX5_TC_SAMPLE)   += en/tc/sample.o
 
 #
@@ -103,9 +107,10 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
                                        steering/dr_icm_pool.o steering/dr_buddy.o \
                                        steering/dr_ste.o steering/dr_send.o \
                                        steering/dr_ste_v0.o steering/dr_ste_v1.o \
+                                       steering/dr_ste_v2.o \
                                        steering/dr_cmd.o steering/dr_fw.o \
                                        steering/dr_action.o steering/fs_dr.o \
-                                       steering/dr_dbg.o
+                                       steering/dr_dbg.o lib/smfs.o
 #
 # SF device
 #
index 291e427..e52b0ba 100644 (file)
@@ -71,53 +71,6 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
        return cpu_handle;
 }
 
-static int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
-                              struct mlx5_frag_buf *buf, int node)
-{
-       dma_addr_t t;
-
-       buf->size = size;
-       buf->npages       = 1;
-       buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
-
-       buf->frags = kzalloc(sizeof(*buf->frags), GFP_KERNEL);
-       if (!buf->frags)
-               return -ENOMEM;
-
-       buf->frags->buf   = mlx5_dma_zalloc_coherent_node(dev, size,
-                                                         &t, node);
-       if (!buf->frags->buf)
-               goto err_out;
-
-       buf->frags->map = t;
-
-       while (t & ((1 << buf->page_shift) - 1)) {
-               --buf->page_shift;
-               buf->npages *= 2;
-       }
-
-       return 0;
-err_out:
-       kfree(buf->frags);
-       return -ENOMEM;
-}
-
-int mlx5_buf_alloc(struct mlx5_core_dev *dev,
-                  int size, struct mlx5_frag_buf *buf)
-{
-       return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
-}
-EXPORT_SYMBOL(mlx5_buf_alloc);
-
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
-{
-       dma_free_coherent(mlx5_core_dma_dev(dev), buf->size, buf->frags->buf,
-                         buf->frags->map);
-
-       kfree(buf->frags);
-}
-EXPORT_SYMBOL_GPL(mlx5_buf_free);
-
 int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                             struct mlx5_frag_buf *buf, int node)
 {
@@ -183,11 +136,11 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
        u32 db_per_page = PAGE_SIZE / cache_line_size();
        struct mlx5_db_pgdir *pgdir;
 
-       pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
+       pgdir = kzalloc_node(sizeof(*pgdir), GFP_KERNEL, node);
        if (!pgdir)
                return NULL;
 
-       pgdir->bitmap = bitmap_zalloc(db_per_page, GFP_KERNEL);
+       pgdir->bitmap = bitmap_zalloc_node(db_per_page, GFP_KERNEL, node);
        if (!pgdir->bitmap) {
                kfree(pgdir);
                return NULL;
@@ -286,19 +239,6 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
 }
 EXPORT_SYMBOL_GPL(mlx5_db_free);
 
-void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas)
-{
-       u64 addr;
-       int i;
-
-       for (i = 0; i < buf->npages; i++) {
-               addr = buf->frags->map + (i << buf->page_shift);
-
-               pas[i] = cpu_to_be64(addr);
-       }
-}
-EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
-
 void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm)
 {
        int i;
index 823d580..f1329f8 100644 (file)
@@ -31,7 +31,6 @@
  */
 
 #include <linux/highmem.h>
-#include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
@@ -131,11 +130,8 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd)
 
 static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&cmd->alloc_lock, flags);
+       lockdep_assert_held(&cmd->alloc_lock);
        set_bit(idx, &cmd->bitmask);
-       spin_unlock_irqrestore(&cmd->alloc_lock, flags);
 }
 
 static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
@@ -145,17 +141,21 @@ static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
 
 static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
 {
+       struct mlx5_cmd *cmd = ent->cmd;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cmd->alloc_lock, flags);
        if (!refcount_dec_and_test(&ent->refcnt))
-               return;
+               goto out;
 
        if (ent->idx >= 0) {
-               struct mlx5_cmd *cmd = ent->cmd;
-
                cmd_free_index(cmd, ent->idx);
                up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
        }
 
        cmd_free_ent(ent);
+out:
+       spin_unlock_irqrestore(&cmd->alloc_lock, flags);
 }
 
 static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
@@ -1543,7 +1543,7 @@ static void create_debugfs_files(struct mlx5_core_dev *dev)
 {
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
 
-       dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
+       dbg->dbg_root = debugfs_create_dir("cmd", mlx5_debugfs_get_dev_root(dev));
 
        debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
        debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
@@ -1720,7 +1720,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
        }
 }
 
-void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
+static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
 {
        struct mlx5_cmd *cmd = &dev->cmd;
        unsigned long bitmask;
@@ -1877,16 +1877,38 @@ out_in:
        return err;
 }
 
+static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, int err)
+{
+       struct mlx5_cmd_stats *stats;
+
+       if (!err)
+               return;
+
+       stats = &dev->cmd.stats[opcode];
+       spin_lock_irq(&stats->lock);
+       stats->failed++;
+       if (err < 0)
+               stats->last_failed_errno = -err;
+       if (err == -EREMOTEIO) {
+               stats->failed_mbox_status++;
+               stats->last_failed_mbox_status = status;
+       }
+       spin_unlock_irq(&stats->lock);
+}
+
 /* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
-static int cmd_status_err(int err, void *out)
+static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *out)
 {
-       if (err) /* -EREMOTEIO is preserved */
-               return err == -EREMOTEIO ? -EIO : err;
+       u8 status = MLX5_GET(mbox_out, out, status);
 
-       if (MLX5_GET(mbox_out, out, status) != MLX5_CMD_STAT_OK)
-               return -EREMOTEIO;
+       if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */
+               err = -EIO;
 
-       return 0;
+       if (!err && status != MLX5_CMD_STAT_OK)
+               err = -EREMOTEIO;
+
+       cmd_status_log(dev, opcode, status, err);
+       return err;
 }
 
 /**
@@ -1910,8 +1932,10 @@ static int cmd_status_err(int err, void *out)
 int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size)
 {
        int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
+       u16 opcode = MLX5_GET(mbox_in, in, opcode);
 
-       return cmd_status_err(err, out);
+       err = cmd_status_err(dev, err, opcode, out);
+       return err;
 }
 EXPORT_SYMBOL(mlx5_cmd_do);
 
@@ -1954,8 +1978,9 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
                          void *out, int out_size)
 {
        int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
+       u16 opcode = MLX5_GET(mbox_in, in, opcode);
 
-       err = cmd_status_err(err, out);
+       err = cmd_status_err(dev, err, opcode, out);
        return mlx5_cmd_check(dev, err, in, out);
 }
 EXPORT_SYMBOL(mlx5_cmd_exec_polling);
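
With cmd_status_log() in place, every command path now records
per-opcode failure statistics, and the preserved -EREMOTEIO lets
callers tell transport failures apart from commands the firmware
accepted but completed with a bad status. A hedged sketch of a caller
making that distinction (handle_fw_status() is hypothetical):

        err = mlx5_cmd_do(dev, in, sizeof(in), out, sizeof(out));
        if (err == -EREMOTEIO)
                /* delivered to FW, but FW returned a bad mbox status */
                handle_fw_status(MLX5_GET(mbox_out, out, status));
        else if (err)
                return err;     /* driver/transport level failure */
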
@@ -1991,7 +2016,7 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
        struct mlx5_async_ctx *ctx;
 
        ctx = work->ctx;
-       status = cmd_status_err(status, work->out);
+       status = cmd_status_err(ctx->dev, status, work->opcode, work->out);
        work->user_callback(status, work);
        if (atomic_dec_and_test(&ctx->num_inflight))
                wake_up(&ctx->wait);
@@ -2005,6 +2030,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
 
        work->ctx = ctx;
        work->user_callback = callback;
+       work->opcode = MLX5_GET(mbox_in, in, opcode);
        work->out = out;
        if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
                return -EIO;
index 15a7496..4caa1b6 100644 (file)
@@ -31,7 +31,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/hardirq.h>
 #include <linux/mlx5/driver.h>
 #include <rdma/ib_verbs.h>
index 10d1950..3d3e55a 100644 (file)
@@ -30,7 +30,6 @@
  * SOFTWARE.
  */
 
-#include <linux/module.h>
 #include <linux/debugfs.h>
 #include <linux/mlx5/qp.h>
 #include <linux/mlx5/cq.h>
@@ -99,26 +98,32 @@ void mlx5_unregister_debugfs(void)
        debugfs_remove(mlx5_debugfs_root);
 }
 
+struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev)
+{
+       return dev->priv.dbg.dbg_root;
+}
+EXPORT_SYMBOL(mlx5_debugfs_get_dev_root);
+
 void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
 {
-       dev->priv.qp_debugfs = debugfs_create_dir("QPs",  dev->priv.dbg_root);
+       dev->priv.dbg.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg.dbg_root);
 }
 EXPORT_SYMBOL(mlx5_qp_debugfs_init);
 
 void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
 {
-       debugfs_remove_recursive(dev->priv.qp_debugfs);
+       debugfs_remove_recursive(dev->priv.dbg.qp_debugfs);
 }
 EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup);
 
 void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
 {
-       dev->priv.eq_debugfs = debugfs_create_dir("EQs",  dev->priv.dbg_root);
+       dev->priv.dbg.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg.dbg_root);
 }
 
 void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
 {
-       debugfs_remove_recursive(dev->priv.eq_debugfs);
+       debugfs_remove_recursive(dev->priv.dbg.eq_debugfs);
 }
 
 static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
@@ -168,8 +173,8 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
        const char *namep;
        int i;
 
-       cmd = &dev->priv.cmdif_debugfs;
-       *cmd = debugfs_create_dir("commands", dev->priv.dbg_root);
+       cmd = &dev->priv.dbg.cmdif_debugfs;
+       *cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root);
 
        for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
                stats = &dev->cmd.stats[i];
@@ -180,23 +185,51 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
                        debugfs_create_file("average", 0400, stats->root, stats,
                                            &stats_fops);
                        debugfs_create_u64("n", 0400, stats->root, &stats->n);
+                       debugfs_create_u64("failed", 0400, stats->root, &stats->failed);
+                       debugfs_create_u64("failed_mbox_status", 0400, stats->root,
+                                          &stats->failed_mbox_status);
+                       debugfs_create_u32("last_failed_errno", 0400, stats->root,
+                                          &stats->last_failed_errno);
+                       debugfs_create_u8("last_failed_mbox_status", 0400, stats->root,
+                                         &stats->last_failed_mbox_status);
                }
        }
 }
 
 void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
 {
-       debugfs_remove_recursive(dev->priv.cmdif_debugfs);
+       debugfs_remove_recursive(dev->priv.dbg.cmdif_debugfs);
 }
 
 void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
 {
-       dev->priv.cq_debugfs = debugfs_create_dir("CQs",  dev->priv.dbg_root);
+       dev->priv.dbg.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg.dbg_root);
 }
 
 void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
 {
-       debugfs_remove_recursive(dev->priv.cq_debugfs);
+       debugfs_remove_recursive(dev->priv.dbg.cq_debugfs);
+}
+
+void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
+{
+       struct dentry *pages;
+
+       dev->priv.dbg.pages_debugfs = debugfs_create_dir("pages", dev->priv.dbg.dbg_root);
+       pages = dev->priv.dbg.pages_debugfs;
+
+       debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
+       debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages);
+       debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages);
+       debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
+       debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
+       debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,
+                          &dev->priv.reclaim_pages_discard);
+}
+
+void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev)
+{
+       debugfs_remove_recursive(dev->priv.dbg.pages_debugfs);
 }
 
 static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
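
The new "pages" directory added above sits under the per-device mlx5 debugfs root and exposes firmware page accounting as plain u32 files. A minimal userspace reader, assuming the usual /sys/kernel/debug mount point and an illustrative device address:

    #include <stdio.h>

    int main(void)
    {
            /* the PCI address is illustrative; substitute your device's BDF */
            const char *path =
                    "/sys/kernel/debug/mlx5/0000:08:00.0/pages/fw_pages_total";
            unsigned int val;
            FILE *f = fopen(path, "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%u", &val) == 1)
                    printf("fw_pages_total: %u\n", val);
            fclose(f);
            return 0;
    }
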
@@ -441,7 +474,7 @@ int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
        if (!mlx5_debugfs_root)
                return 0;
 
-       err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs,
+       err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.dbg.qp_debugfs,
                           &qp->dbg, qp->qpn, qp_fields,
                           ARRAY_SIZE(qp_fields), qp);
        if (err)
@@ -468,7 +501,7 @@ int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
        if (!mlx5_debugfs_root)
                return 0;
 
-       err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs,
+       err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.dbg.eq_debugfs,
                           &eq->dbg, eq->eqn, eq_fields,
                           ARRAY_SIZE(eq_fields), eq);
        if (err)
@@ -493,7 +526,7 @@ int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
        if (!mlx5_debugfs_root)
                return 0;
 
-       err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs,
+       err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.dbg.cq_debugfs,
                           &cq->dbg, cq->cqn, cq_fields,
                           ARRAY_SIZE(cq_fields), cq);
        if (err)
index 2704c75..8653ac0 100644
@@ -405,6 +405,7 @@ enum {
        MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
        MLX5E_SQ_STATE_PENDING_XSK_TX,
        MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
+       MLX5E_SQ_STATE_XDP_MULTIBUF,
 };
 
 struct mlx5e_tx_mpwqe {
@@ -515,7 +516,7 @@ struct mlx5e_xdp_info {
                } frame;
                struct {
                        struct mlx5e_rq *rq;
-                       struct mlx5e_dma_info di;
+                       struct page *page;
                } page;
        };
 };
@@ -537,7 +538,7 @@ struct mlx5e_xdpsq;
 typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
 typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
                                        struct mlx5e_xmit_data *,
-                                       struct mlx5e_xdp_info *,
+                                       struct skb_shared_info *,
                                        int);
 
 struct mlx5e_xdpsq {
index 0bd8698..08fd137 100644
@@ -188,12 +188,18 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
                          struct mlx5e_params *params,
                          struct mlx5e_xsk_param *xsk)
 {
-       bool is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
-               mlx5e_rx_is_linear_skb(params, xsk) :
-               mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
+       u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, xsk);
 
-       return is_linear_skb || params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO ?
-               mlx5e_get_linear_rq_headroom(params, xsk) : 0;
+       if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
+               return linear_headroom;
+
+       if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
+               return linear_headroom;
+
+       if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
+               return linear_headroom;
+
+       return 0;
 }
 
 u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
@@ -392,16 +398,29 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
        };
 }
 
+static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size, bool xdp)
+{
+       if (xdp)
+               /* XDP requires all fragments to be of the same size. */
+               return first_frag_size + (MLX5E_MAX_RX_FRAGS - 1) * frag_size;
+
+       /* Optimization for small packets: the last fragment is bigger than the others. */
+       return first_frag_size + (MLX5E_MAX_RX_FRAGS - 2) * frag_size + PAGE_SIZE;
+}
+
 #define DEFAULT_FRAG_SIZE (2048)
 
-static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
-                                     struct mlx5e_params *params,
-                                     struct mlx5e_xsk_param *xsk,
-                                     struct mlx5e_rq_frags_info *info)
+static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
+                                    struct mlx5e_params *params,
+                                    struct mlx5e_xsk_param *xsk,
+                                    struct mlx5e_rq_frags_info *info)
 {
        u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
        int frag_size_max = DEFAULT_FRAG_SIZE;
+       int first_frag_size_max;
        u32 buf_size = 0;
+       u16 headroom;
+       int max_mtu;
        int i;
 
        if (mlx5_fpga_is_ipsec_device(mdev))
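
mlx5e_max_nonlinear_mtu() above encodes two budgets: with XDP every fragment must be the same size, while on the SKB path the last fragment may span a whole page. A standalone check of that arithmetic with assumed values (PAGE_SIZE 4096, MLX5E_MAX_RX_FRAGS 4, first fragment already reduced by headroom and skb_shared_info overhead):

    #include <stdio.h>

    #define PAGE_SIZE          4096
    #define MLX5E_MAX_RX_FRAGS 4

    static int max_nonlinear_mtu(int first_frag_size, int frag_size, int xdp)
    {
            if (xdp)        /* XDP: all fragments share one size */
                    return first_frag_size + (MLX5E_MAX_RX_FRAGS - 1) * frag_size;
            /* SKB path: the last fragment may take a full page */
            return first_frag_size + (MLX5E_MAX_RX_FRAGS - 2) * frag_size + PAGE_SIZE;
    }

    int main(void)
    {
            printf("xdp: %d\n", max_nonlinear_mtu(1600, 2048, 1)); /* 7744 */
            printf("skb: %d\n", max_nonlinear_mtu(1600, 2048, 0)); /* 9792 */
            return 0;
    }
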
@@ -420,21 +439,48 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
                goto out;
        }
 
-       if (byte_count > PAGE_SIZE +
-           (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
+       headroom = mlx5e_get_linear_rq_headroom(params, xsk);
+       first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);
+
+       max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
+                                         params->xdp_prog);
+       if (byte_count > max_mtu || params->xdp_prog) {
                frag_size_max = PAGE_SIZE;
+               first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);
+
+               max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
+                                                 params->xdp_prog);
+               if (byte_count > max_mtu) {
+                       mlx5_core_err(mdev, "MTU %u is too big for non-linear legacy RQ (max %d)\n",
+                                     params->sw_mtu, max_mtu);
+                       return -EINVAL;
+               }
+       }
 
        i = 0;
        while (buf_size < byte_count) {
                int frag_size = byte_count - buf_size;
 
-               if (i < MLX5E_MAX_RX_FRAGS - 1)
+               if (i == 0)
+                       frag_size = min(frag_size, first_frag_size_max);
+               else if (i < MLX5E_MAX_RX_FRAGS - 1)
                        frag_size = min(frag_size, frag_size_max);
 
                info->arr[i].frag_size = frag_size;
-               info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
-
                buf_size += frag_size;
+
+               if (params->xdp_prog) {
+                       /* XDP multi-buffer expects fragments of the same size. */
+                       info->arr[i].frag_stride = frag_size_max;
+               } else {
+                       if (i == 0) {
+                               /* Ensure that headroom and tailroom are included. */
+                               frag_size += headroom;
+                               frag_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+                       }
+                       info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
+               }
+
                i++;
        }
        info->num_frags = i;
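
The loop above then splits the HW MTU across at most MLX5E_MAX_RX_FRAGS fragments, capping only the first fragment at its smaller headroom-adjusted budget. A simplified walk of that split for a 9000-byte frame (assumed caps: 3520 for the first fragment, 4096 for the rest; every later fragment capped, as in the XDP case):

    #include <stdio.h>

    int main(void)
    {
            int byte_count = 9000, buf_size = 0, i = 0;
            int first_frag_size_max = 3520, frag_size_max = 4096;

            while (buf_size < byte_count) {
                    int frag_size = byte_count - buf_size;

                    if (i == 0 && frag_size > first_frag_size_max)
                            frag_size = first_frag_size_max;
                    else if (i > 0 && frag_size > frag_size_max)
                            frag_size = frag_size_max;

                    printf("frag[%d].frag_size = %d\n", i, frag_size);
                    buf_size += frag_size;
                    i++;
            }
            printf("num_frags = %d\n", i);  /* 3: 3520 + 4096 + 1384 */
            return 0;
    }
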
@@ -444,6 +490,8 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
 out:
        info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
        info->log_num_frags = order_base_2(info->num_frags);
+
+       return 0;
 }
 
 static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
@@ -540,6 +588,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
        int ndsegs = 1;
+       int err;
 
        switch (params->rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
@@ -579,7 +628,9 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
        }
        default: /* MLX5_WQ_TYPE_CYCLIC */
                MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
-               mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
+               err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
+               if (err)
+                       return err;
                ndsegs = param->frags_info.num_frags;
        }
 
@@ -792,6 +843,7 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
 
 void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
                             struct mlx5e_params *params,
+                            struct mlx5e_xsk_param *xsk,
                             struct mlx5e_sq_param *param)
 {
        void *sqc = param->sqc;
@@ -800,6 +852,7 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
        mlx5e_build_sq_param_common(mdev, param);
        MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
        param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
+       param->is_xdp_mb = !mlx5e_rx_is_linear_skb(params, xsk);
        mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
 }
 
@@ -819,7 +872,7 @@ int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
        async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
 
        mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
-       mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
+       mlx5e_build_xdpsq_param(mdev, params, NULL, &cparam->xdp_sq);
        mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
        mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);
 
index 47a3681..f5c46e7 100644
@@ -31,6 +31,7 @@ struct mlx5e_sq_param {
        struct mlx5_wq_param       wq;
        bool                       is_mpw;
        bool                       is_tls;
+       bool                       is_xdp_mb;
        u16                        stop_room;
 };
 
@@ -155,6 +156,7 @@ void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
                             struct mlx5e_cq_param *param);
 void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
                             struct mlx5e_params *params,
+                            struct mlx5e_xsk_param *xsk,
                             struct mlx5e_sq_param *param);
 int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
                              struct mlx5e_params *params,
index b755890..5d9bd91 100644
@@ -18,7 +18,6 @@ int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv);
 
 /* TX datapath API */
 int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid);
-struct mlx5e_txqsq *mlx5e_get_sq(struct mlx5e_priv *priv, int qid);
 
 /* SQ lifecycle */
 int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
index cb8f759..af37a8d 100644
@@ -35,6 +35,13 @@ static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
        NULL, /* FLOW_ACTION_CT_METADATA, */
        &mlx5e_tc_act_mpls_push,
        &mlx5e_tc_act_mpls_pop,
+       NULL, /* FLOW_ACTION_MPLS_MANGLE, */
+       NULL, /* FLOW_ACTION_GATE, */
+       NULL, /* FLOW_ACTION_PPPOE_PUSH, */
+       NULL, /* FLOW_ACTION_JUMP, */
+       NULL, /* FLOW_ACTION_PIPE, */
+       &mlx5e_tc_act_vlan,
+       &mlx5e_tc_act_vlan,
 };
 
 /* Must be aligned with enum flow_action_id. */
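
tc_acts_fdb[] is a dispatch table indexed directly by enum flow_action_id, so every unsupported action must keep a NULL placeholder to preserve the alignment, and the two new VLAN eth entries reuse the same vlan handler. The idiom, sketched with hypothetical names:

    #include <stdio.h>

    enum flow_act { ACT_VLAN_POP_ETH, ACT_VLAN_PUSH_ETH, ACT_GATE, NUM_FLOW_ACTS };

    struct tc_act { const char *name; };

    static struct tc_act act_vlan = { "vlan" };

    /* Must stay aligned with enum flow_act. */
    static struct tc_act *acts[NUM_FLOW_ACTS] = {
            &act_vlan,      /* ACT_VLAN_POP_ETH */
            &act_vlan,      /* ACT_VLAN_PUSH_ETH */
            NULL,           /* ACT_GATE: not offloadable */
    };

    int main(void)
    {
            enum flow_act id = ACT_GATE;

            printf("%s\n", acts[id] ? acts[id]->name : "reject: no handler");
            return 0;
    }
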
index 94a7cf3..f34714c 100644
@@ -22,6 +22,8 @@ struct mlx5e_tc_act_parse_state {
        bool encap;
        bool decap;
        bool mpls_push;
+       bool eth_push;
+       bool eth_pop;
        bool ptype_host;
        const struct ip_tunnel_info *tun_info;
        struct mlx5e_mpls_info mpls_info;
index 05a42fb..2b002c6 100644
@@ -125,6 +125,16 @@ tc_act_can_offload_mirred(struct mlx5e_tc_act_parse_state *parse_state,
                return false;
        }
 
+       if (parse_state->eth_pop && !parse_state->mpls_push) {
+               NL_SET_ERR_MSG_MOD(extack, "vlan pop eth is supported only with mpls push");
+               return false;
+       }
+
+       if (flow_flag_test(parse_state->flow, L3_TO_L2_DECAP) && !parse_state->eth_push) {
+               NL_SET_ERR_MSG_MOD(extack, "mpls pop is only supported with vlan eth push");
+               return false;
+       }
+
        if (mlx5e_is_ft_flow(flow) && out_dev == priv->netdev) {
                /* Ignore forward to self rules generated
                 * by adding both mlx5 devs to the flow table
index 96a80e0..f106190 100644
@@ -57,12 +57,13 @@ tc_act_can_offload_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state,
        filter_dev = attr->parse_attr->filter_dev;
 
        /* we only support mpls pop if it is the first action
+        * or the second action after a tunnel key unset
         * and the filter net device is bareudp. Subsequent
         * actions can be pedit and the last can be mirred
         * egress redirect.
         */
-       if (act_index) {
-               NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only as first action");
+       if ((act_index == 1 && !parse_state->decap) || act_index > 1) {
+               NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only as first action or with decap");
                return false;
        }
 
@@ -80,7 +81,7 @@ tc_act_parse_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state,
                      struct mlx5e_priv *priv,
                      struct mlx5_flow_attr *attr)
 {
-       attr->parse_attr->eth.h_proto = act->mpls_pop.proto;
+       attr->esw_attr->eth.h_proto = act->mpls_pop.proto;
        attr->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
        flow_flag_set(parse_state->flow, L3_TO_L2_DECAP);
 
index 39f8f71..47597c5 100644
@@ -42,13 +42,12 @@ out_err:
        return -EOPNOTSUPP;
 }
 
-static int
-parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
-                         const struct flow_action_entry *act, int namespace,
-                         struct mlx5e_tc_flow_parse_attr *parse_attr,
-                         struct netlink_ext_ack *extack)
+int
+mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
+                               const struct flow_action_entry *act, int namespace,
+                               struct pedit_headers_action *hdrs,
+                               struct netlink_ext_ack *extack)
 {
-       struct pedit_headers_action *hdrs = parse_attr->hdrs;
        u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
        u8 htype = act->mangle.htype;
        int err = -EOPNOTSUPP;
@@ -79,46 +78,6 @@ out_err:
        return err;
 }
 
-static int
-parse_pedit_to_reformat(const struct flow_action_entry *act,
-                       struct mlx5e_tc_flow_parse_attr *parse_attr,
-                       struct netlink_ext_ack *extack)
-{
-       u32 mask, val, offset;
-       u32 *p;
-
-       if (act->id != FLOW_ACTION_MANGLE) {
-               NL_SET_ERR_MSG_MOD(extack, "Unsupported action id");
-               return -EOPNOTSUPP;
-       }
-
-       if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
-               NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
-               return -EOPNOTSUPP;
-       }
-
-       mask = ~act->mangle.mask;
-       val = act->mangle.val;
-       offset = act->mangle.offset;
-       p = (u32 *)&parse_attr->eth;
-       *(p + (offset >> 2)) |= (val & mask);
-
-       return 0;
-}
-
-int
-mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
-                               const struct flow_action_entry *act, int namespace,
-                               struct mlx5e_tc_flow_parse_attr *parse_attr,
-                               struct mlx5e_tc_flow *flow,
-                               struct netlink_ext_ack *extack)
-{
-       if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
-               return parse_pedit_to_reformat(act, parse_attr, extack);
-
-       return parse_pedit_to_modify_hdr(priv, act, namespace, parse_attr, extack);
-}
-
 static bool
 tc_act_can_offload_pedit(struct mlx5e_tc_act_parse_state *parse_state,
                         const struct flow_action_entry *act,
@@ -141,20 +100,16 @@ tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state,
 
        ns_type = mlx5e_get_flow_namespace(flow);
 
-       err = mlx5e_tc_act_pedit_parse_action(flow->priv, act, ns_type, attr->parse_attr,
-                                             flow, parse_state->extack);
+       err = mlx5e_tc_act_pedit_parse_action(flow->priv, act, ns_type, attr->parse_attr->hdrs,
+                                             parse_state->extack);
        if (err)
                return err;
 
-       if (flow_flag_test(flow, L3_TO_L2_DECAP))
-               goto out;
-
        attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
 
        if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
                esw_attr->split_count = esw_attr->out_count;
 
-out:
        return 0;
 }
 
index 258f030..434c8bd 100644
@@ -24,8 +24,7 @@ struct pedit_headers_action {
 int
 mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
                                const struct flow_action_entry *act, int namespace,
-                               struct mlx5e_tc_flow_parse_attr *parse_attr,
-                               struct mlx5e_tc_flow *flow,
+                               struct pedit_headers_action *hdrs,
                                struct netlink_ext_ack *extack);
 
 #endif /* __MLX5_EN_TC_ACT_PEDIT_H__ */
index 6378b75..b86ac60 100644
@@ -34,7 +34,8 @@ parse_tc_vlan_action(struct mlx5e_priv *priv,
                     const struct flow_action_entry *act,
                     struct mlx5_esw_flow_attr *attr,
                     u32 *action,
-                    struct netlink_ext_ack *extack)
+                    struct netlink_ext_ack *extack,
+                    struct mlx5e_tc_act_parse_state *parse_state)
 {
        u8 vlan_idx = attr->total_vlan;
 
@@ -84,6 +85,16 @@ parse_tc_vlan_action(struct mlx5e_priv *priv,
                        *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
                }
                break;
+       case FLOW_ACTION_VLAN_POP_ETH:
+               parse_state->eth_pop = true;
+               break;
+       case FLOW_ACTION_VLAN_PUSH_ETH:
+               if (!flow_flag_test(parse_state->flow, L3_TO_L2_DECAP))
+                       return -EOPNOTSUPP;
+               parse_state->eth_push = true;
+               memcpy(attr->eth.h_dest, act->vlan_push_eth.dst, ETH_ALEN);
+               memcpy(attr->eth.h_source, act->vlan_push_eth.src, ETH_ALEN);
+               break;
        default:
                NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN");
                return -EINVAL;
@@ -109,7 +120,7 @@ mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv,
        };
        int err;
 
-       err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack);
+       err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack, NULL);
        if (err)
                return err;
 
@@ -139,7 +150,7 @@ mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
                                                priv->netdev->lower_level;
        while (nest_level--) {
                err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action,
-                                          extack);
+                                          extack, NULL);
                if (err)
                        return err;
        }
@@ -174,7 +185,7 @@ tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
                                                           parse_state->extack);
        } else {
                err = parse_tc_vlan_action(priv, act, esw_attr, &attr->action,
-                                          parse_state->extack);
+                                          parse_state->extack, parse_state);
        }
 
        if (err)
index 28444d4..9a8a1a6 100644
@@ -43,8 +43,8 @@ mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
                return -EOPNOTSUPP;
        }
 
-       err = mlx5e_tc_act_pedit_parse_action(priv, &pedit_act, namespace, parse_attr,
-                                             NULL, extack);
+       err = mlx5e_tc_act_pedit_parse_action(priv, &pedit_act, namespace, parse_attr->hdrs,
+                                             extack);
        *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
 
        return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
new file mode 100644
index 0000000..bb6b1a9
--- /dev/null
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_EN_TC_CT_FS_H__
+#define __MLX5_EN_TC_CT_FS_H__
+
+struct mlx5_ct_fs {
+       const struct net_device *netdev;
+       struct mlx5_core_dev *dev;
+
+       /* private data */
+       void *priv_data[];
+};
+
+struct mlx5_ct_fs_rule {
+};
+
+struct mlx5_ct_fs_ops {
+       int (*init)(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
+                   struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct);
+       void (*destroy)(struct mlx5_ct_fs *fs);
+
+       struct mlx5_ct_fs_rule * (*ct_rule_add)(struct mlx5_ct_fs *fs,
+                                               struct mlx5_flow_spec *spec,
+                                               struct mlx5_flow_attr *attr,
+                                               struct flow_rule *flow_rule);
+       void (*ct_rule_del)(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule);
+
+       size_t priv_size;
+};
+
+static inline void *mlx5_ct_fs_priv(struct mlx5_ct_fs *fs)
+{
+       return &fs->priv_data;
+}
+
+struct mlx5_ct_fs_ops *mlx5_ct_fs_dmfs_ops_get(void);
+
+#if IS_ENABLED(CONFIG_MLX5_SW_STEERING)
+struct mlx5_ct_fs_ops *mlx5_ct_fs_smfs_ops_get(void);
+#else
+static inline struct mlx5_ct_fs_ops *
+mlx5_ct_fs_smfs_ops_get(void)
+{
+       return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_MLX5_SW_STEERING) */
+
+#endif /* __MLX5_EN_TC_CT_FS_H__ */
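
ct_fs.h above lets each steering provider declare its priv_size, so the core allocates a single struct mlx5_ct_fs with the provider's private state in the trailing flexible array. A standalone sketch of that allocation idiom (types simplified):

    #include <stdlib.h>

    struct ct_fs {
            const void *netdev;     /* header fields shared by all providers */
            char priv_data[];       /* provider-private storage, ops->priv_size bytes */
    };

    struct smfs_priv { int matcher_count; };        /* hypothetical provider state */

    static void *ct_fs_priv(struct ct_fs *fs)
    {
            return fs->priv_data;
    }

    int main(void)
    {
            size_t priv_size = sizeof(struct smfs_priv); /* would come from ops->priv_size */
            struct ct_fs *fs = calloc(1, sizeof(*fs) + priv_size);

            if (!fs)
                    return 1;
            ((struct smfs_priv *)ct_fs_priv(fs))->matcher_count = 4;
            free(fs);
            return 0;
    }
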
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c
new file mode 100644
index 0000000..ae4f55b
--- /dev/null
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#include "en_tc.h"
+#include "en/tc_ct.h"
+#include "en/tc/ct_fs.h"
+
+#define ct_dbg(fmt, args...)\
+       netdev_dbg(fs->netdev, "ct_fs_dmfs debug: " fmt "\n", ##args)
+
+struct mlx5_ct_fs_dmfs_rule {
+       struct mlx5_ct_fs_rule fs_rule;
+       struct mlx5_flow_handle *rule;
+       struct mlx5_flow_attr *attr;
+};
+
+static int
+mlx5_ct_fs_dmfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
+                    struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct)
+{
+       return 0;
+}
+
+static void
+mlx5_ct_fs_dmfs_destroy(struct mlx5_ct_fs *fs)
+{
+}
+
+static struct mlx5_ct_fs_rule *
+mlx5_ct_fs_dmfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+                           struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
+{
+       struct mlx5e_priv *priv = netdev_priv(fs->netdev);
+       struct mlx5_ct_fs_dmfs_rule *dmfs_rule;
+       int err;
+
+       dmfs_rule = kzalloc(sizeof(*dmfs_rule), GFP_KERNEL);
+       if (!dmfs_rule)
+               return ERR_PTR(-ENOMEM);
+
+       dmfs_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
+       if (IS_ERR(dmfs_rule->rule)) {
+               err = PTR_ERR(dmfs_rule->rule);
+               ct_dbg("Failed to add ct entry fs rule");
+               goto err_insert;
+       }
+
+       dmfs_rule->attr = attr;
+
+       return &dmfs_rule->fs_rule;
+
+err_insert:
+       kfree(dmfs_rule);
+       return ERR_PTR(err);
+}
+
+static void
+mlx5_ct_fs_dmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule)
+{
+       struct mlx5_ct_fs_dmfs_rule *dmfs_rule = container_of(fs_rule,
+                                                             struct mlx5_ct_fs_dmfs_rule,
+                                                             fs_rule);
+
+       mlx5_tc_rule_delete(netdev_priv(fs->netdev), dmfs_rule->rule, dmfs_rule->attr);
+       kfree(dmfs_rule);
+}
+
+static struct mlx5_ct_fs_ops dmfs_ops = {
+       .ct_rule_add = mlx5_ct_fs_dmfs_ct_rule_add,
+       .ct_rule_del = mlx5_ct_fs_dmfs_ct_rule_del,
+
+       .init = mlx5_ct_fs_dmfs_init,
+       .destroy = mlx5_ct_fs_dmfs_destroy,
+};
+
+struct mlx5_ct_fs_ops *mlx5_ct_fs_dmfs_ops_get(void)
+{
+       return &dmfs_ops;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
new file mode 100644
index 0000000..59988e2
--- /dev/null
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#include <linux/refcount.h>
+
+#include "en_tc.h"
+#include "en/tc_priv.h"
+#include "en/tc_ct.h"
+#include "en/tc/ct_fs.h"
+
+#include "lib/smfs.h"
+
+#define INIT_ERR_PREFIX "ct_fs_smfs init failed"
+#define ct_dbg(fmt, args...)\
+       netdev_dbg(fs->netdev, "ct_fs_smfs debug: " fmt "\n", ##args)
+#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)
+
+struct mlx5_ct_fs_smfs_matcher {
+       struct mlx5dr_matcher *dr_matcher;
+       struct list_head list;
+       int prio;
+       refcount_t ref;
+};
+
+struct mlx5_ct_fs_smfs_matchers {
+       struct mlx5_ct_fs_smfs_matcher smfs_matchers[4];
+       struct list_head used;
+};
+
+struct mlx5_ct_fs_smfs {
+       struct mlx5dr_table *ct_tbl, *ct_nat_tbl;
+       struct mlx5_ct_fs_smfs_matchers matchers;
+       struct mlx5_ct_fs_smfs_matchers matchers_nat;
+       struct mlx5dr_action *fwd_action;
+       struct mlx5_flow_table *ct_nat;
+       struct mutex lock; /* Guards matchers */
+};
+
+struct mlx5_ct_fs_smfs_rule {
+       struct mlx5_ct_fs_rule fs_rule;
+       struct mlx5dr_rule *rule;
+       struct mlx5dr_action *count_action;
+       struct mlx5_ct_fs_smfs_matcher *smfs_matcher;
+};
+
+static inline void
+mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp)
+{
+       void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
+
+       if (likely(MLX5_CAP_FLOWTABLE_NIC_RX(fs->dev, ft_field_support.outer_ip_version)))
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
+       else
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
+
+       MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
+       if (likely(ipv4)) {
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
+                                src_ipv4_src_ipv6.ipv4_layout.ipv4);
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
+                                dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+       } else {
+               memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+                                   dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+                      0xFF,
+                      MLX5_FLD_SZ_BYTES(fte_match_set_lyr_2_4,
+                                        dst_ipv4_dst_ipv6.ipv6_layout.ipv6));
+               memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+                                   src_ipv4_src_ipv6.ipv6_layout.ipv6),
+                      0xFF,
+                      MLX5_FLD_SZ_BYTES(fte_match_set_lyr_2_4,
+                                        src_ipv4_src_ipv6.ipv6_layout.ipv6));
+       }
+
+       if (likely(tcp)) {
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_sport);
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_dport);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
+                        ntohs(MLX5_CT_TCP_FLAGS_MASK));
+       } else {
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_sport);
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_dport);
+       }
+
+       mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, 0, MLX5_CT_ZONE_MASK);
+}
+
+static struct mlx5dr_matcher *
+mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl, bool ipv4,
+                              bool tcp, u32 priority)
+{
+       struct mlx5dr_matcher *dr_matcher;
+       struct mlx5_flow_spec *spec;
+
+       spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+       if (!spec)
+               return ERR_PTR(-ENOMEM);
+
+       mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp);
+       spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;
+
+       dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec);
+       kfree(spec);
+       if (!dr_matcher)
+               return ERR_PTR(-EINVAL);
+
+       return dr_matcher;
+}
+
+static struct mlx5_ct_fs_smfs_matcher *
+mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp)
+{
+       struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+       struct mlx5_ct_fs_smfs_matcher *m, *smfs_matcher;
+       struct mlx5_ct_fs_smfs_matchers *matchers;
+       struct mlx5dr_matcher *dr_matcher;
+       struct mlx5dr_table *tbl;
+       struct list_head *prev;
+       int prio;
+
+       matchers = nat ? &fs_smfs->matchers_nat : &fs_smfs->matchers;
+       smfs_matcher = &matchers->smfs_matchers[ipv4 * 2 + tcp];
+
+       if (refcount_inc_not_zero(&smfs_matcher->ref))
+               return smfs_matcher;
+
+       mutex_lock(&fs_smfs->lock);
+
+       /* Retry under the lock, as another thread might have already created
+        * the relevant matcher before we acquired it.
+        */
+       if (refcount_inc_not_zero(&smfs_matcher->ref))
+               goto out_unlock;
+
+       /* Find the next available priority in the sorted used list */
+       prio = 0;
+       prev = &matchers->used;
+       list_for_each_entry(m, &matchers->used, list) {
+               prev = &m->list;
+
+               if (m->prio == prio)
+                       prio = m->prio + 1;
+               else
+                       break;
+       }
+
+       tbl = nat ? fs_smfs->ct_nat_tbl : fs_smfs->ct_tbl;
+       dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, prio);
+       if (IS_ERR(dr_matcher)) {
+               netdev_warn(fs->netdev,
+                           "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d), err: %ld\n",
+                           nat, ipv4, tcp, PTR_ERR(dr_matcher));
+
+               smfs_matcher = ERR_CAST(dr_matcher);
+               goto out_unlock;
+       }
+
+       smfs_matcher->dr_matcher = dr_matcher;
+       smfs_matcher->prio = prio;
+       list_add(&smfs_matcher->list, prev);
+       refcount_set(&smfs_matcher->ref, 1);
+
+out_unlock:
+       mutex_unlock(&fs_smfs->lock);
+       return smfs_matcher;
+}
+
+static void
+mlx5_ct_fs_smfs_matcher_put(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_smfs_matcher *smfs_matcher)
+{
+       struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+
+       if (!refcount_dec_and_mutex_lock(&smfs_matcher->ref, &fs_smfs->lock))
+               return;
+
+       mlx5_smfs_matcher_destroy(smfs_matcher->dr_matcher);
+       list_del(&smfs_matcher->list);
+       mutex_unlock(&fs_smfs->lock);
+}
+
+static int
+mlx5_ct_fs_smfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
+                    struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct)
+{
+       struct mlx5dr_table *ct_tbl, *ct_nat_tbl, *post_ct_tbl;
+       struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+
+       post_ct_tbl = mlx5_smfs_table_get_from_fs_ft(post_ct);
+       ct_nat_tbl = mlx5_smfs_table_get_from_fs_ft(ct_nat);
+       ct_tbl = mlx5_smfs_table_get_from_fs_ft(ct);
+       fs_smfs->ct_nat = ct_nat;
+
+       if (!ct_tbl || !ct_nat_tbl || !post_ct_tbl) {
+               netdev_warn(fs->netdev, "ct_fs_smfs: failed to init, missing backing dr tables");
+               return -EOPNOTSUPP;
+       }
+
+       ct_dbg("using smfs steering");
+
+       fs_smfs->fwd_action = mlx5_smfs_action_create_dest_table(post_ct_tbl);
+       if (!fs_smfs->fwd_action)
+               return -EINVAL;
+
+       fs_smfs->ct_tbl = ct_tbl;
+       fs_smfs->ct_nat_tbl = ct_nat_tbl;
+       mutex_init(&fs_smfs->lock);
+       INIT_LIST_HEAD(&fs_smfs->matchers.used);
+       INIT_LIST_HEAD(&fs_smfs->matchers_nat.used);
+
+       return 0;
+}
+
+static void
+mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
+{
+       struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+
+       mlx5_smfs_action_destroy(fs_smfs->fwd_action);
+}
+
+static inline bool
+mlx5_tc_ct_valid_used_dissector_keys(const u32 used_keys)
+{
+#define DISSECTOR_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
+       const u32 basic_keys = DISSECTOR_BIT(BASIC) | DISSECTOR_BIT(CONTROL) |
+                              DISSECTOR_BIT(PORTS) | DISSECTOR_BIT(META);
+       const u32 ipv4_tcp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS) | DISSECTOR_BIT(TCP);
+       const u32 ipv4_udp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS);
+       const u32 ipv6_tcp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS) | DISSECTOR_BIT(TCP);
+       const u32 ipv6_udp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS);
+
+       return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
+               used_keys == ipv6_udp);
+}
+
+static bool
+mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *flow_rule)
+{
+       struct flow_match_ipv4_addrs ipv4_addrs;
+       struct flow_match_ipv6_addrs ipv6_addrs;
+       struct flow_match_control control;
+       struct flow_match_basic basic;
+       struct flow_match_ports ports;
+       struct flow_match_tcp tcp;
+
+       if (!mlx5_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
+               ct_dbg("rule uses unexpected dissectors (0x%08x)",
+                      flow_rule->match.dissector->used_keys);
+               return false;
+       }
+
+       flow_rule_match_basic(flow_rule, &basic);
+       flow_rule_match_control(flow_rule, &control);
+       flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
+       flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
+       flow_rule_match_ports(flow_rule, &ports);
+       flow_rule_match_tcp(flow_rule, &tcp);
+
+       if (basic.mask->n_proto != htons(0xFFFF) ||
+           (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
+           basic.mask->ip_proto != 0xFF ||
+           (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP)) {
+               ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
+                      ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
+                      basic.key->ip_proto, basic.mask->ip_proto);
+               return false;
+       }
+
+       if (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF)) {
+               ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)",
+                      ports.mask->src, ports.mask->dst);
+               return false;
+       }
+
+       if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) {
+               ct_dbg("rule uses unexpected tcp match (flags 0x%02x)", tcp.mask->flags);
+               return false;
+       }
+
+       return true;
+}
+
+static struct mlx5_ct_fs_rule *
+mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+                           struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
+{
+       struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+       struct mlx5_ct_fs_smfs_matcher *smfs_matcher;
+       struct mlx5_ct_fs_smfs_rule *smfs_rule;
+       struct mlx5dr_action *actions[5];
+       struct mlx5dr_rule *rule;
+       int num_actions = 0, err;
+       bool nat, tcp, ipv4;
+
+       if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
+               return ERR_PTR(-EOPNOTSUPP);
+
+       smfs_rule = kzalloc(sizeof(*smfs_rule), GFP_KERNEL);
+       if (!smfs_rule)
+               return ERR_PTR(-ENOMEM);
+
+       smfs_rule->count_action = mlx5_smfs_action_create_flow_counter(mlx5_fc_id(attr->counter));
+       if (!smfs_rule->count_action) {
+               err = -EINVAL;
+               goto err_count;
+       }
+
+       actions[num_actions++] = smfs_rule->count_action;
+       actions[num_actions++] = attr->modify_hdr->action.dr_action;
+       actions[num_actions++] = fs_smfs->fwd_action;
+
+       nat = (attr->ft == fs_smfs->ct_nat);
+       ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
+       tcp = MLX5_GET(fte_match_param, spec->match_value,
+                      outer_headers.ip_protocol) == IPPROTO_TCP;
+
+       smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp);
+       if (IS_ERR(smfs_matcher)) {
+               err = PTR_ERR(smfs_matcher);
+               goto err_matcher;
+       }
+
+       rule = mlx5_smfs_rule_create(smfs_matcher->dr_matcher, spec, num_actions, actions,
+                                    MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT);
+       if (!rule) {
+               err = -EINVAL;
+               goto err_create;
+       }
+
+       smfs_rule->rule = rule;
+       smfs_rule->smfs_matcher = smfs_matcher;
+
+       return &smfs_rule->fs_rule;
+
+err_create:
+       mlx5_ct_fs_smfs_matcher_put(fs, smfs_matcher);
+err_matcher:
+       mlx5_smfs_action_destroy(smfs_rule->count_action);
+err_count:
+       kfree(smfs_rule);
+       return ERR_PTR(err);
+}
+
+static void
+mlx5_ct_fs_smfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule)
+{
+       struct mlx5_ct_fs_smfs_rule *smfs_rule = container_of(fs_rule,
+                                                             struct mlx5_ct_fs_smfs_rule,
+                                                             fs_rule);
+
+       mlx5_smfs_rule_destroy(smfs_rule->rule);
+       mlx5_ct_fs_smfs_matcher_put(fs, smfs_rule->smfs_matcher);
+       mlx5_smfs_action_destroy(smfs_rule->count_action);
+       kfree(smfs_rule);
+}
+
+static struct mlx5_ct_fs_ops fs_smfs_ops = {
+       .ct_rule_add = mlx5_ct_fs_smfs_ct_rule_add,
+       .ct_rule_del = mlx5_ct_fs_smfs_ct_rule_del,
+
+       .init = mlx5_ct_fs_smfs_init,
+       .destroy = mlx5_ct_fs_smfs_destroy,
+
+       .priv_size = sizeof(struct mlx5_ct_fs_smfs),
+};
+
+struct mlx5_ct_fs_ops *
+mlx5_ct_fs_smfs_ops_get(void)
+{
+       return &fs_smfs_ops;
+}
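
The smfs provider above keeps exactly four matchers per table, indexed by the (ipv4, tcp) pair, each created lazily and refcounted so concurrent ct_rule_add calls share one dr matcher. A standalone illustration of the index mapping:

    #include <stdio.h>

    int main(void)
    {
            static const char *combos[4] = {
                    "ipv6/udp", "ipv6/tcp", "ipv4/udp", "ipv4/tcp"
            };
            int ipv4, tcp;

            for (ipv4 = 0; ipv4 < 2; ipv4++)
                    for (tcp = 0; tcp < 2; tcp++)
                            printf("smfs_matchers[%d] -> %s\n",
                                   ipv4 * 2 + tcp, combos[ipv4 * 2 + tcp]);
            return 0;
    }
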
index 875e77a..e49f511 100644
@@ -18,6 +18,7 @@
 
 #include "lib/fs_chains.h"
 #include "en/tc_ct.h"
+#include "en/tc/ct_fs.h"
 #include "en/tc_priv.h"
 #include "en/mod_hdr.h"
 #include "en/mapping.h"
@@ -25,9 +26,8 @@
 #include "en.h"
 #include "en_tc.h"
 #include "en_rep.h"
+#include "fs_core.h"
 
-#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen)
-#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0)
 #define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1)
 #define MLX5_CT_STATE_TRK_BIT BIT(2)
 #define MLX5_CT_STATE_NAT_BIT BIT(3)
@@ -63,6 +63,8 @@ struct mlx5_tc_ct_priv {
        struct mapping_ctx *labels_mapping;
        enum mlx5_flow_namespace_type ns_type;
        struct mlx5_fs_chains *chains;
+       struct mlx5_ct_fs *fs;
+       struct mlx5_ct_fs_ops *fs_ops;
        spinlock_t ht_lock; /* protects ft entries */
 };
 
@@ -74,7 +76,7 @@ struct mlx5_ct_flow {
 };
 
 struct mlx5_ct_zone_rule {
-       struct mlx5_flow_handle *rule;
+       struct mlx5_ct_fs_rule *rule;
        struct mlx5e_mod_hdr_handle *mh;
        struct mlx5_flow_attr *attr;
        bool nat;
@@ -258,7 +260,8 @@ mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
                        return -EOPNOTSUPP;
                }
        } else {
-               return -EOPNOTSUPP;
+               if (tuple->ip_proto != IPPROTO_GRE)
+                       return -EOPNOTSUPP;
        }
 
        return 0;
@@ -505,7 +508,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
 
        ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone);
 
-       mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
+       ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule);
        mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
        mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
        kfree(attr);
@@ -807,7 +810,11 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
        attr->dest_chain = 0;
        attr->dest_ft = mlx5e_tc_post_act_get_ft(ct_priv->post_act);
        attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
-       attr->outer_match_level = MLX5_MATCH_L4;
+       if (entry->tuple.ip_proto == IPPROTO_TCP ||
+           entry->tuple.ip_proto == IPPROTO_UDP)
+               attr->outer_match_level = MLX5_MATCH_L4;
+       else
+               attr->outer_match_level = MLX5_MATCH_L3;
        attr->counter = entry->counter->counter;
        attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
        if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB)
@@ -816,7 +823,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
        mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
        mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
 
-       zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
+       zone_rule->rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule);
        if (IS_ERR(zone_rule->rule)) {
                err = PTR_ERR(zone_rule->rule);
                ct_dbg("Failed to add ct entry rule, nat: %d", nat);
@@ -1154,7 +1161,6 @@ mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
        }
 
        rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
-       mlx5_tc_ct_entry_remove_from_tuples(entry);
        spin_unlock_bh(&ct_priv->ht_lock);
 
        mlx5_tc_ct_entry_put(entry);
@@ -1224,16 +1230,20 @@ mlx5_tc_ct_skb_to_tuple(struct sk_buff *skb, struct mlx5_ct_tuple *tuple,
        struct flow_keys flow_keys;
 
        skb_reset_network_header(skb);
-       skb_flow_dissect_flow_keys(skb, &flow_keys, 0);
+       skb_flow_dissect_flow_keys(skb, &flow_keys, FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);
 
        tuple->zone = zone;
 
        if (flow_keys.basic.ip_proto != IPPROTO_TCP &&
-           flow_keys.basic.ip_proto != IPPROTO_UDP)
+           flow_keys.basic.ip_proto != IPPROTO_UDP &&
+           flow_keys.basic.ip_proto != IPPROTO_GRE)
                return false;
 
-       tuple->port.src = flow_keys.ports.src;
-       tuple->port.dst = flow_keys.ports.dst;
+       if (flow_keys.basic.ip_proto == IPPROTO_TCP ||
+           flow_keys.basic.ip_proto == IPPROTO_UDP) {
+               tuple->port.src = flow_keys.ports.src;
+               tuple->port.dst = flow_keys.ports.dst;
+       }
        tuple->n_proto = flow_keys.basic.n_proto;
        tuple->ip_proto = flow_keys.basic.ip_proto;
 
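
With the hunk above, tuple extraction accepts GRE alongside TCP and UDP, but only the port-bearing protocols fill the port fields. The acceptance rule, reduced to a sketch (protocol numbers per IANA):

    #include <stdbool.h>
    #include <stdio.h>

    #define IPPROTO_TCP  6
    #define IPPROTO_UDP 17
    #define IPPROTO_GRE 47

    static bool tuple_proto_ok(int ip_proto)
    {
            return ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP ||
                   ip_proto == IPPROTO_GRE;
    }

    static bool tuple_has_ports(int ip_proto)
    {
            return ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP;
    }

    int main(void)
    {
            printf("gre: ok=%d ports=%d\n", tuple_proto_ok(IPPROTO_GRE),
                   tuple_has_ports(IPPROTO_GRE));       /* ok=1 ports=0 */
            return 0;
    }
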
@@ -1960,6 +1970,38 @@ mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
        mutex_unlock(&priv->control_lock);
 }
 
+static int
+mlx5_tc_ct_fs_init(struct mlx5_tc_ct_priv *ct_priv)
+{
+       struct mlx5_flow_table *post_ct = mlx5e_tc_post_act_get_ft(ct_priv->post_act);
+       struct mlx5_ct_fs_ops *fs_ops = mlx5_ct_fs_dmfs_ops_get();
+       int err;
+
+       if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
+           ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) {
+               ct_dbg("Using SMFS ct flow steering provider");
+               fs_ops = mlx5_ct_fs_smfs_ops_get();
+       }
+
+       ct_priv->fs = kzalloc(sizeof(*ct_priv->fs) + fs_ops->priv_size, GFP_KERNEL);
+       if (!ct_priv->fs)
+               return -ENOMEM;
+
+       ct_priv->fs->netdev = ct_priv->netdev;
+       ct_priv->fs->dev = ct_priv->dev;
+       ct_priv->fs_ops = fs_ops;
+
+       err = ct_priv->fs_ops->init(ct_priv->fs, ct_priv->ct, ct_priv->ct_nat, post_ct);
+       if (err)
+               goto err_init;
+
+       return 0;
+
+err_init:
+       kfree(ct_priv->fs);
+       return err;
+}
+
 static int
 mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw,
                                  const char **err_msg)
@@ -2098,8 +2140,14 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
        if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params))
                goto err_ct_tuples_nat_ht;
 
+       err = mlx5_tc_ct_fs_init(ct_priv);
+       if (err)
+               goto err_init_fs;
+
        return ct_priv;
 
+err_init_fs:
+       rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
 err_ct_tuples_nat_ht:
        rhashtable_destroy(&ct_priv->ct_tuples_ht);
 err_ct_tuples_ht:
@@ -2130,6 +2178,9 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
 
        chains = ct_priv->chains;
 
+       ct_priv->fs_ops->destroy(ct_priv->fs);
+       kfree(ct_priv->fs);
+
        mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
        mlx5_chains_destroy_global_table(chains, ct_priv->ct);
        mapping_destroy(ct_priv->zone_mapping);
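
mlx5_tc_ct_fs_init() above defaults to the dmfs provider and switches to smfs only when the namespace is FDB and the device is in SMFS steering mode; mlx5_tc_ct_clean() then tears the provider down again. The selection logic, reduced to a sketch (enum values illustrative):

    #include <stdio.h>

    enum ns_type  { NS_KERNEL, NS_FDB };
    enum steering { MODE_DMFS, MODE_SMFS };

    static const char *pick_ct_fs(enum ns_type ns, enum steering mode)
    {
            if (ns == NS_FDB && mode == MODE_SMFS)
                    return "smfs";
            return "dmfs";  /* default provider */
    }

    int main(void)
    {
            printf("%s\n", pick_ct_fs(NS_FDB, MODE_SMFS));    /* smfs */
            printf("%s\n", pick_ct_fs(NS_FDB, MODE_DMFS));    /* dmfs */
            printf("%s\n", pick_ct_fs(NS_KERNEL, MODE_SMFS)); /* dmfs */
            return 0;
    }
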
index 2b21c7b..36d3652 100644
@@ -86,6 +86,8 @@ struct mlx5_ct_attr {
 
 #define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen)
 #define REG_MAPPING_MOFFSET(reg) (mlx5e_tc_attr_to_reg_mappings[reg].moffset)
+#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen)
+#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0)
 
 #if IS_ENABLED(CONFIG_MLX5_TC_CT)
 
index 03c953d..3b74a6f 100644
@@ -41,7 +41,6 @@ struct mlx5e_tc_flow_parse_attr {
        struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
        struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
        int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
-       struct ethhdr eth;
        struct mlx5e_tc_act_parse_state parse_state;
 };
 
index 5105c80..5aff979 100644
@@ -906,20 +906,18 @@ int mlx5e_attach_decap(struct mlx5e_priv *priv,
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
        struct mlx5_pkt_reformat_params reformat_params;
-       struct mlx5e_tc_flow_parse_attr *parse_attr;
        struct mlx5e_decap_entry *d;
        struct mlx5e_decap_key key;
        uintptr_t hash_key;
        int err = 0;
 
-       parse_attr = flow->attr->parse_attr;
-       if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
+       if (sizeof(attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "encap header larger than max supported");
                return -EOPNOTSUPP;
        }
 
-       key.key = parse_attr->eth;
+       key.key = attr->eth;
        hash_key = hash_decap_info(&key);
        mutex_lock(&esw->offloads.decap_tbl_lock);
        d = mlx5e_decap_get(priv, &key, hash_key);
@@ -949,8 +947,8 @@ int mlx5e_attach_decap(struct mlx5e_priv *priv,
 
        memset(&reformat_params, 0, sizeof(reformat_params));
        reformat_params.type = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
-       reformat_params.size = sizeof(parse_attr->eth);
-       reformat_params.data = &parse_attr->eth;
+       reformat_params.size = sizeof(attr->eth);
+       reformat_params.data = &attr->eth;
        d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
                                                     &reformat_params,
                                                     MLX5_FLOW_NAMESPACE_FDB);
index da169b8..d4239e3 100644
@@ -88,9 +88,6 @@ void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder,
                         (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
                MLX5_SET(tirc, tirc, lro_timeout_period_usecs, pkt_merge_param->timeout);
                break;
-       case MLX5E_PACKET_MERGE_SHAMPO:
-               MLX5_SET(tirc, tirc, packet_merge_mask, MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO);
-               break;
        default:
                break;
        }
index 210d23b..c208ea3 100644
@@ -44,10 +44,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
 
 /* RX */
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
-void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
-                               struct mlx5e_dma_info *dma_info,
-                               bool recycle);
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page);
+void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle);
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
index a7f0203..f35b62c 100644
@@ -57,12 +57,14 @@ int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
 
 static inline bool
 mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
-                   struct mlx5e_dma_info *di, struct xdp_buff *xdp)
+                   struct page *page, struct xdp_buff *xdp)
 {
+       struct skb_shared_info *sinfo = NULL;
        struct mlx5e_xmit_data xdptxd;
        struct mlx5e_xdp_info xdpi;
        struct xdp_frame *xdpf;
        dma_addr_t dma_addr;
+       int i;
 
        xdpf = xdp_convert_buff_to_frame(xdp);
        if (unlikely(!xdpf))
@@ -96,46 +98,77 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
                xdptxd.dma_addr     = dma_addr;
                xdpi.frame.xdpf     = xdpf;
                xdpi.frame.dma_addr = dma_addr;
-       } else {
-               /* Driver assumes that xdp_convert_buff_to_frame returns
-                * an xdp_frame that points to the same memory region as
-                * the original xdp_buff. It allows to map the memory only
-                * once and to use the DMA_BIDIRECTIONAL mode.
-                */
 
-               xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
+               if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
+                                             mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0)))
+                       return false;
+
+               mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+               return true;
+       }
+
+       /* The driver assumes that xdp_convert_buff_to_frame returns an
+        * xdp_frame that points to the same memory region as the original
+        * xdp_buff, which allows mapping the memory only once and using the
+        * DMA_BIDIRECTIONAL mode.
+        */
+
+       xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
+       xdpi.page.rq = rq;
+
+       dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
+       dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_TO_DEVICE);
+
+       if (unlikely(xdp_frame_has_frags(xdpf))) {
+               sinfo = xdp_get_shared_info_from_frame(xdpf);
+
+               for (i = 0; i < sinfo->nr_frags; i++) {
+                       skb_frag_t *frag = &sinfo->frags[i];
+                       dma_addr_t addr;
+                       u32 len;
+
+                       addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
+                               skb_frag_off(frag);
+                       len = skb_frag_size(frag);
+                       dma_sync_single_for_device(sq->pdev, addr, len,
+                                                  DMA_TO_DEVICE);
+               }
+       }
+
+       xdptxd.dma_addr = dma_addr;
+
+       if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
+                                     mlx5e_xmit_xdp_frame, sq, &xdptxd, sinfo, 0)))
+               return false;
+
+       xdpi.page.page = page;
+       mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
 
-               dma_addr = di->addr + (xdpf->data - (void *)xdpf);
-               dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len,
-                                          DMA_TO_DEVICE);
+       if (unlikely(xdp_frame_has_frags(xdpf))) {
+               for (i = 0; i < sinfo->nr_frags; i++) {
+                       skb_frag_t *frag = &sinfo->frags[i];
 
-               xdptxd.dma_addr = dma_addr;
-               xdpi.page.rq    = rq;
-               xdpi.page.di    = *di;
+                       xdpi.page.page = skb_frag_page(frag);
+                       mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+               }
        }
 
-       return INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
-                              mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
+       return true;
 }
 
 /* returns true if packet was consumed by xdp */
-bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
-                     u32 *len, struct xdp_buff *xdp)
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
+                     struct bpf_prog *prog, struct xdp_buff *xdp)
 {
-       struct bpf_prog *prog = rcu_dereference(rq->xdp_prog);
        u32 act;
        int err;
 
-       if (!prog)
-               return false;
-
        act = bpf_prog_run_xdp(prog, xdp);
        switch (act) {
        case XDP_PASS:
-               *len = xdp->data_end - xdp->data;
                return false;
        case XDP_TX:
-               if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, xdp)))
+               if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, page, xdp)))
                        goto xdp_abort;
                __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
                return true;
@@ -147,7 +180,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
                __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
                __set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
                if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
-                       mlx5e_page_dma_unmap(rq, di);
+                       mlx5e_page_dma_unmap(rq, page);
                rq->stats->xdp_redirect++;
                return true;
        default:
@@ -259,13 +292,27 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq
        return MLX5E_XDP_CHECK_OK;
 }
 
+INDIRECT_CALLABLE_SCOPE bool
+mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
+                    struct skb_shared_info *sinfo, int check_result);
+
 INDIRECT_CALLABLE_SCOPE bool
 mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
-                          struct mlx5e_xdp_info *xdpi, int check_result)
+                          struct skb_shared_info *sinfo, int check_result)
 {
        struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
        struct mlx5e_xdpsq_stats *stats = sq->stats;
 
+       if (unlikely(sinfo)) {
+               /* MPWQE is enabled, but a multi-buffer packet is queued for
+                * transmission. MPWQE can't send fragmented packets, so close
+                * the current session and fall back to a regular WQE.
+                */
+               if (unlikely(sq->mpwqe.wqe))
+                       mlx5e_xdp_mpwqe_complete(sq);
+               return mlx5e_xmit_xdp_frame(sq, xdptxd, sinfo, 0);
+       }
+
        if (unlikely(xdptxd->len > sq->hw_mtu)) {
                stats->err++;
                return false;
@@ -289,14 +336,13 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
        if (unlikely(mlx5e_xdp_mpqwe_is_full(session, sq->max_sq_mpw_wqebbs)))
                mlx5e_xdp_mpwqe_complete(sq);
 
-       mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
        stats->xmit++;
        return true;
 }
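
The hunk above captures the MPWQE limitation in a single branch: the multi-packet session format cannot describe fragments, so a packet arriving with a non-NULL sinfo flushes whatever the open session holds and is rerouted to the plain-WQE transmit function. A compilable sketch of that dispatch shape, assuming a simple open-session flag (hypothetical names):

#include <stdbool.h>

struct sq { bool session_open; };

static void close_session(struct sq *sq) { sq->session_open = false; }

static bool xmit_plain(struct sq *sq, const void *frags)
{
        (void)sq; (void)frags;
        return true;
}

/* Batched path: punt fragmented packets to the plain path, flushing the
 * session first so descriptors keep their order on the wire.
 */
static bool xmit_batched(struct sq *sq, const void *frags)
{
        if (frags) {
                if (sq->session_open)
                        close_session(sq);
                return xmit_plain(sq, frags);
        }
        /* ... single-buffer MPWQE handling continues here ... */
        return true;
}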
 
-INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
+static int mlx5e_xmit_xdp_frame_check_stop_room(struct mlx5e_xdpsq *sq, int stop_room)
 {
-       if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
+       if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, stop_room))) {
                /* SQ is full, ring doorbell */
                mlx5e_xmit_xdp_doorbell(sq);
                sq->stats->full++;
@@ -306,45 +352,76 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
        return MLX5E_XDP_CHECK_OK;
 }
 
+INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
+{
+       return mlx5e_xmit_xdp_frame_check_stop_room(sq, 1);
+}
+
 INDIRECT_CALLABLE_SCOPE bool
 mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
-                    struct mlx5e_xdp_info *xdpi, int check_result)
+                    struct skb_shared_info *sinfo, int check_result)
 {
        struct mlx5_wq_cyc       *wq   = &sq->wq;
-       u16                       pi   = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-       struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
-
-       struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
-       struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
-       struct mlx5_wqe_data_seg *dseg = wqe->data;
+       struct mlx5_wqe_ctrl_seg *cseg;
+       struct mlx5_wqe_data_seg *dseg;
+       struct mlx5_wqe_eth_seg *eseg;
+       struct mlx5e_tx_wqe *wqe;
 
        dma_addr_t dma_addr = xdptxd->dma_addr;
        u32 dma_len = xdptxd->len;
+       u16 ds_cnt, inline_hdr_sz;
+       u8 num_wqebbs = 1;
+       int num_frags = 0;
+       u16 pi;
 
        struct mlx5e_xdpsq_stats *stats = sq->stats;
 
-       net_prefetchw(wqe);
-
        if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
                stats->err++;
                return false;
        }
 
-       if (!check_result)
-               check_result = mlx5e_xmit_xdp_frame_check(sq);
+       ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
+       if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE)
+               ds_cnt++;
+
+       /* check_result must be 0 if sinfo is passed. */
+       if (!check_result) {
+               int stop_room = 1;
+
+               if (unlikely(sinfo)) {
+                       ds_cnt += sinfo->nr_frags;
+                       num_frags = sinfo->nr_frags;
+                       num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
+                       /* Assuming MLX5_CAP_GEN(mdev, max_wqe_sz_sq) is big
+                        * enough to hold all fragments.
+                        */
+                       stop_room = MLX5E_STOP_ROOM(num_wqebbs);
+               }
+
+               check_result = mlx5e_xmit_xdp_frame_check_stop_room(sq, stop_room);
+       }
        if (unlikely(check_result < 0))
                return false;
 
-       cseg->fm_ce_se = 0;
+       pi = mlx5e_xdpsq_get_next_pi(sq, num_wqebbs);
+       wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+       net_prefetchw(wqe);
+
+       cseg = &wqe->ctrl;
+       eseg = &wqe->eth;
+       dseg = wqe->data;
+
+       inline_hdr_sz = 0;
 
        /* copy the inline part if required */
        if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
                memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start));
-               eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
                memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start),
                       MLX5E_XDP_MIN_INLINE - sizeof(eseg->inline_hdr.start));
                dma_len  -= MLX5E_XDP_MIN_INLINE;
                dma_addr += MLX5E_XDP_MIN_INLINE;
+               inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
                dseg++;
        }
 
@@ -354,11 +431,46 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 
        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
 
-       sq->pc++;
+       if (unlikely(test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state))) {
+               u8 num_pkts = 1 + num_frags;
+               int i;
+
+               memset(&cseg->signature, 0, sizeof(*cseg) -
+                      sizeof(cseg->opmod_idx_opcode) - sizeof(cseg->qpn_ds));
+               memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
+
+               eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+               dseg->lkey = sq->mkey_be;
+
+               for (i = 0; i < num_frags; i++) {
+                       skb_frag_t *frag = &sinfo->frags[i];
+                       dma_addr_t addr;
+
+                       addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
+                               skb_frag_off(frag);
+
+                       dseg++;
+                       dseg->addr = cpu_to_be64(addr);
+                       dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
+                       dseg->lkey = sq->mkey_be;
+               }
+
+               cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+
+               sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
+                       .num_wqebbs = num_wqebbs,
+                       .num_pkts = num_pkts,
+               };
+
+               sq->pc += num_wqebbs;
+       } else {
+               cseg->fm_ce_se = 0;
+
+               sq->pc++;
+       }
 
        sq->doorbell_cseg = cseg;
 
-       mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
        stats->xmit++;
        return true;
 }
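
The stop-room logic above sizes a multi-buffer WQE in WQEBB basic blocks: a fixed control/eth overhead counted in data segments, one segment for the head (plus one when an inline header is used), one per fragment, rounded up by how many segments fit in a block. A standalone rendering of that arithmetic; the two constants are assumed for illustration, not taken from mlx5 headers:

#include <stdio.h>

#define EMPTY_DS_COUNT 2 /* assumed ctrl + eth segment overhead */
#define WQEBB_NUM_DS   4 /* assumed data segments per basic block */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int nr_frags;

        for (nr_frags = 0; nr_frags <= 5; nr_frags++) {
                int ds_cnt = EMPTY_DS_COUNT + 1 /* head */
                             + 1 /* inline header */ + nr_frags;
                int num_wqebbs = DIV_ROUND_UP(ds_cnt, WQEBB_NUM_DS);

                printf("frags=%d ds_cnt=%d wqebbs=%d\n",
                       nr_frags, ds_cnt, num_wqebbs);
        }
        return 0;
}

This is also why check_result must be 0 when sinfo is passed: a precomputed single-WQEBB check would understate the room a fragmented packet needs.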
@@ -384,7 +496,7 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
                        break;
                case MLX5E_XDP_XMIT_MODE_PAGE:
                        /* XDP_TX from the regular RQ */
-                       mlx5e_page_release_dynamic(xdpi.page.rq, &xdpi.page.di, recycle);
+                       mlx5e_page_release_dynamic(xdpi.page.rq, xdpi.page.page, recycle);
                        break;
                case MLX5E_XDP_XMIT_MODE_XSK:
                        /* AF_XDP send */
@@ -537,12 +649,13 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
                xdpi.frame.dma_addr = xdptxd.dma_addr;
 
                ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
-                                     mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
+                                     mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0);
                if (unlikely(!ret)) {
                        dma_unmap_single(sq->pdev, xdptxd.dma_addr,
                                         xdptxd.len, DMA_TO_DEVICE);
                        break;
                }
+               mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
                nxmit++;
        }
 
index c62f11d..287e179 100644 (file)
@@ -38,7 +38,6 @@
 #include "en/txrx.h"
 
 #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_TX_DS_COUNT (MLX5E_TX_WQE_EMPTY_DS_COUNT + 1 /* SG DS */)
 
 #define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT 16
 #define MLX5E_XDP_INLINE_WQE_SZ_THRSD \
@@ -47,8 +46,8 @@
 
 struct mlx5e_xsk_param;
 int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
-bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
-                     u32 *len, struct xdp_buff *xdp);
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
+                     struct bpf_prog *prog, struct xdp_buff *xdp);
 void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
 void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
@@ -59,11 +58,11 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
                                                          struct mlx5e_xmit_data *xdptxd,
-                                                         struct mlx5e_xdp_info *xdpi,
+                                                         struct skb_shared_info *sinfo,
                                                          int check_result));
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
                                                    struct mlx5e_xmit_data *xdptxd,
-                                                   struct mlx5e_xdp_info *xdpi,
+                                                   struct skb_shared_info *sinfo,
                                                    int check_result));
 INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
 INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
index 8e7b877..021da08 100644 (file)
@@ -4,6 +4,7 @@
 #include "rx.h"
 #include "en/xdp.h"
 #include <net/xdp_sock_drv.h>
+#include <linux/filter.h>
 
 /* RX data path */
 
@@ -30,7 +31,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
                                                    u32 page_idx)
 {
        struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
-       u32 cqe_bcnt32 = cqe_bcnt;
+       struct bpf_prog *prog;
 
        /* Check packet size. Note LRO doesn't use linear SKB */
        if (unlikely(cqe_bcnt > rq->hw_mtu)) {
@@ -45,7 +46,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
         */
        WARN_ON_ONCE(head_offset);
 
-       xdp->data_end = xdp->data + cqe_bcnt32;
+       xdp->data_end = xdp->data + cqe_bcnt;
        xdp_set_data_meta_invalid(xdp);
        xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
        net_prefetch(xdp->data);
@@ -65,7 +66,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
         * allocated first from the Reuse Ring, so it has enough space.
         */
 
-       if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp))) {
+       prog = rcu_dereference(rq->xdp_prog);
+       if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp))) {
                if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
                        __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
                return NULL; /* page/packet was consumed by XDP */
@@ -74,7 +76,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
        /* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
         * frame. On SKB allocation failure, NULL is returned.
         */
-       return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt32);
+       return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
 }
 
 struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
@@ -83,6 +85,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
                                              u32 cqe_bcnt)
 {
        struct xdp_buff *xdp = wi->di->xsk;
+       struct bpf_prog *prog;
 
        /* wi->offset is not used in this function, because xdp->data and the
         * DMA address point directly to the necessary place. Furthermore, the
@@ -101,12 +104,13 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
                return NULL;
        }
 
-       if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp)))
+       prog = rcu_dereference(rq->xdp_prog);
+       if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp)))
                return NULL; /* page/packet was consumed by XDP */
 
        /* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
         * will be handled by mlx5e_put_rx_frag.
         * On SKB allocation failure, NULL is returned.
         */
-       return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt);
+       return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
 }
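
Both XSK receive paths now read rq->xdp_prog once in the caller and skip mlx5e_xdp_handle() entirely when no program is attached, rather than dereferencing inside the handler for every packet. The sketch below models the idea with a C11 atomic load standing in for rcu_dereference(), which is an approximation: the kernel primitive additionally carries RCU lockdep checking.

#include <stdatomic.h>
#include <stdbool.h>

struct prog { int id; };
struct rq { _Atomic(struct prog *) xdp_prog; };

static bool run_prog(struct prog *p) { return p->id != 0; }

/* Load the program pointer once per packet; with no program attached
 * the cost is this single load and the XDP branch vanishes.
 */
static bool handle_rx(struct rq *rq)
{
        struct prog *prog = atomic_load_explicit(&rq->xdp_prog,
                                                 memory_order_acquire);
        return prog && run_prog(prog);
}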
index 25eac9e..3ad7f13 100644 (file)
@@ -43,7 +43,7 @@ static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
                                   struct mlx5e_channel_param *cparam)
 {
        mlx5e_build_rq_param(mdev, params, xsk, q_counter, &cparam->rq);
-       mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
+       mlx5e_build_xdpsq_param(mdev, params, xsk, &cparam->xdp_sq);
 }
 
 static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
index 8e96260..3ec0c17 100644 (file)
@@ -103,12 +103,15 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
                xsk_buff_raw_dma_sync_for_device(pool, xdptxd.dma_addr, xdptxd.len);
 
                ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
-                                     mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, check_result);
+                                     mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL,
+                                     check_result);
                if (unlikely(!ret)) {
                        if (sq->mpwqe.wqe)
                                mlx5e_xdp_mpwqe_complete(sq);
 
                        mlx5e_xsk_tx_post_err(sq, &xdpi);
+               } else {
+                       mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
                }
 
                flush = true;
index 7cab08a..299e3f0 100644 (file)
@@ -35,7 +35,6 @@
 #include <crypto/aead.h>
 #include <linux/inetdevice.h>
 #include <linux/netdevice.h>
-#include <linux/module.h>
 
 #include "en.h"
 #include "en_accel/ipsec.h"
index b2ed2f6..2f1dedc 100644 (file)
@@ -780,7 +780,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
                 * entered, and it's safe to call mlx5e_page_release_dynamic
                 * directly.
                 */
-               mlx5e_page_release_dynamic(rq, dma_info, false);
+               mlx5e_page_release_dynamic(rq, dma_info->page, false);
        }
 
        xdp_rxq_info_unreg(&rq->xdp_rxq);
@@ -1666,14 +1666,22 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
        csp.wq_ctrl         = &sq->wq_ctrl;
        csp.min_inline_mode = sq->min_inline_mode;
        set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+
+       /* Don't enable multi-buffer on XDP_REDIRECT SQ, as it's not yet
+        * supported by upstream, and there is no defined trigger to allow
+        * transmitting redirected multi-buffer frames.
+        */
+       if (param->is_xdp_mb && !is_redirect)
+               set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
+
        err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
        if (err)
                goto err_free_xdpsq;
 
        mlx5e_set_xmit_fp(sq, param->is_mpw);
 
-       if (!param->is_mpw) {
-               unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
+       if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
+               unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
                unsigned int inline_hdr_sz = 0;
                int i;
 
@@ -3643,8 +3651,7 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable)
                goto out;
        }
 
-       err = mlx5e_safe_switch_params(priv, &new_params,
-                                      mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
+       err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
 out:
        mutex_unlock(&priv->state_lock);
        return err;
@@ -3946,6 +3953,31 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
        return true;
 }
 
+static bool mlx5e_params_validate_xdp(struct net_device *netdev, struct mlx5e_params *params)
+{
+       bool is_linear;
+
+       /* No XSK params: AF_XDP can't be enabled yet at the point of setting
+        * the XDP program.
+        */
+       is_linear = mlx5e_rx_is_linear_skb(params, NULL);
+
+       if (!is_linear && params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
+               netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
+                           params->sw_mtu,
+                           mlx5e_xdp_max_mtu(params, NULL));
+               return false;
+       }
+       if (!is_linear && !params->xdp_prog->aux->xdp_has_frags) {
+               netdev_warn(netdev, "MTU(%d) > %d, too big for an XDP program not aware of multi buffer\n",
+                           params->sw_mtu,
+                           mlx5e_xdp_max_mtu(params, NULL));
+               return false;
+       }
+
+       return true;
+}
+
 int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
                     mlx5e_fp_preactivate preactivate)
 {
@@ -3965,10 +3997,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
        if (err)
                goto out;
 
-       if (params->xdp_prog &&
-           !mlx5e_rx_is_linear_skb(&new_params, NULL)) {
-               netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
-                          new_mtu, mlx5e_xdp_max_mtu(params, NULL));
+       if (new_params.xdp_prog && !mlx5e_params_validate_xdp(netdev, &new_params)) {
                err = -EINVAL;
                goto out;
        }
@@ -4451,15 +4480,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
        new_params = priv->channels.params;
        new_params.xdp_prog = prog;
 
-       /* No XSK params: AF_XDP can't be enabled yet at the point of setting
-        * the XDP program.
-        */
-       if (!mlx5e_rx_is_linear_skb(&new_params, NULL)) {
-               netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
-                           new_params.sw_mtu,
-                           mlx5e_xdp_max_mtu(&new_params, NULL));
+       if (!mlx5e_params_validate_xdp(netdev, &new_params))
                return -EINVAL;
-       }
 
        return 0;
 }
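
mlx5e_params_validate_xdp() above folds two independent constraints into one predicate: non-linear RX requires the cyclic (legacy) RQ type, and it additionally requires a program loaded with frags awareness. A small truth-table sketch of the same decision (a distillation, not the driver's function):

#include <stdbool.h>
#include <stdio.h>

static bool xdp_params_ok(bool is_linear, bool rq_is_cyclic,
                          bool prog_has_frags)
{
        if (!is_linear && !rq_is_cyclic)
                return false; /* striding RQ cannot do multi-buffer XDP */
        if (!is_linear && !prog_has_frags)
                return false; /* program would never see the fragments */
        return true;
}

int main(void)
{
        printf("%d\n", xdp_params_ok(true, false, false)); /* 1: linear always OK */
        printf("%d\n", xdp_params_ok(false, true, true));  /* 1: multi-buffer OK */
        printf("%d\n", xdp_params_ok(false, true, false)); /* 0: prog lacks frags */
        return 0;
}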
index 074a44b..56bb587 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/ipv6.h>
 #include <linux/tcp.h>
 #include <linux/bitmap.h>
+#include <linux/filter.h>
 #include <net/ip6_checksum.h>
 #include <net/page_pool.h>
 #include <net/inet_ecn.h>
@@ -221,8 +222,7 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
        return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
 }
 
-static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
-                                     struct mlx5e_dma_info *dma_info)
+static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct page *page)
 {
        struct mlx5e_page_cache *cache = &rq->page_cache;
        u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
@@ -233,12 +233,13 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
                return false;
        }
 
-       if (!dev_page_is_reusable(dma_info->page)) {
+       if (!dev_page_is_reusable(page)) {
                stats->cache_waive++;
                return false;
        }
 
-       cache->page_cache[cache->tail] = *dma_info;
+       cache->page_cache[cache->tail].page = page;
+       cache->page_cache[cache->tail].addr = page_pool_get_dma_addr(page);
        cache->tail = tail_next;
        return true;
 }
@@ -286,6 +287,7 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
                dma_info->page = NULL;
                return -ENOMEM;
        }
+       page_pool_set_dma_addr(dma_info->page, dma_info->addr);
 
        return 0;
 }
@@ -299,26 +301,27 @@ static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
                return mlx5e_page_alloc_pool(rq, dma_info);
 }
 
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page)
 {
-       dma_unmap_page_attrs(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir,
+       dma_addr_t dma_addr = page_pool_get_dma_addr(page);
+
+       dma_unmap_page_attrs(rq->pdev, dma_addr, PAGE_SIZE, rq->buff.map_dir,
                             DMA_ATTR_SKIP_CPU_SYNC);
+       page_pool_set_dma_addr(page, 0);
 }
 
-void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
-                               struct mlx5e_dma_info *dma_info,
-                               bool recycle)
+void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle)
 {
        if (likely(recycle)) {
-               if (mlx5e_rx_cache_put(rq, dma_info))
+               if (mlx5e_rx_cache_put(rq, page))
                        return;
 
-               mlx5e_page_dma_unmap(rq, dma_info);
-               page_pool_recycle_direct(rq->page_pool, dma_info->page);
+               mlx5e_page_dma_unmap(rq, page);
+               page_pool_recycle_direct(rq->page_pool, page);
        } else {
-               mlx5e_page_dma_unmap(rq, dma_info);
-               page_pool_release_page(rq->page_pool, dma_info->page);
-               put_page(dma_info->page);
+               mlx5e_page_dma_unmap(rq, page);
+               page_pool_release_page(rq->page_pool, page);
+               put_page(page);
        }
 }
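
The release/unmap rework above is possible because the DMA address no longer travels in a separate mlx5e_dma_info copy; it is stashed with the page itself through page_pool_set_dma_addr() and read back with page_pool_get_dma_addr(), so any path that holds the struct page can unmap it. A userspace model of that stash-and-retrieve pairing (the struct page here is a stand-in, not the kernel's):

#include <assert.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;

struct page {
        dma_addr_t dma_addr; /* mirrors the page_pool stash */
};

static void set_dma(struct page *p, dma_addr_t a) { p->dma_addr = a; }
static dma_addr_t get_dma(const struct page *p) { return p->dma_addr; }

int main(void)
{
        struct page pg;

        set_dma(&pg, 0x1000);           /* at map time */
        assert(get_dma(&pg) == 0x1000); /* at unmap/release time */
        set_dma(&pg, 0);                /* cleared after unmap, as above */
        return 0;
}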
 
@@ -333,7 +336,7 @@ static inline void mlx5e_page_release(struct mlx5e_rq *rq,
                 */
                xsk_buff_free(dma_info->xsk);
        else
-               mlx5e_page_release_dynamic(rq, dma_info, recycle);
+               mlx5e_page_release_dynamic(rq, dma_info->page, recycle);
 }
 
 static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
@@ -373,12 +376,15 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
        int i;
 
        for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
+               u16 headroom;
+
                err = mlx5e_get_rx_frag(rq, frag);
                if (unlikely(err))
                        goto free_frags;
 
+               headroom = i == 0 ? rq->buff.headroom : 0;
                wqe->data[i].addr = cpu_to_be64(frag->di->addr +
-                                               frag->offset + rq->buff.headroom);
+                                               frag->offset + headroom);
        }
 
        return 0;
@@ -1520,11 +1526,11 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 {
        struct mlx5e_dma_info *di = wi->di;
        u16 rx_headroom = rq->buff.headroom;
-       struct xdp_buff xdp;
+       struct bpf_prog *prog;
        struct sk_buff *skb;
+       u32 metasize = 0;
        void *va, *data;
        u32 frag_size;
-       u32 metasize;
 
        va             = page_address(di->page) + wi->offset;
        data           = va + rx_headroom;
@@ -1532,16 +1538,22 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 
        dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
                                      frag_size, DMA_FROM_DEVICE);
-       net_prefetchw(va); /* xdp_frame data area */
        net_prefetch(data);
 
-       mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
-       if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp))
-               return NULL; /* page/packet was consumed by XDP */
+       prog = rcu_dereference(rq->xdp_prog);
+       if (prog) {
+               struct xdp_buff xdp;
+
+               net_prefetchw(va); /* xdp_frame data area */
+               mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
+               if (mlx5e_xdp_handle(rq, di->page, prog, &xdp))
+                       return NULL; /* page/packet was consumed by XDP */
 
-       rx_headroom = xdp.data - xdp.data_hard_start;
+               rx_headroom = xdp.data - xdp.data_hard_start;
+               metasize = xdp.data - xdp.data_meta;
+               cqe_bcnt = xdp.data_end - xdp.data;
+       }
        frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
-       metasize = xdp.data - xdp.data_meta;
        skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
        if (unlikely(!skb))
                return NULL;
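
After the program has run, the linear path re-derives the SKB geometry from the xdp_buff instead of trusting the original values, since XDP may legally move data, data_meta and data_end. The recomputation in isolation (field names mirror struct xdp_buff; the struct itself is a stand-in):

#include <stdio.h>

struct xdp_view {
        unsigned char *data_hard_start, *data_meta, *data, *data_end;
};

/* Recompute headroom, payload length and metadata size from wherever
 * the XDP program left the pointers.
 */
static void skb_geometry(const struct xdp_view *x, unsigned int *headroom,
                         unsigned int *len, unsigned int *metasize)
{
        *headroom = x->data - x->data_hard_start;
        *metasize = x->data - x->data_meta;
        *len = x->data_end - x->data;
}

int main(void)
{
        unsigned char buf[256];
        struct xdp_view x = { buf, buf + 64, buf + 64, buf + 200 };
        unsigned int hr, len, meta;

        skb_geometry(&x, &hr, &len, &meta);
        printf("headroom=%u len=%u metasize=%u\n", hr, len, meta);
        return 0;
}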
@@ -1558,41 +1570,103 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 {
        struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
        struct mlx5e_wqe_frag_info *head_wi = wi;
-       u16 headlen      = min_t(u32, MLX5E_RX_MAX_HEAD, cqe_bcnt);
-       u16 frag_headlen = headlen;
-       u16 byte_cnt     = cqe_bcnt - headlen;
+       u16 rx_headroom = rq->buff.headroom;
+       struct mlx5e_dma_info *di = wi->di;
+       struct skb_shared_info *sinfo;
+       u32 frag_consumed_bytes;
+       struct bpf_prog *prog;
+       struct xdp_buff xdp;
        struct sk_buff *skb;
+       u32 truesize;
+       void *va;
 
-       /* XDP is not supported in this configuration, as incoming packets
-        * might spread among multiple pages.
-        */
-       skb = napi_alloc_skb(rq->cq.napi,
-                            ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
-       if (unlikely(!skb)) {
-               rq->stats->buff_alloc_err++;
-               return NULL;
-       }
+       va = page_address(di->page) + wi->offset;
+       frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
 
-       net_prefetchw(skb->data);
+       dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
+                                     rq->buff.frame0_sz, DMA_FROM_DEVICE);
+       net_prefetchw(va); /* xdp_frame data area */
+       net_prefetch(va + rx_headroom);
+
+       mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &xdp);
+       sinfo = xdp_get_shared_info_from_buff(&xdp);
+       truesize = 0;
+
+       cqe_bcnt -= frag_consumed_bytes;
+       frag_info++;
+       wi++;
+
+       while (cqe_bcnt) {
+               skb_frag_t *frag;
+
+               di = wi->di;
+
+               frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
+
+               dma_sync_single_for_cpu(rq->pdev, di->addr + wi->offset,
+                                       frag_consumed_bytes, DMA_FROM_DEVICE);
+
+               if (!xdp_buff_has_frags(&xdp)) {
+                       /* Init on the first fragment to avoid cold cache access
+                        * when possible.
+                        */
+                       sinfo->nr_frags = 0;
+                       sinfo->xdp_frags_size = 0;
+                       xdp_buff_set_frags_flag(&xdp);
+               }
+
+               frag = &sinfo->frags[sinfo->nr_frags++];
+               __skb_frag_set_page(frag, di->page);
+               skb_frag_off_set(frag, wi->offset);
+               skb_frag_size_set(frag, frag_consumed_bytes);
 
-       while (byte_cnt) {
-               u16 frag_consumed_bytes =
-                       min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt);
+               if (page_is_pfmemalloc(di->page))
+                       xdp_buff_set_frag_pfmemalloc(&xdp);
 
-               mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen,
-                                  frag_consumed_bytes, frag_info->frag_stride);
-               byte_cnt -= frag_consumed_bytes;
-               frag_headlen = 0;
+               sinfo->xdp_frags_size += frag_consumed_bytes;
+               truesize += frag_info->frag_stride;
+
+               cqe_bcnt -= frag_consumed_bytes;
                frag_info++;
                wi++;
        }
 
-       /* copy header */
-       mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, head_wi->offset,
-                             headlen);
-       /* skb linear part was allocated with headlen and aligned to long */
-       skb->tail += headlen;
-       skb->len  += headlen;
+       di = head_wi->di;
+
+       prog = rcu_dereference(rq->xdp_prog);
+       if (prog && mlx5e_xdp_handle(rq, di->page, prog, &xdp)) {
+               if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
+                       int i;
+
+                       for (i = wi - head_wi; i < rq->wqe.info.num_frags; i++)
+                               mlx5e_put_rx_frag(rq, &head_wi[i], true);
+               }
+               return NULL; /* page/packet was consumed by XDP */
+       }
+
+       skb = mlx5e_build_linear_skb(rq, xdp.data_hard_start, rq->buff.frame0_sz,
+                                    xdp.data - xdp.data_hard_start,
+                                    xdp.data_end - xdp.data,
+                                    xdp.data - xdp.data_meta);
+       if (unlikely(!skb))
+               return NULL;
+
+       page_ref_inc(di->page);
+
+       if (unlikely(xdp_buff_has_frags(&xdp))) {
+               int i;
+
+               /* sinfo->nr_frags is reset by build_skb, calculate again. */
+               xdp_update_skb_shared_info(skb, wi - head_wi - 1,
+                                          sinfo->xdp_frags_size, truesize,
+                                          xdp_buff_is_frag_pfmemalloc(&xdp));
+
+               for (i = 0; i < sinfo->nr_frags; i++) {
+                       skb_frag_t *frag = &sinfo->frags[i];
+
+                       page_ref_inc(skb_frag_page(frag));
+               }
+       }
 
        return skb;
 }
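
The rewritten nonlinear path consumes min(frag_size, remaining) bytes from each WQE fragment: the first fragment becomes the linear part of the xdp_buff, every further one lands in sinfo->frags. The split itself, sizes only, as a runnable loop (a single stride is assumed; the driver's per-fragment sizes come from rq->wqe.info.arr):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        unsigned int frag_size = 4096; /* assumed per-fragment stride */
        unsigned int cqe_bcnt = 10000; /* byte count reported by the CQE */
        unsigned int nr = 0;

        while (cqe_bcnt) {
                unsigned int consumed = MIN(frag_size, cqe_bcnt);

                printf("frag %u: %u bytes\n", nr++, consumed);
                cqe_bcnt -= consumed;
        }
        /* frag 0 is the linear head; frags 1..nr-1 fill sinfo->frags */
        return 0;
}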
@@ -1836,12 +1910,11 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 {
        struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
        u16 rx_headroom = rq->buff.headroom;
-       u32 cqe_bcnt32 = cqe_bcnt;
-       struct xdp_buff xdp;
+       struct bpf_prog *prog;
        struct sk_buff *skb;
+       u32 metasize = 0;
        void *va, *data;
        u32 frag_size;
-       u32 metasize;
 
        /* Check packet size. Note LRO doesn't use linear SKB */
        if (unlikely(cqe_bcnt > rq->hw_mtu)) {
@@ -1851,24 +1924,30 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 
        va             = page_address(di->page) + head_offset;
        data           = va + rx_headroom;
-       frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
+       frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
 
        dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
                                      frag_size, DMA_FROM_DEVICE);
-       net_prefetchw(va); /* xdp_frame data area */
        net_prefetch(data);
 
-       mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
-       if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) {
-               if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
-                       __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
-               return NULL; /* page/packet was consumed by XDP */
-       }
+       prog = rcu_dereference(rq->xdp_prog);
+       if (prog) {
+               struct xdp_buff xdp;
+
+               net_prefetchw(va); /* xdp_frame data area */
+               mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
+               if (mlx5e_xdp_handle(rq, di->page, prog, &xdp)) {
+                       if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+                               __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
+                       return NULL; /* page/packet was consumed by XDP */
+               }
 
-       rx_headroom = xdp.data - xdp.data_hard_start;
-       frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
-       metasize = xdp.data - xdp.data_meta;
-       skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32, metasize);
+               rx_headroom = xdp.data - xdp.data_hard_start;
+               metasize = xdp.data - xdp.data_meta;
+               cqe_bcnt = xdp.data_end - xdp.data;
+       }
+       frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
+       skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
        if (unlikely(!skb))
                return NULL;
 
index 336e4d0..bdc870f 100644 (file)
@@ -521,14 +521,15 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
 
        memset(s, 0, sizeof(*s));
 
+       for (i = 0; i < priv->channels.num; i++) /* for active channels only */
+               mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]);
+
        for (i = 0; i < priv->stats_nch; i++) {
                struct mlx5e_channel_stats *channel_stats =
                        priv->channel_stats[i];
 
                int j;
 
-               mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]);
-
                mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
                mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
                mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
index 40416e0..e3fc15a 100644 (file)
@@ -3410,7 +3410,7 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
        if (!attr2 || !parse_attr) {
                kvfree(parse_attr);
                kfree(attr2);
-               return attr2;
+               return NULL;
        }
 
        memcpy(attr2, attr, attr_sz);
index 48a45aa..229728c 100644 (file)
@@ -5,7 +5,6 @@
 
 #include <linux/interrupt.h>
 #include <linux/notifier.h>
-#include <linux/module.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/vport.h>
 #include <linux/mlx5/eq.h>
@@ -439,7 +438,8 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
        struct mlx5_eq_table *eq_table;
        int i;
 
-       eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
+       eq_table = kvzalloc_node(sizeof(*eq_table), GFP_KERNEL,
+                                dev->priv.numa_node);
        if (!eq_table)
                return -ENOMEM;
 
@@ -728,7 +728,8 @@ struct mlx5_eq *
 mlx5_eq_create_generic(struct mlx5_core_dev *dev,
                       struct mlx5_eq_param *param)
 {
-       struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
+       struct mlx5_eq *eq = kvzalloc_node(sizeof(*eq), GFP_KERNEL,
+                                          dev->priv.numa_node);
        int err;
 
        if (!eq)
@@ -888,10 +889,11 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
                return ncomp_eqs;
        INIT_LIST_HEAD(&table->comp_eqs_list);
        nent = comp_eq_depth_devlink_param_get(dev);
+
        for (i = 0; i < ncomp_eqs; i++) {
                struct mlx5_eq_param param = {};
 
-               eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+               eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node);
                if (!eq) {
                        err = -ENOMEM;
                        goto clean;
index 973281b..bac5160 100644 (file)
@@ -474,6 +474,7 @@ struct mlx5_esw_flow_attr {
                int src_port_rewrite_act_id;
        } dests[MLX5_MAX_FLOW_FWD_VPORTS];
        struct mlx5_rx_tun_attr *rx_tun_attr;
+       struct ethhdr eth;
        struct mlx5_pkt_reformat *decap_pkt_reformat;
 };
 
index 35cf4cb..3f63df1 100644 (file)
@@ -3337,6 +3337,27 @@ static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
                !mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
 }
 
+/* FIXME: devl_unlock() followed by devl_lock() inside driver callback
+ * is never correct and prone to races. It's a transitional workaround,
+ * never repeat this pattern.
+ *
+ * This code MUST be fixed before removing devlink_mutex as it is safe
+ * to do only because of that mutex.
+ */
+static void mlx5_eswitch_mode_callback_enter(struct devlink *devlink,
+                                            struct mlx5_eswitch *esw)
+{
+       devl_unlock(devlink);
+       down_write(&esw->mode_lock);
+}
+
+static void mlx5_eswitch_mode_callback_exit(struct devlink *devlink,
+                                           struct mlx5_eswitch *esw)
+{
+       up_write(&esw->mode_lock);
+       devl_lock(devlink);
+}
+
 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
                                  struct netlink_ext_ack *extack)
 {
@@ -3351,6 +3372,15 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
        if (esw_mode_from_devlink(mode, &mlx5_mode))
                return -EINVAL;
 
+       /* FIXME: devl_unlock() followed by devl_lock() inside driver callback
+        * is never correct and prone to races. It's a transitional workaround,
+        * never repeat this pattern.
+        *
+        * This code MUST be fixed before removing devlink_mutex as it is safe
+        * to do only because of that mutex.
+        */
+       devl_unlock(devlink);
+
        mlx5_lag_disable_change(esw->dev);
        err = mlx5_esw_try_lock(esw);
        if (err < 0) {
@@ -3381,6 +3411,7 @@ unlock:
        mlx5_esw_unlock(esw);
 enable_lag:
        mlx5_lag_enable_change(esw->dev);
+       devl_lock(devlink);
        return err;
 }
 
@@ -3393,14 +3424,14 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
        if (IS_ERR(esw))
                return PTR_ERR(esw);
 
-       down_write(&esw->mode_lock);
+       mlx5_eswitch_mode_callback_enter(devlink, esw);
        err = eswitch_devlink_esw_mode_check(esw);
        if (err)
                goto unlock;
 
        err = esw_mode_to_devlink(esw->mode, mode);
 unlock:
-       up_write(&esw->mode_lock);
+       mlx5_eswitch_mode_callback_exit(devlink, esw);
        return err;
 }
 
@@ -3447,7 +3478,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
        if (IS_ERR(esw))
                return PTR_ERR(esw);
 
-       down_write(&esw->mode_lock);
+       mlx5_eswitch_mode_callback_enter(devlink, esw);
        err = eswitch_devlink_esw_mode_check(esw);
        if (err)
                goto out;
@@ -3484,11 +3515,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
                goto out;
 
        esw->offloads.inline_mode = mlx5_mode;
-       up_write(&esw->mode_lock);
+       mlx5_eswitch_mode_callback_exit(devlink, esw);
        return 0;
 
 out:
-       up_write(&esw->mode_lock);
+       mlx5_eswitch_mode_callback_exit(devlink, esw);
        return err;
 }
 
@@ -3501,14 +3532,14 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
        if (IS_ERR(esw))
                return PTR_ERR(esw);
 
-       down_write(&esw->mode_lock);
+       mlx5_eswitch_mode_callback_enter(devlink, esw);
        err = eswitch_devlink_esw_mode_check(esw);
        if (err)
                goto unlock;
 
        err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
 unlock:
-       up_write(&esw->mode_lock);
+       mlx5_eswitch_mode_callback_exit(devlink, esw);
        return err;
 }
 
@@ -3524,7 +3555,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
        if (IS_ERR(esw))
                return PTR_ERR(esw);
 
-       down_write(&esw->mode_lock);
+       mlx5_eswitch_mode_callback_enter(devlink, esw);
        err = eswitch_devlink_esw_mode_check(esw);
        if (err)
                goto unlock;
@@ -3570,7 +3601,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
        }
 
 unlock:
-       up_write(&esw->mode_lock);
+       mlx5_eswitch_mode_callback_exit(devlink, esw);
        return err;
 }
 
@@ -3584,15 +3615,14 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
        if (IS_ERR(esw))
                return PTR_ERR(esw);
 
-
-       down_write(&esw->mode_lock);
+       mlx5_eswitch_mode_callback_enter(devlink, esw);
        err = eswitch_devlink_esw_mode_check(esw);
        if (err)
                goto unlock;
 
        *encap = esw->offloads.encap;
 unlock:
-       up_write(&esw->mode_lock);
+       mlx5_eswitch_mode_callback_exit(devlink, esw);
        return err;
 }
 
index 2ce4241..39c03dc 100644 (file)
@@ -30,7 +30,6 @@
  * SOFTWARE.
  */
 
-#include <linux/module.h>
 #include <linux/etherdevice.h>
 #include <linux/mlx5/driver.h>
 
index 2d8406f..614687e 100644 (file)
@@ -32,7 +32,6 @@
 
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/eswitch.h>
-#include <linux/module.h>
 #include "mlx5_core.h"
 #include "../../mlxfw/mlxfw.h"
 #include "lib/tout.h"
index 737df40..659021c 100644 (file)
@@ -31,7 +31,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/random.h>
 #include <linux/vmalloc.h>
 #include <linux/hardirq.h>
index 4213208..4a6ec15 100644 (file)
@@ -126,6 +126,10 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
                return;
        }
 
+       /* Handle multipath entry with lower priority value */
+       if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority)
+               return;
+
        /* Handle add/replace event */
        nhs = fib_info_num_path(fi);
        if (nhs == 1) {
@@ -135,12 +139,13 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
                        int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);
 
                        if (i < 0)
-                               i = MLX5_LAG_NORMAL_AFFINITY;
-                       else
-                               ++i;
+                               return;
 
+                       i++;
                        mlx5_lag_set_port_affinity(ldev, i);
                }
+
+               mp->mfi = fi;
                return;
        }
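
The new early return above makes the LAG multipath tracker sticky: once a fib entry is tracked, later events are ignored unless they carry a strictly lower priority value (lower value means higher precedence). The predicate in isolation (hypothetical helper name):

#include <stdbool.h>

struct fib_info { unsigned int fib_priority; };

/* Keep the tracked entry unless the new one strictly improves on it,
 * matching the check added above.
 */
static bool should_ignore(const struct fib_info *tracked,
                          const struct fib_info *new_fi)
{
        return tracked && tracked != new_fi &&
               new_fi->fib_priority >= tracked->fib_priority;
}

With a tracked priority of 10, an event for priority 10 or higher is dropped; only a value of 9 or less takes over port affinity.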
 
index 1e8ec4f..df58cba 100644 (file)
@@ -121,9 +121,6 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
 
 u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
 {
-       if (!mlx5_chains_prios_supported(chains))
-               return 1;
-
        if (mlx5_chains_ignore_flow_level_supported(chains))
                return UINT_MAX;
 
index e042e09..4571c56 100644 (file)
@@ -1,7 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
 /* Copyright (c) 2019 Mellanox Technologies. */
 
-#include <linux/module.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/port.h>
 #include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c
new file mode 100644 (file)
index 0000000..9b8c051
--- /dev/null
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#include <linux/kernel.h>
+#include <linux/mlx5/driver.h>
+
+#include "smfs.h"
+
+struct mlx5dr_matcher *
+mlx5_smfs_matcher_create(struct mlx5dr_table *table, u32 priority, struct mlx5_flow_spec *spec)
+{
+       struct mlx5dr_match_parameters matcher_mask = {};
+
+       matcher_mask.match_buf = (u64 *)&spec->match_criteria;
+       matcher_mask.match_sz = DR_SZ_MATCH_PARAM;
+
+       return mlx5dr_matcher_create(table, priority, spec->match_criteria_enable, &matcher_mask);
+}
+
+void
+mlx5_smfs_matcher_destroy(struct mlx5dr_matcher *matcher)
+{
+       mlx5dr_matcher_destroy(matcher);
+}
+
+struct mlx5dr_table *
+mlx5_smfs_table_get_from_fs_ft(struct mlx5_flow_table *ft)
+{
+       return mlx5dr_table_get_from_fs_ft(ft);
+}
+
+struct mlx5dr_action *
+mlx5_smfs_action_create_dest_table(struct mlx5dr_table *table)
+{
+       return mlx5dr_action_create_dest_table(table);
+}
+
+struct mlx5dr_action *
+mlx5_smfs_action_create_flow_counter(u32 counter_id)
+{
+       return mlx5dr_action_create_flow_counter(counter_id);
+}
+
+void
+mlx5_smfs_action_destroy(struct mlx5dr_action *action)
+{
+       mlx5dr_action_destroy(action);
+}
+
+struct mlx5dr_rule *
+mlx5_smfs_rule_create(struct mlx5dr_matcher *matcher, struct mlx5_flow_spec *spec,
+                     size_t num_actions, struct mlx5dr_action *actions[],
+                     u32 flow_source)
+{
+       struct mlx5dr_match_parameters value = {};
+
+       value.match_buf = (u64 *)spec->match_value;
+       value.match_sz = DR_SZ_MATCH_PARAM;
+
+       return mlx5dr_rule_create(matcher, &value, num_actions, actions, flow_source);
+}
+
+void
+mlx5_smfs_rule_destroy(struct mlx5dr_rule *rule)
+{
+       mlx5dr_rule_destroy(rule);
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h
new file mode 100644 (file)
index 0000000..452d0df
--- /dev/null
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_LIB_SMFS_H__
+#define __MLX5_LIB_SMFS_H__
+
+#include "steering/mlx5dr.h"
+#include "steering/dr_types.h"
+
+struct mlx5dr_matcher *
+mlx5_smfs_matcher_create(struct mlx5dr_table *table, u32 priority, struct mlx5_flow_spec *spec);
+
+void
+mlx5_smfs_matcher_destroy(struct mlx5dr_matcher *matcher);
+
+struct mlx5dr_table *
+mlx5_smfs_table_get_from_fs_ft(struct mlx5_flow_table *ft);
+
+struct mlx5dr_action *
+mlx5_smfs_action_create_dest_table(struct mlx5dr_table *table);
+
+struct mlx5dr_action *
+mlx5_smfs_action_create_flow_counter(u32 counter_id);
+
+void
+mlx5_smfs_action_destroy(struct mlx5dr_action *action);
+
+struct mlx5dr_rule *
+mlx5_smfs_rule_create(struct mlx5dr_matcher *matcher, struct mlx5_flow_spec *spec,
+                     size_t num_actions, struct mlx5dr_action *actions[],
+                     u32 flow_source);
+
+void
+mlx5_smfs_rule_destroy(struct mlx5dr_rule *rule);
+
+#endif /* __MLX5_LIB_SMFS_H__ */
index e3b0a13..d55e15c 100644 (file)
@@ -31,7 +31,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/refcount.h>
 #include <linux/mlx5/driver.h>
 #include <net/vxlan.h>
index 98be705..d8d3647 100644 (file)
@@ -1487,8 +1487,8 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
        INIT_LIST_HEAD(&priv->pgdir_list);
 
        priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
-       priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
-                                           mlx5_debugfs_root);
+       priv->dbg.dbg_root = debugfs_create_dir(dev_name(dev->device),
+                                               mlx5_debugfs_root);
        INIT_LIST_HEAD(&priv->traps);
 
        err = mlx5_tout_init(dev);
@@ -1524,7 +1524,7 @@ err_pagealloc_init:
 err_health_init:
        mlx5_tout_cleanup(dev);
 err_timeout_init:
-       debugfs_remove(dev->priv.dbg_root);
+       debugfs_remove(dev->priv.dbg.dbg_root);
        mutex_destroy(&priv->pgdir_mutex);
        mutex_destroy(&priv->alloc_mutex);
        mutex_destroy(&priv->bfregs.wc_head.lock);
@@ -1542,7 +1542,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
        mlx5_pagealloc_cleanup(dev);
        mlx5_health_cleanup(dev);
        mlx5_tout_cleanup(dev);
-       debugfs_remove_recursive(dev->priv.dbg_root);
+       debugfs_remove_recursive(dev->priv.dbg.dbg_root);
        mutex_destroy(&priv->pgdir_mutex);
        mutex_destroy(&priv->alloc_mutex);
        mutex_destroy(&priv->bfregs.wc_head.lock);
index e019d68..495cca5 100644 (file)
@@ -31,7 +31,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/mlx5/driver.h>
 #include <rdma/ib_verbs.h>
 #include "mlx5_core.h"
index 6f8baa0..3b231e9 100644 (file)
@@ -176,7 +176,6 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
                                        u32 element_id);
 int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages);
 
-void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev);
 void mlx5_cmd_flush(struct mlx5_core_dev *dev);
 void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
index f099a08..9d735c3 100644 (file)
@@ -31,7 +31,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/mlx5/driver.h>
 #include "mlx5_core.h"
 
index f6b5451..ec76a8b 100644 (file)
@@ -32,7 +32,6 @@
 
 #include <linux/highmem.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/mlx5/driver.h>
 #include <linux/xarray.h>
@@ -327,11 +326,12 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
 }
 
 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
-                     int notify_fail, bool ec_function)
+                     int event, bool ec_function)
 {
        u32 function = get_function(func_id, ec_function);
        u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
        int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
+       int notify_fail = event;
        u64 addr;
        int err;
        u32 *in;
@@ -351,8 +351,10 @@ retry:
                if (err) {
                        if (err == -ENOMEM)
                                err = alloc_system_page(dev, function);
-                       if (err)
+                       if (err) {
+                               dev->priv.fw_pages_alloc_failed += (npages - i);
                                goto out_4k;
+                       }
 
                        goto retry;
                }
@@ -365,11 +367,20 @@ retry:
        MLX5_SET(manage_pages_in, in, input_num_entries, npages);
        MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
 
-       err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+       err = mlx5_cmd_do(dev, in, inlen, out, sizeof(out));
+       if (err == -EREMOTEIO) {
+               notify_fail = 0;
+               /* if triggered by FW and failed by FW, ignore */
+               if (event) {
+                       err = 0;
+                       goto out_dropped;
+               }
+       }
        if (err) {
+               err = mlx5_cmd_check(dev, err, in, out);
                mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
                               func_id, npages, err);
-               goto out_4k;
+               goto out_dropped;
        }
 
        dev->priv.fw_pages += npages;
@@ -384,6 +395,8 @@ retry:
        kvfree(in);
        return 0;
 
+out_dropped:
+       dev->priv.give_pages_dropped += npages;
 out_4k:
        for (i--; i >= 0; i--)
                free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
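
give_pages() now issues the command with mlx5_cmd_do(), which returns -EREMOTEIO when the command reached firmware but firmware failed it. For firmware-initiated (event) requests that outcome is swallowed; otherwise mlx5_cmd_check() turns the status into the reported error, and both branches feed the new drop counters. A runnable model of the control flow (cmd_do/cmd_check here are toy stand-ins for the mlx5 helpers):

#include <errno.h>
#include <stdio.h>

static int cmd_do(int fw_status) { return fw_status ? -EREMOTEIO : 0; }
static int cmd_check(int err) { return err ? -EIO : 0; } /* translate status */

/* event != 0 means firmware asked for these pages itself */
static int give_pages(int event, int fw_status, unsigned int *dropped)
{
        int err = cmd_do(fw_status);

        if (err == -EREMOTEIO && event) {
                *dropped += 1; /* accounted, but not treated as an error */
                return 0;
        }
        if (err) {
                *dropped += 1;
                return cmd_check(err);
        }
        return 0;
}

int main(void)
{
        unsigned int dropped = 0;

        printf("%d %d %d\n", give_pages(1, 1, &dropped),
               give_pages(0, 1, &dropped), give_pages(0, 0, &dropped));
        printf("dropped=%u\n", dropped); /* prints 0 -5 0, then dropped=2 */
        return 0;
}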
@@ -455,7 +468,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
        u32 i = 0;
 
        if (!mlx5_cmd_is_down(dev))
-               return mlx5_cmd_exec(dev, in, in_size, out, out_size);
+               return mlx5_cmd_do(dev, in, in_size, out, out_size);
 
        /* No hard feelings, we want our pages back! */
        npages = MLX5_GET(manage_pages_in, in, input_num_entries);
@@ -479,7 +492,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
 }
 
 static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
-                        int *nclaimed, bool ec_function)
+                        int *nclaimed, bool event, bool ec_function)
 {
        u32 function = get_function(func_id, ec_function);
        int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
@@ -507,6 +520,14 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
                      func_id, npages, outlen);
        err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
        if (err) {
+               npages = MLX5_GET(manage_pages_in, in, input_num_entries);
+               dev->priv.reclaim_pages_discard += npages;
+       }
+       /* if triggered by FW event and failed by FW then ignore */
+       if (event && err == -EREMOTEIO)
+               err = 0;
+       if (err) {
+               err = mlx5_cmd_check(dev, err, in, out);
                mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
                goto out_free;
        }
@@ -546,7 +567,7 @@ static void pages_work_handler(struct work_struct *work)
                release_all_pages(dev, req->func_id, req->ec_function);
        else if (req->npages < 0)
                err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
-                                   req->ec_function);
+                                   true, req->ec_function);
        else if (req->npages > 0)
                err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);
 
@@ -645,7 +666,7 @@ static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
                int err;
 
                err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
-                                   &nclaimed, mlx5_core_is_ecpf(dev));
+                                   &nclaimed, false, mlx5_core_is_ecpf(dev));
                if (err) {
                        mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
                                       err, func_id);
@@ -700,12 +721,14 @@ int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
                return -ENOMEM;
 
        xa_init(&dev->priv.page_root_xa);
+       mlx5_pages_debugfs_init(dev);
 
        return 0;
 }
 
 void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
 {
+       mlx5_pages_debugfs_cleanup(dev);
        xa_destroy(&dev->priv.page_root_xa);
        destroy_workqueue(dev->priv.pg_wq);
 }
index 41807ef..db77f1d 100644 (file)
@@ -3,7 +3,6 @@
 
 #include <linux/interrupt.h>
 #include <linux/notifier.h>
-#include <linux/module.h>
 #include <linux/mlx5/driver.h>
 #include "mlx5_core.h"
 #include "mlx5_irq.h"
@@ -601,7 +600,8 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev)
        if (mlx5_core_is_sf(dev))
                return 0;
 
-       irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL);
+       irq_table = kvzalloc_node(sizeof(*irq_table), GFP_KERNEL,
+                                 dev->priv.numa_node);
        if (!irq_table)
                return -ENOMEM;
 
index aabc53a..ee5ffde 100644 (file)
@@ -31,7 +31,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/mlx5/driver.h>
 #include "mlx5_core.h"
 
index 289b29a..e1bd545 100644 (file)
@@ -275,7 +275,6 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
 {
        u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {0};
        u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
-       int module_mapping;
        int err;
 
        MLX5_SET(pmlp_reg, in, local_port, 1);
@@ -284,8 +283,9 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
        if (err)
                return err;
 
-       module_mapping = MLX5_GET(pmlp_reg, out, lane0_module_mapping);
-       *module_num = module_mapping & MLX5_EEPROM_IDENTIFIER_BYTE_MASK;
+       *module_num = MLX5_GET(lane_2_module_mapping,
+                              MLX5_ADDR_OF(pmlp_reg, out, lane0_module_mapping),
+                              module);
 
        return 0;
 }
@@ -365,6 +365,12 @@ static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset
        *offset -= MLX5_EEPROM_PAGE_LENGTH;
 }
 
+static int mlx5_mcia_max_bytes(struct mlx5_core_dev *dev)
+{
+       /* mcia supports either 12 dwords or 32 dwords */
+       return (MLX5_CAP_MCAM_FEATURE(dev, mcia_32dwords) ? 32 : 12) * sizeof(u32);
+}
+
 static int mlx5_query_mcia(struct mlx5_core_dev *dev,
                           struct mlx5_module_eeprom_query_params *params, u8 *data)
 {
@@ -374,7 +380,7 @@ static int mlx5_query_mcia(struct mlx5_core_dev *dev,
        void *ptr;
        u16 size;
 
-       size = min_t(int, params->size, MLX5_EEPROM_MAX_BYTES);
+       size = min_t(int, params->size, mlx5_mcia_max_bytes(dev));
 
        MLX5_SET(mcia_reg, in, l, 0);
        MLX5_SET(mcia_reg, in, size, size);
@@ -445,35 +451,12 @@ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
                                     struct mlx5_module_eeprom_query_params *params,
                                     u8 *data)
 {
-       u8 module_id;
        int err;
 
        err = mlx5_query_module_num(dev, &params->module_number);
        if (err)
                return err;
 
-       err = mlx5_query_module_id(dev, params->module_number, &module_id);
-       if (err)
-               return err;
-
-       switch (module_id) {
-       case MLX5_MODULE_ID_SFP:
-               if (params->page > 0)
-                       return -EINVAL;
-               break;
-       case MLX5_MODULE_ID_QSFP:
-       case MLX5_MODULE_ID_QSFP28:
-       case MLX5_MODULE_ID_QSFP_PLUS:
-               if (params->page > 3)
-                       return -EINVAL;
-               break;
-       case MLX5_MODULE_ID_DSFP:
-               break;
-       default:
-               mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
-               return -EINVAL;
-       }
-
        if (params->i2c_address != MLX5_I2C_ADDR_HIGH &&
            params->i2c_address != MLX5_I2C_ADDR_LOW) {
                mlx5_core_err(dev, "I2C address not recognized: 0x%x\n", params->i2c_address);
index 7161220..9f8b400 100644 (file)
@@ -31,7 +31,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/mlx5/driver.h>
 #include "mlx5_core.h"
 
index c61a5e8..850937c 100644
@@ -570,6 +570,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
 
        for (i = 0; i < num_actions; i++) {
                struct mlx5dr_action_dest_tbl *dest_tbl;
+               struct mlx5dr_icm_chunk *chunk;
                struct mlx5dr_action *action;
                int max_actions_type = 1;
                u32 action_type;
@@ -598,9 +599,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
                                                   matcher->tbl->level,
                                                   dest_tbl->tbl->level);
                                }
-                               attr.final_icm_addr = rx_rule ?
-                                       dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
-                                       dest_tbl->tbl->tx.s_anchor->chunk->icm_addr;
+                               chunk = rx_rule ? dest_tbl->tbl->rx.s_anchor->chunk :
+                                       dest_tbl->tbl->tx.s_anchor->chunk;
+                               attr.final_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(chunk);
                        } else {
                                struct mlx5dr_cmd_query_flow_table_details output;
                                int ret;
@@ -669,15 +670,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
                case DR_ACTION_TYP_VPORT:
                        attr.hit_gvmi = action->vport->caps->vhca_gvmi;
                        dest_action = action;
-                       if (rx_rule) {
-                               if (action->vport->caps->num == MLX5_VPORT_UPLINK) {
-                                       mlx5dr_dbg(dmn, "Device doesn't support Loopback on WIRE vport\n");
-                                       return -EOPNOTSUPP;
-                               }
-                               attr.final_icm_addr = action->vport->caps->icm_address_rx;
-                       } else {
-                               attr.final_icm_addr = action->vport->caps->icm_address_tx;
-                       }
+                       attr.final_icm_addr = rx_rule ?
+                               action->vport->caps->icm_address_rx :
+                               action->vport->caps->icm_address_tx;
                        break;
                case DR_ACTION_TYP_POP_VLAN:
                        if (!rx_rule && !(dmn->ste_ctx->actions_caps &
@@ -1129,7 +1124,8 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
                }
 
                action->rewrite->data = (void *)hw_actions;
-               action->rewrite->index = (action->rewrite->chunk->icm_addr -
+               action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr
+                                         (action->rewrite->chunk) -
                                         dmn->info.caps.hdr_modify_icm_addr) /
                                         ACTION_CACHE_LINE_SIZE;
 
@@ -1708,7 +1704,7 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
        action->rewrite->modify_ttl = modify_ttl;
        action->rewrite->data = (u8 *)hw_actions;
        action->rewrite->num_of_actions = num_hw_actions;
-       action->rewrite->index = (chunk->icm_addr -
+       action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr(chunk) -
                                  dmn->info.caps.hdr_modify_icm_addr) /
                                  ACTION_CACHE_LINE_SIZE;
 
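Both rewrite-index hunks above keep the same arithmetic -- the chunk's distance from the start of the modify-header ICM area, in cache-line-sized action slots -- and only swap the cached chunk->icm_addr for the new accessor. The computation in isolation (demo constant; the 64-byte slot size is an assumption standing in for ACTION_CACHE_LINE_SIZE):

#include <linux/types.h>

#define DEMO_ACTION_SLOT_SIZE 64 /* stand-in for ACTION_CACHE_LINE_SIZE */

/* Index of a rewrite chunk inside the modify-header ICM area. Assumes
 * chunk_icm_addr was carved out of the area starting at area_start.
 */
static u32 demo_rewrite_index(u64 chunk_icm_addr, u64 area_start)
{
        return (chunk_icm_addr - area_start) / DEMO_ACTION_SLOT_SIZE;
}

So a chunk sitting at area_start + 640 with 64-byte slots lands at index 10.
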
index 2784cd5..d5998ef 100644
@@ -3,7 +3,6 @@
 
 #include <linux/debugfs.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/seq_file.h>
 #include "dr_types.h"
 
@@ -218,7 +217,8 @@ dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
                                       DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V1;
        }
 
-       dr_dump_hex_print(hw_ste_dump, (char *)ste->hw_ste, DR_STE_SIZE_REDUCED);
+       dr_dump_hex_print(hw_ste_dump, (char *)mlx5dr_ste_get_hw_ste(ste),
+                         DR_STE_SIZE_REDUCED);
 
        seq_printf(file, "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
                   dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)), rule_id,
@@ -347,16 +347,19 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
                      const u64 matcher_id)
 {
        enum dr_dump_rec_type rec_type;
+       u64 s_icm_addr, e_icm_addr;
        int i, ret;
 
        rec_type = is_rx ? DR_DUMP_REC_TYPE_MATCHER_RX :
                           DR_DUMP_REC_TYPE_MATCHER_TX;
 
+       s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->s_htbl->chunk);
+       e_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->e_anchor->chunk);
        seq_printf(file, "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
                   rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
                   matcher_id, matcher_rx_tx->num_of_builders,
-                  dr_dump_icm_to_idx(matcher_rx_tx->s_htbl->chunk->icm_addr),
-                  dr_dump_icm_to_idx(matcher_rx_tx->e_anchor->chunk->icm_addr));
+                  dr_dump_icm_to_idx(s_icm_addr),
+                  dr_dump_icm_to_idx(e_icm_addr));
 
        for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
                ret = dr_dump_matcher_builder(file,
@@ -427,12 +430,14 @@ dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
                    const u64 table_id)
 {
        enum dr_dump_rec_type rec_type;
+       u64 s_icm_addr;
 
        rec_type = is_rx ? DR_DUMP_REC_TYPE_TABLE_RX :
                           DR_DUMP_REC_TYPE_TABLE_TX;
 
+       s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(table_rx_tx->s_anchor->chunk);
        seq_printf(file, "%d,0x%llx,0x%llx\n", rec_type, table_id,
-                  dr_dump_icm_to_idx(table_rx_tx->s_anchor->chunk->icm_addr));
+                  dr_dump_icm_to_idx(s_icm_addr));
 
        return 0;
 }
@@ -630,7 +635,7 @@ void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn)
        }
 
        dmn->dump_info.steering_debugfs =
-               debugfs_create_dir("steering", dev->priv.dbg_root);
+               debugfs_create_dir("steering", mlx5_debugfs_get_dev_root(dev));
        dmn->dump_info.fdb_debugfs =
                debugfs_create_dir("fdb", dmn->dump_info.steering_debugfs);
 
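mlx5dr_dbg_init_dump() now obtains the device's debugfs root through mlx5_debugfs_get_dev_root() rather than dereferencing dev->priv.dbg_root directly. The directory nesting itself is plain debugfs; a sketch with demo names:

#include <linux/debugfs.h>

/* Create <dev_root>/steering/fdb without poking at the root owner's
 * internals; teardown is a single debugfs_remove_recursive(steering).
 */
static struct dentry *demo_dbg_init(struct dentry *dev_root)
{
        struct dentry *steering;

        steering = debugfs_create_dir("steering", dev_root);
        debugfs_create_dir("fdb", steering);

        return steering;
}
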
index 5fa7f9d..fc6ae49 100644
@@ -8,7 +8,7 @@
 #define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
        ((dmn)->info.caps.dmn_type##_sw_owner ||        \
         ((dmn)->info.caps.dmn_type##_sw_owner_v2 &&    \
-         (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))
+         (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))
 
 static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
 {
index e289cfd..4ca67fa 100644
@@ -57,6 +57,36 @@ static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
        return mlx5_core_create_mkey(mdev, mkey, in, inlen);
 }
 
+u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk)
+{
+       u32 offset = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
+
+       return (u64)offset * chunk->seg;
+}
+
+u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk)
+{
+       return chunk->buddy_mem->icm_mr->mkey;
+}
+
+u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk)
+{
+       u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
+
+       return (u64)chunk->buddy_mem->icm_mr->icm_start_addr + size * chunk->seg;
+}
+
+u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk)
+{
+       return mlx5dr_icm_pool_chunk_size_to_byte(chunk->size,
+                       chunk->buddy_mem->pool->icm_type);
+}
+
+u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk)
+{
+       return mlx5dr_icm_pool_chunk_size_to_entries(chunk->size);
+}
+
 static struct mlx5dr_icm_mr *
 dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
 {
@@ -158,12 +188,13 @@ static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
 
 static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
 {
+       int num_of_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
        struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
 
        memset(chunk->hw_ste_arr, 0,
-              chunk->num_of_entries * dr_icm_buddy_get_ste_size(buddy));
+              num_of_entries * dr_icm_buddy_get_ste_size(buddy));
        memset(chunk->ste_arr, 0,
-              chunk->num_of_entries * sizeof(chunk->ste_arr[0]));
+              num_of_entries * sizeof(chunk->ste_arr[0]));
 }
 
 static enum mlx5dr_icm_type
@@ -177,7 +208,7 @@ static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
 {
        enum mlx5dr_icm_type icm_type = get_chunk_icm_type(chunk);
 
-       buddy->used_memory -= chunk->byte_size;
+       buddy->used_memory -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
        list_del(&chunk->chunk_list);
 
        if (icm_type == DR_ICM_TYPE_STE)
@@ -298,21 +329,14 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
 
        offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
 
-       chunk->rkey = buddy_mem_pool->icm_mr->mkey;
-       chunk->mr_addr = offset;
-       chunk->icm_addr =
-               (uintptr_t)buddy_mem_pool->icm_mr->icm_start_addr + offset;
-       chunk->num_of_entries =
-               mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
-       chunk->byte_size =
-               mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type);
        chunk->seg = seg;
+       chunk->size = chunk_size;
        chunk->buddy_mem = buddy_mem_pool;
 
        if (pool->icm_type == DR_ICM_TYPE_STE)
                dr_icm_chunk_ste_init(chunk, offset);
 
-       buddy_mem_pool->used_memory += chunk->byte_size;
+       buddy_mem_pool->used_memory += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
        INIT_LIST_HEAD(&chunk->chunk_list);
 
        /* chunk now is part of the used_list */
@@ -336,6 +360,7 @@ static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
 static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
 {
        struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
+       u32 num_entries;
        int err;
 
        err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
@@ -348,9 +373,9 @@ static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
                struct mlx5dr_icm_chunk *chunk, *tmp_chunk;
 
                list_for_each_entry_safe(chunk, tmp_chunk, &buddy->hot_list, chunk_list) {
-                       mlx5dr_buddy_free_mem(buddy, chunk->seg,
-                                             ilog2(chunk->num_of_entries));
-                       pool->hot_memory_size -= chunk->byte_size;
+                       num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
+                       mlx5dr_buddy_free_mem(buddy, chunk->seg, ilog2(num_entries));
+                       pool->hot_memory_size -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
                        dr_icm_chunk_destroy(chunk, buddy);
                }
 
@@ -448,7 +473,7 @@ void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
        /* move the memory to the waiting list AKA "hot" */
        mutex_lock(&pool->mutex);
        list_move_tail(&chunk->chunk_list, &buddy->hot_list);
-       pool->hot_memory_size += chunk->byte_size;
+       pool->hot_memory_size += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
 
        /* Check if we have chunks that are waiting for sync-ste */
        if (dr_icm_pool_is_sync_required(pool))
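The block of mlx5dr_icm_pool_get_chunk_*() helpers added above is the heart of this refactor: rkey, mr_addr, icm_addr, byte_size and num_of_entries are no longer cached in every chunk but recomputed from the chunk's (buddy_mem, seg, size) triple, which is what lets dr_icm_chunk_create() drop five assignments. A compressed sketch of the compute-on-demand layout (demo types; the flat entry_size field and the 1 << size entry count are simplifications of the per-ICM-type lookups):

#include <linux/types.h>

struct demo_buddy {
        u64 icm_start_addr; /* base of the memory region */
        u32 entry_size;     /* stand-in for the per-ICM-type entry size */
};

/* The chunk keeps only its position; everything else is derived. */
struct demo_chunk {
        struct demo_buddy *buddy;
        u32 seg;  /* segment index inside the buddy allocator */
        u8 size;  /* log2 of the number of entries, like chunk->size */
};

static u64 demo_chunk_mr_addr(const struct demo_chunk *c)
{
        return (u64)c->buddy->entry_size * c->seg;
}

static u64 demo_chunk_icm_addr(const struct demo_chunk *c)
{
        return c->buddy->icm_start_addr + demo_chunk_mr_addr(c);
}

static u32 demo_chunk_num_entries(const struct demo_chunk *c)
{
        return 1U << c->size;
}

static u32 demo_chunk_byte_size(const struct demo_chunk *c)
{
        return demo_chunk_num_entries(c) * c->buddy->entry_size;
}

The trade is a few extra loads and shifts on paths that already touch the buddy, in exchange for a smaller structure repeated once per chunk.
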
index 38971fe..0726848 100644
@@ -47,6 +47,11 @@ static bool dr_mask_is_ttl_set(struct mlx5dr_match_spec *spec)
        return spec->ttl_hoplimit;
 }
 
+static bool dr_mask_is_ipv4_ihl_set(struct mlx5dr_match_spec *spec)
+{
+       return spec->ipv4_ihl;
+}
+
 #define DR_MASK_IS_L2_DST(_spec, _misc, _inner_outer) (_spec.first_vid || \
        (_spec).first_cfi || (_spec).first_prio || (_spec).cvlan_tag || \
        (_spec).svlan_tag || (_spec).dmac_47_16 || (_spec).dmac_15_0 || \
@@ -103,7 +108,7 @@ dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
 static bool
 dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
 {
-       return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+       return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
               (caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED);
 }
 
@@ -144,7 +149,7 @@ static bool dr_mask_is_tnl_geneve_tlv_opt_exist_set(struct mlx5dr_match_misc *mi
 static bool
 dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
 {
-       return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+       return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
               (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED);
 }
 
@@ -261,13 +266,13 @@ static bool dr_mask_is_tnl_gtpu_any(struct mlx5dr_match_param *mask,
 
 static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps)
 {
-       return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+       return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
               (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED);
 }
 
 static int dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps)
 {
-       return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+       return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
               (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED);
 }
 
@@ -507,7 +512,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
                                mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
                                                                     &mask, inner, rx);
 
-                       if (dr_mask_is_ttl_set(&mask.outer))
+                       if (dr_mask_is_ttl_set(&mask.outer) ||
+                           dr_mask_is_ipv4_ihl_set(&mask.outer))
                                mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
                                                                  &mask, inner, rx);
                }
@@ -614,7 +620,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
                                mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
                                                                     &mask, inner, rx);
 
-                       if (dr_mask_is_ttl_set(&mask.inner))
+                       if (dr_mask_is_ttl_set(&mask.inner) ||
+                           dr_mask_is_ipv4_ihl_set(&mask.inner))
                                mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
                                                                  &mask, inner, rx);
                }
@@ -698,7 +705,7 @@ static int dr_nic_matcher_connect(struct mlx5dr_domain *dmn,
 
        /* Connect start hash table to end anchor */
        info.type = CONNECT_MISS;
-       info.miss_icm_addr = curr_nic_matcher->e_anchor->chunk->icm_addr;
+       info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(curr_nic_matcher->e_anchor->chunk);
        ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
                                                curr_nic_matcher->s_htbl,
                                                &info, false);
@@ -719,12 +726,14 @@ static int dr_nic_matcher_connect(struct mlx5dr_domain *dmn,
                return ret;
 
        /* Update the pointing ste and next hash table */
-       curr_nic_matcher->s_htbl->pointing_ste = prev_htbl->ste_arr;
-       prev_htbl->ste_arr[0].next_htbl = curr_nic_matcher->s_htbl;
+       curr_nic_matcher->s_htbl->pointing_ste = prev_htbl->chunk->ste_arr;
+       prev_htbl->chunk->ste_arr[0].next_htbl = curr_nic_matcher->s_htbl;
 
        if (next_nic_matcher) {
-               next_nic_matcher->s_htbl->pointing_ste = curr_nic_matcher->e_anchor->ste_arr;
-               curr_nic_matcher->e_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
+               next_nic_matcher->s_htbl->pointing_ste =
+                       curr_nic_matcher->e_anchor->chunk->ste_arr;
+               curr_nic_matcher->e_anchor->chunk->ste_arr[0].next_htbl =
+                       next_nic_matcher->s_htbl;
        }
 
        return 0;
@@ -1036,12 +1045,12 @@ static int dr_matcher_disconnect_nic(struct mlx5dr_domain *dmn,
        if (next_nic_matcher) {
                info.type = CONNECT_HIT;
                info.hit_next_htbl = next_nic_matcher->s_htbl;
-               next_nic_matcher->s_htbl->pointing_ste = prev_anchor->ste_arr;
-               prev_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
+               next_nic_matcher->s_htbl->pointing_ste = prev_anchor->chunk->ste_arr;
+               prev_anchor->chunk->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
        } else {
                info.type = CONNECT_MISS;
                info.miss_icm_addr = nic_tbl->default_icm_addr;
-               prev_anchor->ste_arr[0].next_htbl = NULL;
+               prev_anchor->chunk->ste_arr[0].next_htbl = NULL;
        }
 
        return mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_anchor,
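The repeated == to >= flips in this file widen the built-in parser checks from "exactly ConnectX-6DX" to "ConnectX-6DX or newer", so the ConnectX-7 steering format added in this patch set keeps VXLAN-GPE, GENEVE and ICMP matching. A sketch of why the open-ended comparison is the safer default (demo enum; ascending values per device generation are an assumption implied by the diff's usage):

#include <linux/types.h>

enum demo_steering_format { /* newer device => larger value */
        DEMO_FMT_CX5 = 0,
        DEMO_FMT_CX6DX = 1,
        DEMO_FMT_CX7 = 2,
};

/* ">= first supporting generation" keeps working when a DEMO_FMT_CX8
 * appears; the old "== DEMO_FMT_CX6DX" silently dropped later devices.
 */
static bool demo_supp_builtin_parser(enum demo_steering_format fmt)
{
        return fmt >= DEMO_FMT_CX6DX;
}
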
index b437457..ddfaf78 100644
@@ -21,12 +21,12 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
        if (!ste_info_last)
                return -ENOMEM;
 
-       mlx5dr_ste_set_miss_addr(ste_ctx, last_ste->hw_ste,
+       mlx5dr_ste_set_miss_addr(ste_ctx, mlx5dr_ste_get_hw_ste(last_ste),
                                 mlx5dr_ste_get_icm_addr(new_last_ste));
        list_add_tail(&new_last_ste->miss_list_node, miss_list);
 
        mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL,
-                                                 0, last_ste->hw_ste,
+                                                 0, mlx5dr_ste_get_hw_ste(last_ste),
                                                  ste_info_last, send_list, true);
 
        return 0;
@@ -41,6 +41,7 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
        struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
        struct mlx5dr_ste_htbl *new_htbl;
        struct mlx5dr_ste *ste;
+       u64 icm_addr;
 
        /* Create new table for miss entry */
        new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
@@ -53,9 +54,9 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
        }
 
        /* One and only entry, never grows */
-       ste = new_htbl->ste_arr;
-       mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste,
-                                nic_matcher->e_anchor->chunk->icm_addr);
+       ste = new_htbl->chunk->ste_arr;
+       icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+       mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
        mlx5dr_htbl_get(new_htbl);
 
        return ste;
@@ -79,7 +80,7 @@ dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
        ste->htbl->pointing_ste = orig_ste->htbl->pointing_ste;
 
        /* In collision entry, all members share the same miss_list_head */
-       ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste);
+       ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(orig_ste);
 
        /* Next table */
        if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
@@ -107,9 +108,11 @@ dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
         * is already written to the hw.
         */
        if (ste_info->size == DR_STE_SIZE_CTRL)
-               memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_CTRL);
+               memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
+                      ste_info->data, DR_STE_SIZE_CTRL);
        else
-               memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);
+               memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
+                      ste_info->data, DR_STE_SIZE_REDUCED);
 
        ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
                                       ste_info->size, ste_info->offset);
@@ -159,7 +162,7 @@ dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
 
        /* Check if hw_ste is present in the list */
        list_for_each_entry(ste, miss_list, miss_list_node) {
-               if (mlx5dr_ste_equal_tag(ste->hw_ste, hw_ste))
+               if (mlx5dr_ste_equal_tag(mlx5dr_ste_get_hw_ste(ste), hw_ste))
                        return ste;
        }
 
@@ -185,7 +188,7 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
        new_ste->htbl->pointing_ste = col_ste->htbl->pointing_ste;
 
        /* In collision entry, all members share the same miss_list_head */
-       new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
+       new_ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(col_ste);
 
        /* Update the previous from the list */
        ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste,
@@ -235,6 +238,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
        bool use_update_list = false;
        u8 hw_ste[DR_STE_SIZE] = {};
        struct mlx5dr_ste *new_ste;
+       u64 icm_addr;
        int new_idx;
        u8 sb_idx;
 
@@ -243,12 +247,12 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
        mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);
 
        /* Copy STE control and tag */
-       memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
-       mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
-                                nic_matcher->e_anchor->chunk->icm_addr);
+       icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+       memcpy(hw_ste, mlx5dr_ste_get_hw_ste(cur_ste), DR_STE_SIZE_REDUCED);
+       mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);
 
        new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
-       new_ste = &new_htbl->ste_arr[new_idx];
+       new_ste = &new_htbl->chunk->ste_arr[new_idx];
 
        if (mlx5dr_ste_is_not_used(new_ste)) {
                mlx5dr_htbl_get(new_htbl);
@@ -269,7 +273,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
                use_update_list = true;
        }
 
-       memcpy(new_ste->hw_ste, hw_ste, DR_STE_SIZE_REDUCED);
+       memcpy(mlx5dr_ste_get_hw_ste(new_ste), hw_ste, DR_STE_SIZE_REDUCED);
 
        new_htbl->ctrl.num_of_valid_entries++;
 
@@ -334,7 +338,7 @@ static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
        int err = 0;
        int i;
 
-       cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk_size);
+       cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk->size);
 
        if (cur_entries < 1) {
                mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
@@ -342,7 +346,7 @@ static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
        }
 
        for (i = 0; i < cur_entries; i++) {
-               cur_ste = &cur_htbl->ste_arr[i];
+               cur_ste = &cur_htbl->chunk->ste_arr[i];
                if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
                        continue;
 
@@ -398,7 +402,7 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
 
        /* Write new table to HW */
        info.type = CONNECT_MISS;
-       info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
+       info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
        mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
                                     dmn->info.caps.gvmi,
                                     nic_dmn->type,
@@ -446,21 +450,21 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
                 * (48B len) which works only on first 32B
                 */
                mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
-                                       prev_htbl->ste_arr[0].hw_ste,
-                                       new_htbl->chunk->icm_addr,
-                                       new_htbl->chunk->num_of_entries);
+                                       prev_htbl->chunk->hw_ste_arr,
+                                       mlx5dr_icm_pool_get_chunk_icm_addr(new_htbl->chunk),
+                                       mlx5dr_icm_pool_get_chunk_num_of_entries(new_htbl->chunk));
 
-               ste_to_update = &prev_htbl->ste_arr[0];
+               ste_to_update = &prev_htbl->chunk->ste_arr[0];
        } else {
                mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
-                                                    cur_htbl->pointing_ste->hw_ste,
+                                                    mlx5dr_ste_get_hw_ste(cur_htbl->pointing_ste),
                                                     new_htbl);
                ste_to_update = cur_htbl->pointing_ste;
        }
 
        mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL,
-                                                 0, ste_to_update->hw_ste, ste_info,
-                                                 update_list, false);
+                                                 0, mlx5dr_ste_get_hw_ste(ste_to_update),
+                                                 ste_info, update_list, false);
 
        return new_htbl;
 
@@ -489,10 +493,10 @@ static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
        struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
        enum mlx5dr_icm_chunk_size new_size;
 
-       new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk_size);
+       new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk->size);
        new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);
 
-       if (new_size == cur_htbl->chunk_size)
+       if (new_size == cur_htbl->chunk->size)
                 return NULL; /* Skip rehash, we're already at the max size */
 
        return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
@@ -659,13 +663,13 @@ static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
        struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
        int threshold;
 
-       if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size)
+       if (dmn->info.max_log_sw_icm_sz <= htbl->chunk->size)
                return false;
 
        if (!mlx5dr_ste_htbl_may_grow(htbl))
                return false;
 
-       if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
+       if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk->size)
                return false;
 
        threshold = mlx5dr_ste_htbl_increase_threshold(htbl);
@@ -755,6 +759,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
 {
        struct mlx5dr_domain *dmn = matcher->tbl->dmn;
        struct mlx5dr_ste_send_info *ste_info;
+       u64 icm_addr;
 
        /* Take ref on table, only on first time this ste is used */
        mlx5dr_htbl_get(cur_htbl);
@@ -762,8 +767,8 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
        /* new entry -> new branch */
        list_add_tail(&ste->miss_list_node, miss_list);
 
-       mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
-                                nic_matcher->e_anchor->chunk->icm_addr);
+       icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+       mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);
 
        ste->ste_chain_location = ste_location;
 
@@ -822,7 +827,7 @@ dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
 again:
        index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
        miss_list = &cur_htbl->chunk->miss_list[index];
-       ste = &cur_htbl->ste_arr[index];
+       ste = &cur_htbl->chunk->ste_arr[index];
 
        if (mlx5dr_ste_is_not_used(ste)) {
                if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
@@ -858,7 +863,7 @@ again:
                                                  ste_location, send_ste_list);
                        if (!new_htbl) {
                                mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
-                                          cur_htbl->chunk_size);
+                                          cur_htbl->chunk->size);
                                mlx5dr_htbl_put(cur_htbl);
                        } else {
                                cur_htbl = new_htbl;
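dr_rule_rehash() above grows a hash table by stepping chunk->size to the next chunk order, capping it at max_log_sw_icm_sz and bailing out when the cap is already reached. The growth policy in isolation (orders are log2 entry counts, matching the chunk-size convention in these files; this is a sketch, not the driver's helper):

#include <linux/minmax.h>
#include <linux/types.h>

/* Next table order under a cap; a return equal to @cur means "don't
 * rehash", mirroring the NULL early-out in dr_rule_rehash().
 */
static u8 demo_next_order(u8 cur, u8 max)
{
        return min_t(u8, cur + 1, max);
}

/* Caller pattern:
 *
 *      u8 next = demo_next_order(cur, max);
 *      if (next == cur)
 *              return NULL;    // at max size, keep the current table
 */
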
index 00aef47..ef19a66 100644
@@ -407,17 +407,17 @@ static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn,
                                   int *iterations,
                                   int *num_stes)
 {
+       u32 chunk_byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
        int alloc_size;
 
-       if (htbl->chunk->byte_size > dmn->send_ring->max_post_send_size) {
-               *iterations = htbl->chunk->byte_size /
-                       dmn->send_ring->max_post_send_size;
+       if (chunk_byte_size > dmn->send_ring->max_post_send_size) {
+               *iterations = chunk_byte_size / dmn->send_ring->max_post_send_size;
                *byte_size = dmn->send_ring->max_post_send_size;
                alloc_size = *byte_size;
                *num_stes = *byte_size / DR_STE_SIZE;
        } else {
                *iterations = 1;
-               *num_stes = htbl->chunk->num_of_entries;
+               *num_stes = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
                alloc_size = *num_stes * DR_STE_SIZE;
        }
 
@@ -453,7 +453,7 @@ int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn, struct mlx5dr_ste *ste,
        send_info.write.length = size;
        send_info.write.lkey = 0;
        send_info.remote_addr = mlx5dr_ste_get_mr_addr(ste) + offset;
-       send_info.rkey = ste->htbl->chunk->rkey;
+       send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(ste->htbl->chunk);
 
        return dr_postsend_icm_data(dmn, &send_info);
 }
@@ -462,7 +462,7 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
                              struct mlx5dr_ste_htbl *htbl,
                              u8 *formatted_ste, u8 *mask)
 {
-       u32 byte_size = htbl->chunk->byte_size;
+       u32 byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
        int num_stes_per_iter;
        int iterations;
        u8 *data;
@@ -486,7 +486,7 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
                 * need to add the bit_mask
                 */
                for (j = 0; j < num_stes_per_iter; j++) {
-                       struct mlx5dr_ste *ste = &htbl->ste_arr[ste_index + j];
+                       struct mlx5dr_ste *ste = &htbl->chunk->ste_arr[ste_index + j];
                        u32 ste_off = j * DR_STE_SIZE;
 
                        if (mlx5dr_ste_is_not_used(ste)) {
@@ -495,7 +495,8 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
                        } else {
                                /* Copy data */
                                memcpy(data + ste_off,
-                                      htbl->ste_arr[ste_index + j].hw_ste,
+                                      htbl->chunk->hw_ste_arr +
+                                      DR_STE_SIZE_REDUCED * (ste_index + j),
                                       DR_STE_SIZE_REDUCED);
                                /* Copy bit_mask */
                                memcpy(data + ste_off + DR_STE_SIZE_REDUCED,
@@ -511,8 +512,8 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
                send_info.write.length = byte_size;
                send_info.write.lkey = 0;
                send_info.remote_addr =
-                       mlx5dr_ste_get_mr_addr(htbl->ste_arr + ste_index);
-               send_info.rkey = htbl->chunk->rkey;
+                       mlx5dr_ste_get_mr_addr(htbl->chunk->ste_arr + ste_index);
+               send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk);
 
                ret = dr_postsend_icm_data(dmn, &send_info);
                if (ret)
@@ -530,7 +531,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
                                        u8 *ste_init_data,
                                        bool update_hw_ste)
 {
-       u32 byte_size = htbl->chunk->byte_size;
+       u32 byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
        int iterations;
        int num_stes;
        u8 *copy_dst;
@@ -546,7 +547,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
        if (update_hw_ste) {
                /* Copy the reduced STE to hash table ste_arr */
                for (i = 0; i < num_stes; i++) {
-                       copy_dst = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
+                       copy_dst = htbl->chunk->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
                        memcpy(copy_dst, ste_init_data, DR_STE_SIZE_REDUCED);
                }
        }
@@ -568,8 +569,8 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
                send_info.write.length = byte_size;
                send_info.write.lkey = 0;
                send_info.remote_addr =
-                       mlx5dr_ste_get_mr_addr(htbl->ste_arr + ste_index);
-               send_info.rkey = htbl->chunk->rkey;
+                       mlx5dr_ste_get_mr_addr(htbl->chunk->ste_arr + ste_index);
+               send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk);
 
                ret = dr_postsend_icm_data(dmn, &send_info);
                if (ret)
@@ -591,8 +592,9 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
        send_info.write.length = action->rewrite->num_of_actions *
                                 DR_MODIFY_ACTION_SIZE;
        send_info.write.lkey = 0;
-       send_info.remote_addr = action->rewrite->chunk->mr_addr;
-       send_info.rkey = action->rewrite->chunk->rkey;
+       send_info.remote_addr =
+               mlx5dr_icm_pool_get_chunk_mr_addr(action->rewrite->chunk);
+       send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(action->rewrite->chunk);
 
        ret = dr_postsend_icm_data(dmn, &send_info);
 
index 187e29b..09ebd30 100644
@@ -25,6 +25,7 @@ bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
 
 u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
 {
+       u32 num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
        struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
        u8 masked[DR_STE_SIZE_TAG] = {};
        u32 crc32, index;
@@ -32,7 +33,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
        int i;
 
        /* Don't calculate CRC if the result is predicted */
-       if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
+       if (num_entries == 1 || htbl->byte_mask == 0)
                return 0;
 
        /* Mask tag using byte mask, bit per byte */
@@ -45,7 +46,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
        }
 
        crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
-       index = crc32 & (htbl->chunk->num_of_entries - 1);
+       index = crc32 & (num_entries - 1);
 
        return index;
 }
@@ -96,13 +97,11 @@ void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
 }
 
 static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
-                                   struct mlx5dr_ste *ste, u64 miss_addr)
+                                   u8 *hw_ste, u64 miss_addr)
 {
-       u8 *hw_ste_p = ste->hw_ste;
-
-       ste_ctx->set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
-       ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
-       dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
+       ste_ctx->set_next_lu_type(hw_ste, MLX5DR_STE_LU_TYPE_DONT_CARE);
+       ste_ctx->set_miss_addr(hw_ste, miss_addr);
+       dr_ste_set_always_miss((struct dr_hw_ste_format *)hw_ste);
 }
 
 void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
@@ -113,37 +112,45 @@ void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
 
 u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
 {
-       u32 index = ste - ste->htbl->ste_arr;
+       u64 base_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(ste->htbl->chunk);
+       u32 index = ste - ste->htbl->chunk->ste_arr;
 
-       return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
+       return base_icm_addr + DR_STE_SIZE * index;
 }
 
 u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
 {
-       u32 index = ste - ste->htbl->ste_arr;
+       u32 index = ste - ste->htbl->chunk->ste_arr;
 
-       return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
+       return mlx5dr_icm_pool_get_chunk_mr_addr(ste->htbl->chunk) + DR_STE_SIZE * index;
+}
+
+u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste)
+{
+       u64 index = ste - ste->htbl->chunk->ste_arr;
+
+       return ste->htbl->chunk->hw_ste_arr + DR_STE_SIZE_REDUCED * index;
 }
 
 struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
 {
-       u32 index = ste - ste->htbl->ste_arr;
+       u32 index = ste - ste->htbl->chunk->ste_arr;
 
-       return &ste->htbl->miss_list[index];
+       return &ste->htbl->chunk->miss_list[index];
 }
 
 static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
-                                  struct mlx5dr_ste *ste,
+                                  u8 *hw_ste,
                                   struct mlx5dr_ste_htbl *next_htbl)
 {
        struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
-       u8 *hw_ste = ste->hw_ste;
 
        ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
        ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
-       ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+       ste_ctx->set_hit_addr(hw_ste, mlx5dr_icm_pool_get_chunk_icm_addr(chunk),
+                             mlx5dr_icm_pool_get_chunk_num_of_entries(chunk));
 
-       dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
+       dr_ste_set_always_hit((struct dr_hw_ste_format *)hw_ste);
 }
 
 bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
@@ -166,7 +173,8 @@ bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
  */
 static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
 {
-       memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED);
+       memcpy(mlx5dr_ste_get_hw_ste(dst), mlx5dr_ste_get_hw_ste(src),
+              DR_STE_SIZE_REDUCED);
        dst->next_htbl = src->next_htbl;
        if (dst->next_htbl)
                dst->next_htbl->pointing_ste = dst;
@@ -184,18 +192,17 @@ dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
                       struct mlx5dr_ste_htbl *stats_tbl)
 {
        u8 tmp_data_ste[DR_STE_SIZE] = {};
-       struct mlx5dr_ste tmp_ste = {};
        u64 miss_addr;
 
-       tmp_ste.hw_ste = tmp_data_ste;
+       miss_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
 
        /* Use temp ste because dr_ste_always_miss_addr
         * touches bit_mask area which doesn't exist at ste->hw_ste.
+        * Need to use a full-sized (DR_STE_SIZE) hw_ste.
         */
-       memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
-       miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
-       dr_ste_always_miss_addr(ste_ctx, &tmp_ste, miss_addr);
-       memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);
+       memcpy(tmp_data_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
+       dr_ste_always_miss_addr(ste_ctx, tmp_data_ste, miss_addr);
+       memcpy(mlx5dr_ste_get_hw_ste(ste), tmp_data_ste, DR_STE_SIZE_REDUCED);
 
        list_del_init(&ste->miss_list_node);
 
@@ -237,7 +244,7 @@ dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
        mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false);
 
        /* Copy all 64 hw_ste bytes */
-       memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
+       memcpy(hw_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
        sb_idx = ste->ste_chain_location - 1;
        mlx5dr_ste_set_bit_mask(hw_ste,
                                nic_matcher->ste_builder[sb_idx].bit_mask);
@@ -273,12 +280,13 @@ static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
        if (WARN_ON(!prev_ste))
                return;
 
-       miss_addr = ste_ctx->get_miss_addr(ste->hw_ste);
-       ste_ctx->set_miss_addr(prev_ste->hw_ste, miss_addr);
+       miss_addr = ste_ctx->get_miss_addr(mlx5dr_ste_get_hw_ste(ste));
+       ste_ctx->set_miss_addr(mlx5dr_ste_get_hw_ste(prev_ste), miss_addr);
 
        mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
-                                                 prev_ste->hw_ste, ste_info,
-                                                 send_ste_list, true /* Copy data*/);
+                                                 mlx5dr_ste_get_hw_ste(prev_ste),
+                                                 ste_info, send_ste_list,
+                                                 true /* Copy data*/);
 
        list_del_init(&ste->miss_list_node);
 
@@ -364,9 +372,11 @@ void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
                                          u8 *hw_ste,
                                          struct mlx5dr_ste_htbl *next_htbl)
 {
-       struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
+       u64 icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(next_htbl->chunk);
+       u32 num_entries =
+               mlx5dr_icm_pool_get_chunk_num_of_entries(next_htbl->chunk);
 
-       ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+       ste_ctx->set_hit_addr(hw_ste, icm_addr, num_entries);
 }
 
 void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
@@ -385,15 +395,22 @@ void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
                                  struct mlx5dr_htbl_connect_info *connect_info)
 {
        bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
-       struct mlx5dr_ste ste = {};
+       u8 tmp_hw_ste[DR_STE_SIZE] = {0};
 
        ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi);
-       ste.hw_ste = formatted_ste;
 
+       /* Use temp ste because dr_ste_always_miss_addr/hit_htbl
+        * touches bit_mask area which doesn't exist at ste->hw_ste.
+        * Need to use a full-sized (DR_STE_SIZE) hw_ste.
+        */
+       memcpy(tmp_hw_ste, formatted_ste, DR_STE_SIZE_REDUCED);
        if (connect_info->type == CONNECT_HIT)
-               dr_ste_always_hit_htbl(ste_ctx, &ste, connect_info->hit_next_htbl);
+               dr_ste_always_hit_htbl(ste_ctx, tmp_hw_ste,
+                                      connect_info->hit_next_htbl);
        else
-               dr_ste_always_miss_addr(ste_ctx, &ste, connect_info->miss_icm_addr);
+               dr_ste_always_miss_addr(ste_ctx, tmp_hw_ste,
+                                       connect_info->miss_icm_addr);
+       memcpy(formatted_ste, tmp_hw_ste, DR_STE_SIZE_REDUCED);
 }
 
 int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
@@ -444,7 +461,8 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
 
                /* Write new table to HW */
                info.type = CONNECT_MISS;
-               info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
+               info.miss_icm_addr =
+                       mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
                if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
                                                      &info, false)) {
                        mlx5dr_info(dmn, "Failed writing table to HW\n");
@@ -470,6 +488,7 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
 {
        struct mlx5dr_icm_chunk *chunk;
        struct mlx5dr_ste_htbl *htbl;
+       u32 num_entries;
        int i;
 
        htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
@@ -483,22 +502,18 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
        htbl->chunk = chunk;
        htbl->lu_type = lu_type;
        htbl->byte_mask = byte_mask;
-       htbl->ste_arr = chunk->ste_arr;
-       htbl->hw_ste_arr = chunk->hw_ste_arr;
-       htbl->miss_list = chunk->miss_list;
        htbl->refcount = 0;
+       num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
 
-       for (i = 0; i < chunk->num_of_entries; i++) {
-               struct mlx5dr_ste *ste = &htbl->ste_arr[i];
+       for (i = 0; i < num_entries; i++) {
+               struct mlx5dr_ste *ste = &chunk->ste_arr[i];
 
-               ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
                ste->htbl = htbl;
                ste->refcount = 0;
                INIT_LIST_HEAD(&ste->miss_list_node);
-               INIT_LIST_HEAD(&htbl->miss_list[i]);
+               INIT_LIST_HEAD(&chunk->miss_list[i]);
        }
 
-       htbl->chunk_size = chunk_size;
        return htbl;
 
 out_free_htbl:
@@ -523,8 +538,8 @@ void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
                               struct mlx5dr_ste_actions_attr *attr,
                               u32 *added_stes)
 {
-       ste_ctx->set_actions_tx(dmn, action_type_set, hw_ste_arr,
-                               attr, added_stes);
+       ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps,
+                               hw_ste_arr, attr, added_stes);
 }
 
 void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
@@ -534,8 +549,8 @@ void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
                               struct mlx5dr_ste_actions_attr *attr,
                               u32 *added_stes)
 {
-       ste_ctx->set_actions_rx(dmn, action_type_set, hw_ste_arr,
-                               attr, added_stes);
+       ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps,
+                               hw_ste_arr, attr, added_stes);
 }
 
 const struct mlx5dr_ste_action_modify_field *
@@ -793,6 +808,7 @@ static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bo
        spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr);
        spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr);
 
+       spec->ipv4_ihl = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ipv4_ihl, clr);
        spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr);
 
        spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr);
@@ -1360,15 +1376,14 @@ void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
        ste_ctx->build_tnl_header_0_1_init(sb, mask);
 }
 
-static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
-       [MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
-       [MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
-};
-
 struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
 {
-       if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX)
-               return NULL;
+       if (version == MLX5_STEERING_FORMAT_CONNECTX_5)
+               return mlx5dr_ste_get_ctx_v0();
+       else if (version == MLX5_STEERING_FORMAT_CONNECTX_6DX)
+               return mlx5dr_ste_get_ctx_v1();
+       else if (version == MLX5_STEERING_FORMAT_CONNECTX_7)
+               return mlx5dr_ste_get_ctx_v2();
 
-       return mlx5dr_ste_ctx_arr[version];
+       return NULL;
 }
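The thread running through this file is that struct mlx5dr_ste has lost its per-entry hw_ste pointer: the new mlx5dr_ste_get_hw_ste() recovers the reduced shadow entry from the STE's position in the chunk's parallel arrays, and get_icm_addr/get_mr_addr/get_miss_list use the same pointer-difference index. In miniature (demo types; 48 stands in for DR_STE_SIZE_REDUCED):

#include <linux/types.h>

#define DEMO_HW_STE_SZ 48 /* stand-in for DR_STE_SIZE_REDUCED */

struct demo_ste {
        u32 refcount;
};

struct demo_chunk {
        struct demo_ste *ste_arr; /* one struct per entry */
        u8 *hw_ste_arr;           /* DEMO_HW_STE_SZ bytes per entry */
};

/* An STE's index in ste_arr doubles as its index into every parallel
 * per-entry array, so no back-pointer needs to live in the STE itself.
 */
static u8 *demo_get_hw_ste(struct demo_chunk *chunk, struct demo_ste *ste)
{
        u64 index = ste - chunk->ste_arr; /* pointer difference, in elements */

        return chunk->hw_ste_arr + DEMO_HW_STE_SZ * index;
}

Deriving the mapping instead of storing it shaves a pointer off every STE, which is what this part of the series is after.
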
index ca8fa32..17513ba 100644
@@ -161,11 +161,13 @@ struct mlx5dr_ste_ctx {
        u32 actions_caps;
        void (*set_actions_rx)(struct mlx5dr_domain *dmn,
                               u8 *action_type_set,
+                              u32 actions_caps,
                               u8 *hw_ste_arr,
                               struct mlx5dr_ste_actions_attr *attr,
                               u32 *added_stes);
        void (*set_actions_tx)(struct mlx5dr_domain *dmn,
                               u8 *action_type_set,
+                              u32 actions_caps,
                               u8 *hw_ste_arr,
                               struct mlx5dr_ste_actions_attr *attr,
                               u32 *added_stes);
@@ -197,7 +199,8 @@ struct mlx5dr_ste_ctx {
        void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
 };
 
-extern struct mlx5dr_ste_ctx ste_ctx_v0;
-extern struct mlx5dr_ste_ctx ste_ctx_v1;
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void);
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void);
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void);
 
 #endif  /* _DR_STE_ */
index 2d62950..5a32233 100644
@@ -408,6 +408,7 @@ static void dr_ste_v0_arr_init_next(u8 **last_ste,
 static void
 dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
                         u8 *action_type_set,
+                        u32 actions_caps,
                         u8 *last_ste,
                         struct mlx5dr_ste_actions_attr *attr,
                         u32 *added_stes)
@@ -477,6 +478,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
 static void
 dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
                         u8 *action_type_set,
+                        u32 actions_caps,
                         u8 *last_ste,
                         struct mlx5dr_ste_actions_attr *attr,
                         u32 *added_stes)
@@ -1152,6 +1154,7 @@ dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
        struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
 
        DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
+       DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, ihl, spec, ipv4_ihl);
 
        return 0;
 }
@@ -1897,7 +1900,7 @@ static void dr_ste_v0_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
        sb->ste_build_tag_func = &dr_ste_v0_build_tnl_header_0_1_tag;
 }
 
-struct mlx5dr_ste_ctx ste_ctx_v0 = {
+static struct mlx5dr_ste_ctx ste_ctx_v0 = {
        /* Builders */
        .build_eth_l2_src_dst_init      = &dr_ste_v0_build_eth_l2_src_dst_init,
        .build_eth_l3_ipv6_src_init     = &dr_ste_v0_build_eth_l3_ipv6_src_init,
@@ -1950,3 +1953,8 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = {
        .set_action_copy                = &dr_ste_v0_set_action_copy,
        .set_action_decap_l3_list       = &dr_ste_v0_set_action_decap_l3_list,
 };
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void)
+{
+       return &ste_ctx_v0;
+}
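ste_ctx_v0 above becomes file-static and escapes only through mlx5dr_ste_get_ctx_v0(), pairing with the version switch added to mlx5dr_ste_get_ctx() earlier in this patch; the v1 and v2 contexts are declared with the same accessors. The pattern reduced to its bones (demo ops table, hypothetical names):

#include <linux/types.h>

struct demo_ste_ops {
        void (*set_miss_addr)(u8 *hw_ste, u64 addr);
};

static void demo_v0_set_miss_addr(u8 *hw_ste, u64 addr)
{
        /* format-specific encoding would go here */
}

/* The table itself never leaves this translation unit... */
static struct demo_ste_ops demo_ops_v0 = {
        .set_miss_addr = demo_v0_set_miss_addr,
};

/* ...callers reach it only through the accessor, so the instance and
 * its initialization stay private to the v0 implementation file.
 */
struct demo_ste_ops *demo_get_ops_v0(void)
{
        return &demo_ops_v0;
}
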
index 6ca0680..fcb962c 100644
@@ -3,7 +3,7 @@
 
 #include <linux/types.h>
 #include "mlx5_ifc_dr_ste_v1.h"
-#include "dr_ste.h"
+#include "dr_ste_v1.h"
 
 #define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
        ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
@@ -121,12 +121,12 @@ enum {
        DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1           = 0x70,
        DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE        = 0x7b,
        DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE          = 0x7c,
-       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2            = 0x8c,
-       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3            = 0x8d,
-       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4            = 0x8e,
-       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5            = 0x8f,
-       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6            = 0x90,
-       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7            = 0x91,
+       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0          = 0x8c,
+       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1          = 0x8d,
+       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0          = 0x8e,
+       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1          = 0x8f,
+       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0          = 0x90,
+       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1          = 0x91,
 };
 
 static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
@@ -223,22 +223,22 @@ static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field
                .hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
        },
        [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6, .start = 0, .end = 31,
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
        },
        [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7, .start = 0, .end = 31,
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
        },
        [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4, .start = 0, .end = 31,
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
        },
        [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5, .start = 0, .end = 31,
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
        },
        [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2, .start = 0, .end = 31,
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
        },
        [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3, .start = 0, .end = 31,
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
        },
        [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
                .hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
@@ -262,7 +262,7 @@ static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
        MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type);
 }
 
-static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
+void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
 {
        u64 index = miss_addr >> 6;
 
@@ -270,7 +270,7 @@ static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
        MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, index);
 }
 
-static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
+u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
 {
        u64 index =
                ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
@@ -279,12 +279,12 @@ static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
        return index << 6;
 }
 
-static void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
+void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
 {
        MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask);
 }
 
-static u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
+u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
 {
        return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask);
 }
@@ -295,13 +295,13 @@ static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type)
        MLX5_SET(ste_match_bwc_v1, hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF);
 }
 
-static void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
+void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
 {
        MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8);
        MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF);
 }
 
-static u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
+u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
 {
        u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format);
        u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx);
@@ -314,7 +314,7 @@ static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
        MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
 }
 
-static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
+void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
 {
        u64 index = (icm_addr >> 5) | ht_size;
 
@@ -322,8 +322,7 @@ static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
        MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, index);
 }
 
-static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type,
-                          bool is_rx, u16 gvmi)
+void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi)
 {
        dr_ste_v1_set_lu_type(hw_ste_p, lu_type);
        dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
@@ -333,8 +332,7 @@ static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type,
        MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi);
 }
 
-static void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p,
-                                          u32 ste_size)
+void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size)
 {
        u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL;
        u8 *mask = tag + DR_STE_SIZE_TAG;
@@ -511,11 +509,12 @@ static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
        memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action));
 }
 
-static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
-                                    u8 *action_type_set,
-                                    u8 *last_ste,
-                                    struct mlx5dr_ste_actions_attr *attr,
-                                    u32 *added_stes)
+void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
+                             u8 *action_type_set,
+                             u32 actions_caps,
+                             u8 *last_ste,
+                             struct mlx5dr_ste_actions_attr *attr,
+                             u32 *added_stes)
 {
        u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
        u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
@@ -533,7 +532,10 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
                dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
                action_sz -= DR_STE_ACTION_SINGLE_SZ;
                action += DR_STE_ACTION_SINGLE_SZ;
-               allow_modify_hdr = false;
+
+               /* Check if vlan_pop and modify_hdr on the same STE is supported */
+               if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
+                       allow_modify_hdr = false;
        }
 
        if (action_type_set[DR_ACTION_TYP_CTR])
@@ -631,11 +633,12 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
        dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
 }
 
-static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
-                                    u8 *action_type_set,
-                                    u8 *last_ste,
-                                    struct mlx5dr_ste_actions_attr *attr,
-                                    u32 *added_stes)
+void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
+                             u8 *action_type_set,
+                             u32 actions_caps,
+                             u8 *last_ste,
+                             struct mlx5dr_ste_actions_attr *attr,
+                             u32 *added_stes)
 {
        u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
        u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
@@ -677,13 +680,16 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
                        dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
                        action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
                        action_sz = DR_STE_ACTION_TRIPLE_SZ;
-                       allow_modify_hdr = false;
-                       allow_ctr = false;
                }
 
                dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
                action_sz -= DR_STE_ACTION_SINGLE_SZ;
                action += DR_STE_ACTION_SINGLE_SZ;
+               allow_ctr = false;
+
+               /* Check if vlan_pop and modify_hdr on the same STE is supported */
+               if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
+                       allow_modify_hdr = false;
        }
 
        if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
@@ -731,9 +737,9 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
                        action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
                        action_sz = DR_STE_ACTION_TRIPLE_SZ;
                        allow_modify_hdr = true;
-                       allow_ctr = false;
                }
                dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
+               allow_ctr = false;
        }
 
        if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
@@ -800,11 +806,11 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
        dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
 }
 
-static void dr_ste_v1_set_action_set(u8 *d_action,
-                                    u8 hw_field,
-                                    u8 shifter,
-                                    u8 length,
-                                    u32 data)
+void dr_ste_v1_set_action_set(u8 *d_action,
+                             u8 hw_field,
+                             u8 shifter,
+                             u8 length,
+                             u32 data)
 {
        shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
        MLX5_SET(ste_double_action_set_v1, d_action, action_id, DR_STE_V1_ACTION_ID_SET);
@@ -814,11 +820,11 @@ static void dr_ste_v1_set_action_set(u8 *d_action,
        MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data);
 }
 
-static void dr_ste_v1_set_action_add(u8 *d_action,
-                                    u8 hw_field,
-                                    u8 shifter,
-                                    u8 length,
-                                    u32 data)
+void dr_ste_v1_set_action_add(u8 *d_action,
+                             u8 hw_field,
+                             u8 shifter,
+                             u8 length,
+                             u32 data)
 {
        shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
        MLX5_SET(ste_double_action_add_v1, d_action, action_id, DR_STE_V1_ACTION_ID_ADD);
@@ -828,12 +834,12 @@ static void dr_ste_v1_set_action_add(u8 *d_action,
        MLX5_SET(ste_double_action_add_v1, d_action, add_value, data);
 }
 
-static void dr_ste_v1_set_action_copy(u8 *d_action,
-                                     u8 dst_hw_field,
-                                     u8 dst_shifter,
-                                     u8 dst_len,
-                                     u8 src_hw_field,
-                                     u8 src_shifter)
+void dr_ste_v1_set_action_copy(u8 *d_action,
+                              u8 dst_hw_field,
+                              u8 dst_shifter,
+                              u8 dst_len,
+                              u8 src_hw_field,
+                              u8 src_shifter)
 {
        dst_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
        src_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
@@ -848,11 +854,11 @@ static void dr_ste_v1_set_action_copy(u8 *d_action,
 #define DR_STE_DECAP_L3_ACTION_NUM     8
 #define DR_STE_L2_HDR_MAX_SZ           20
 
-static int dr_ste_v1_set_action_decap_l3_list(void *data,
-                                             u32 data_sz,
-                                             u8 *hw_action,
-                                             u32 hw_action_sz,
-                                             u16 *used_hw_action_num)
+int dr_ste_v1_set_action_decap_l3_list(void *data,
+                                      u32 data_sz,
+                                      u8 *hw_action,
+                                      u32 hw_action_sz,
+                                      u16 *used_hw_action_num)
 {
        u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
        void *data_ptr = padded_data;
@@ -977,8 +983,8 @@ static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
-                                               struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
+                                        struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
 
@@ -1001,8 +1007,8 @@ static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
-                                                struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
+                                         struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
 
@@ -1025,8 +1031,8 @@ static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
-                                                struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
+                                         struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
 
@@ -1060,8 +1066,8 @@ static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *va
        return 0;
 }
 
-static void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
-                                                    struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
+                                             struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
 
@@ -1079,8 +1085,8 @@ static void dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param
        DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid);
        DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi);
        DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio);
-       DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag); // ?
-       DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype); // ?
+       DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag);
+       DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype);
        DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version);
 
        if (mask->svlan_tag || mask->cvlan_tag) {
@@ -1201,8 +1207,8 @@ static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
        return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
 }
 
-static void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
-                                           struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
 
@@ -1234,8 +1240,8 @@ static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
        return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
 }
 
-static void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
-                                           struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask);
 
@@ -1314,8 +1320,8 @@ static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
-                                           struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
 
@@ -1331,12 +1337,13 @@ static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value
        struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
 
        DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit);
+       DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, ihl, spec, ipv4_ihl);
 
        return 0;
 }
 
-static void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
-                                                 struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
+                                          struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
 
@@ -1375,8 +1382,8 @@ static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
-                                               struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
+                                        struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
 
@@ -1399,8 +1406,8 @@ static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
-                                     struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
+                              struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask);
 
@@ -1426,8 +1433,8 @@ static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
-                                        struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
+                                 struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask);
 
@@ -1471,8 +1478,8 @@ static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
-                                         struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
+                                  struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
 
@@ -1506,8 +1513,8 @@ static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *valu
        return 0;
 }
 
-static void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
-                                                  struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
+                                           struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
 
@@ -1547,8 +1554,8 @@ static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *valu
        return 0;
 }
 
-static void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
-                                                  struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
+                                           struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);
 
@@ -1594,8 +1601,8 @@ static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
-                                     struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
+                              struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask);
 
@@ -1616,8 +1623,8 @@ static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
-                                                struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
+                                         struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask);
 
@@ -1643,8 +1650,8 @@ static int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
-                                            struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
+                                     struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
 
@@ -1673,9 +1680,8 @@ dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void
-dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
-                                              struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
+                                                   struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
 
@@ -1703,9 +1709,8 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void
-dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
-                                           struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
+                                                struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
 
@@ -1726,8 +1731,8 @@ static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
-                                               struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
+                                        struct mlx5dr_match_param *mask)
 {
        sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
        dr_ste_v1_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
@@ -1749,8 +1754,8 @@ static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
-                                           struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask);
 
@@ -1773,8 +1778,8 @@ static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
-                                           struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask);
 
@@ -1837,8 +1842,8 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
-                                             struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
+                                      struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
 
@@ -1892,8 +1897,8 @@ static int dr_ste_v1_build_felx_parser_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
-                                              struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+                                       struct mlx5dr_match_param *mask)
 {
        sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
        dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
@@ -1901,8 +1906,8 @@ static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
        sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
 }
 
-static void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
-                                              struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+                                       struct mlx5dr_match_param *mask)
 {
        sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
        dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
@@ -1926,7 +1931,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *va
        return 0;
 }
 
-static void
+void
 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
                                                    struct mlx5dr_match_param *mask)
 {
@@ -1959,7 +1964,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_par
        return 0;
 }
 
-static void
+void
 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
                                                          struct mlx5dr_match_param *mask)
 {
@@ -1982,8 +1987,8 @@ static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *v
        return 0;
 }
 
-static void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
-                                                     struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
+                                              struct mlx5dr_match_param *mask)
 {
        dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
 
@@ -2008,7 +2013,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void
+void
 dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
                                            struct mlx5dr_match_param *mask)
 {
@@ -2035,7 +2040,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
        return 0;
 }
 
-static void
+void
 dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
                                            struct mlx5dr_match_param *mask)
 {
@@ -2046,7 +2051,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
        sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag;
 }
 
-struct mlx5dr_ste_ctx ste_ctx_v1 = {
+static struct mlx5dr_ste_ctx ste_ctx_v1 = {
        /* Builders */
        .build_eth_l2_src_dst_init      = &dr_ste_v1_build_eth_l2_src_dst_init,
        .build_eth_l3_ipv6_src_init     = &dr_ste_v1_build_eth_l3_ipv6_src_init,
@@ -2091,7 +2096,8 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
        /* Actions */
        .actions_caps                   = DR_STE_CTX_ACTION_CAP_TX_POP |
                                          DR_STE_CTX_ACTION_CAP_RX_PUSH |
-                                         DR_STE_CTX_ACTION_CAP_RX_ENCAP,
+                                         DR_STE_CTX_ACTION_CAP_RX_ENCAP |
+                                         DR_STE_CTX_ACTION_CAP_POP_MDFY,
        .set_actions_rx                 = &dr_ste_v1_set_actions_rx,
        .set_actions_tx                 = &dr_ste_v1_set_actions_tx,
        .modify_field_arr_sz            = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
@@ -2103,3 +2109,8 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
        /* Send */
        .prepare_for_postsend           = &dr_ste_v1_prepare_for_postsend,
 };
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void)
+{
+       return &ste_ctx_v1;
+}
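
The hunks above export the formerly-static STE v1 helpers so the new v2 context below can reuse them. One detail worth noting is the miss-address encoding in dr_ste_v1_set_miss_addr()/dr_ste_v1_get_miss_addr(): the bwc STE stores only bits 31:6 of the address (the upper bits travel in a separate field), so miss addresses are implicitly 64-byte aligned and the getter reconstructs the value by shifting back. A toy illustration of that round trip (plain C, not kernel code; it models only the 31:6 field):

#include <assert.h>
#include <stdint.h>

/* Only bits 31:6 are stored, mirroring miss_address_31_6 above, so
 * any 64-byte-aligned address survives the set/get round trip.
 */
static uint32_t stored_index;

static void toy_set_miss_addr(uint64_t miss_addr)
{
	stored_index = (uint32_t)(miss_addr >> 6);	/* drop low 6 bits */
}

static uint64_t toy_get_miss_addr(void)
{
	return (uint64_t)stored_index << 6;		/* restore alignment */
}

int main(void)
{
	uint64_t addr = 0x1234c0;			/* 64-byte aligned */

	toy_set_miss_addr(addr);
	assert(toy_get_miss_addr() == addr);		/* lossless round trip */
	return 0;
}
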
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
new file mode 100644 (file)
index 0000000..8a1d497
--- /dev/null
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef        _DR_STE_V1_
+#define        _DR_STE_V1_
+
+#include "dr_types.h"
+#include "dr_ste.h"
+
+void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr);
+u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p);
+void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask);
+u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p);
+void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type);
+u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p);
+void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
+void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi);
+void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size);
+void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set,
+                             u32 actions_caps, u8 *last_ste,
+                             struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
+void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set,
+                             u32 actions_caps, u8 *last_ste,
+                             struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
+void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter,
+                             u8 length, u32 data);
+void dr_ste_v1_set_action_add(u8 *d_action, u8 hw_field, u8 shifter,
+                             u8 length, u32 data);
+void dr_ste_v1_set_action_copy(u8 *d_action, u8 dst_hw_field, u8 dst_shifter,
+                              u8 dst_len, u8 src_hw_field, u8 src_shifter);
+int dr_ste_v1_set_action_decap_l3_list(void *data, u32 data_sz, u8 *hw_action,
+                                      u32 hw_action_sz, u16 *used_hw_action_num);
+void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
+                                        struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
+                                         struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
+                                         struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
+                                             struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
+                                          struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
+                                        struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
+                              struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
+                                 struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
+                                  struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
+                                           struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
+                                           struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
+                              struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
+                                         struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
+                                     struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
+                                                   struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
+                                                struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
+                                        struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
+                                      struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+                                       struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+                                       struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
+                                                        struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
+                                                              struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
+                                              struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+                                                struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+                                                struct mlx5dr_match_param *mask);
+
+#endif  /* _DR_STE_V1_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
new file mode 100644 (file)
index 0000000..c60fddd
--- /dev/null
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "dr_ste_v1.h"
+
+enum {
+       DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0              = 0x00,
+       DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1              = 0x01,
+       DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2              = 0x02,
+       DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0          = 0x08,
+       DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1          = 0x09,
+       DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0              = 0x0e,
+       DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0              = 0x18,
+       DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1              = 0x19,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0            = 0x40,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1            = 0x41,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0        = 0x44,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1        = 0x45,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2        = 0x46,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3        = 0x47,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0        = 0x4c,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1        = 0x4d,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2        = 0x4e,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3        = 0x4f,
+       DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0            = 0x5e,
+       DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1            = 0x5f,
+       DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0           = 0x6f,
+       DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1           = 0x70,
+       DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE        = 0x7b,
+       DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE          = 0x7c,
+       DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0          = 0x90,
+       DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1          = 0x91,
+       DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0          = 0x92,
+       DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1          = 0x93,
+       DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0          = 0x94,
+       DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1          = 0x95,
+};
+
+static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = {
+       [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
+       },
+};
+
+static struct mlx5dr_ste_ctx ste_ctx_v2 = {
+       /* Builders */
+       .build_eth_l2_src_dst_init      = &dr_ste_v1_build_eth_l2_src_dst_init,
+       .build_eth_l3_ipv6_src_init     = &dr_ste_v1_build_eth_l3_ipv6_src_init,
+       .build_eth_l3_ipv6_dst_init     = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
+       .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
+       .build_eth_l2_src_init          = &dr_ste_v1_build_eth_l2_src_init,
+       .build_eth_l2_dst_init          = &dr_ste_v1_build_eth_l2_dst_init,
+       .build_eth_l2_tnl_init          = &dr_ste_v1_build_eth_l2_tnl_init,
+       .build_eth_l3_ipv4_misc_init    = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
+       .build_eth_ipv6_l3_l4_init      = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
+       .build_mpls_init                = &dr_ste_v1_build_mpls_init,
+       .build_tnl_gre_init             = &dr_ste_v1_build_tnl_gre_init,
+       .build_tnl_mpls_init            = &dr_ste_v1_build_tnl_mpls_init,
+       .build_tnl_mpls_over_udp_init   = &dr_ste_v1_build_tnl_mpls_over_udp_init,
+       .build_tnl_mpls_over_gre_init   = &dr_ste_v1_build_tnl_mpls_over_gre_init,
+       .build_icmp_init                = &dr_ste_v1_build_icmp_init,
+       .build_general_purpose_init     = &dr_ste_v1_build_general_purpose_init,
+       .build_eth_l4_misc_init         = &dr_ste_v1_build_eth_l4_misc_init,
+       .build_tnl_vxlan_gpe_init       = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
+       .build_tnl_geneve_init          = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
+       .build_tnl_geneve_tlv_opt_init  = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
+       .build_tnl_geneve_tlv_opt_exist_init =
+                                 &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
+       .build_register_0_init          = &dr_ste_v1_build_register_0_init,
+       .build_register_1_init          = &dr_ste_v1_build_register_1_init,
+       .build_src_gvmi_qpn_init        = &dr_ste_v1_build_src_gvmi_qpn_init,
+       .build_flex_parser_0_init       = &dr_ste_v1_build_flex_parser_0_init,
+       .build_flex_parser_1_init       = &dr_ste_v1_build_flex_parser_1_init,
+       .build_tnl_gtpu_init            = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
+       .build_tnl_header_0_1_init      = &dr_ste_v1_build_tnl_header_0_1_init,
+       .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
+       .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
+
+       /* Getters and Setters */
+       .ste_init                       = &dr_ste_v1_init,
+       .set_next_lu_type               = &dr_ste_v1_set_next_lu_type,
+       .get_next_lu_type               = &dr_ste_v1_get_next_lu_type,
+       .set_miss_addr                  = &dr_ste_v1_set_miss_addr,
+       .get_miss_addr                  = &dr_ste_v1_get_miss_addr,
+       .set_hit_addr                   = &dr_ste_v1_set_hit_addr,
+       .set_byte_mask                  = &dr_ste_v1_set_byte_mask,
+       .get_byte_mask                  = &dr_ste_v1_get_byte_mask,
+
+       /* Actions */
+       .actions_caps                   = DR_STE_CTX_ACTION_CAP_TX_POP |
+                                         DR_STE_CTX_ACTION_CAP_RX_PUSH |
+                                         DR_STE_CTX_ACTION_CAP_RX_ENCAP,
+       .set_actions_rx                 = &dr_ste_v1_set_actions_rx,
+       .set_actions_tx                 = &dr_ste_v1_set_actions_tx,
+       .modify_field_arr_sz            = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr),
+       .modify_field_arr               = dr_ste_v2_action_modify_field_arr,
+       .set_action_set                 = &dr_ste_v1_set_action_set,
+       .set_action_add                 = &dr_ste_v1_set_action_add,
+       .set_action_copy                = &dr_ste_v1_set_action_copy,
+       .set_action_decap_l3_list       = &dr_ste_v1_set_action_decap_l3_list,
+
+       /* Send */
+       .prepare_for_postsend           = &dr_ste_v1_prepare_for_postsend,
+};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void)
+{
+       return &ste_ctx_v2;
+}
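
Note that ste_ctx_v2 reuses the v1 builders and action helpers wholesale; the visible differences are its own modify-header field map (dr_ste_v2_action_modify_field_arr) and the absence of DR_STE_CTX_ACTION_CAP_POP_MDFY, so on v2 devices a VLAN pop and a modify-header action still may not share an STE. With both contexts now static and reachable only through mlx5dr_ste_get_ctx_v1()/mlx5dr_ste_get_ctx_v2(), callers are expected to dispatch on the device's steering format version. A sketch of what such a dispatcher could look like (assuming a matching v0 getter exists for ConnectX-5; the actual selection logic lives in dr_ste.c and may differ):

struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
{
	if (version == MLX5_STEERING_FORMAT_CONNECTX_5)
		return mlx5dr_ste_get_ctx_v0();		/* assumed v0 getter */
	else if (version == MLX5_STEERING_FORMAT_CONNECTX_6DX)
		return mlx5dr_ste_get_ctx_v1();
	else if (version == MLX5_STEERING_FORMAT_CONNECTX_7)
		return mlx5dr_ste_get_ctx_v2();

	return NULL;
}
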
index 8ca1106..e5f6412 100644 (file)
@@ -10,6 +10,7 @@ static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,
        struct mlx5dr_matcher_rx_tx *last_nic_matcher = NULL;
        struct mlx5dr_htbl_connect_info info;
        struct mlx5dr_ste_htbl *last_htbl;
+       struct mlx5dr_icm_chunk *chunk;
        int ret;
 
        if (!list_empty(&nic_tbl->nic_matcher_list))
@@ -22,13 +23,14 @@ static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,
        else
                last_htbl = nic_tbl->s_anchor;
 
-       if (action)
-               nic_tbl->default_icm_addr =
-                       nic_tbl->nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX ?
-                               action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
-                               action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr;
-       else
+       if (action) {
+               chunk = nic_tbl->nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX ?
+                       action->dest_tbl->tbl->rx.s_anchor->chunk :
+                       action->dest_tbl->tbl->tx.s_anchor->chunk;
+               nic_tbl->default_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(chunk);
+       } else {
                nic_tbl->default_icm_addr = nic_tbl->nic_dmn->default_icm_addr;
+       }
 
        info.type = CONNECT_MISS;
        info.miss_icm_addr = nic_tbl->default_icm_addr;
@@ -222,10 +224,10 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
        int ret;
 
        if (tbl->rx.s_anchor)
-               icm_addr_rx = tbl->rx.s_anchor->chunk->icm_addr;
+               icm_addr_rx = mlx5dr_icm_pool_get_chunk_icm_addr(tbl->rx.s_anchor->chunk);
 
        if (tbl->tx.s_anchor)
-               icm_addr_tx = tbl->tx.s_anchor->chunk->icm_addr;
+               icm_addr_tx = mlx5dr_icm_pool_get_chunk_icm_addr(tbl->tx.s_anchor->chunk);
 
        ft_attr.table_type = tbl->table_type;
        ft_attr.icm_addr_rx = icm_addr_rx;
@@ -305,3 +307,8 @@ u32 mlx5dr_table_get_id(struct mlx5dr_table *tbl)
 {
        return tbl->table_id;
 }
+
+struct mlx5dr_table *mlx5dr_table_get_from_fs_ft(struct mlx5_flow_table *ft)
+{
+       return ft->fs_dr_table.dr_table;
+}
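
The table code now fetches a chunk's ICM address through mlx5dr_icm_pool_get_chunk_icm_addr() instead of reading a cached chunk->icm_addr field (the field itself is deleted from struct mlx5dr_icm_chunk in the dr_types.h hunk below as a memory optimisation). One plausible shape for that accessor, computing the address on demand from the chunk's buddy-allocator segment (the buddy/MR field names here are assumptions for illustration):

u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk)
{
	u32 entry_size =
		mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);

	/* base of the buddy's ICM region plus this chunk's segment
	 * offset; assumes icm_mr/icm_start_addr exist on the buddy
	 * as sketched
	 */
	return (u64)chunk->buddy_mem->icm_mr->icm_start_addr +
	       (u64)entry_size * chunk->seg;
}
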
index 55fcb75..46866a5 100644 (file)
@@ -91,6 +91,7 @@ enum mlx5dr_ste_ctx_action_cap {
        DR_STE_CTX_ACTION_CAP_TX_POP   = 1 << 0,
        DR_STE_CTX_ACTION_CAP_RX_PUSH  = 1 << 1,
        DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 2,
+       DR_STE_CTX_ACTION_CAP_POP_MDFY = 1 << 3,
 };
 
 enum {
@@ -146,10 +147,12 @@ struct mlx5dr_matcher_rx_tx;
 struct mlx5dr_ste_ctx;
 
 struct mlx5dr_ste {
-       u8 *hw_ste;
        /* refcount: indicates the number of rules that are using this ste */
        u32 refcount;
 
+       /* this ste is part of a rule, located in ste's chain */
+       u8 ste_chain_location;
+
        /* attached to the miss_list head at each htbl entry */
        struct list_head miss_list_node;
 
@@ -160,9 +163,6 @@ struct mlx5dr_ste {
 
        /* The rule this STE belongs to */
        struct mlx5dr_rule_rx_tx *rule_rx_tx;
-
-       /* this ste is part of a rule, located in ste's chain */
-       u8 ste_chain_location;
 };
 
 struct mlx5dr_ste_htbl_ctrl {
@@ -180,14 +180,7 @@ struct mlx5dr_ste_htbl {
        u16 byte_mask;
        u32 refcount;
        struct mlx5dr_icm_chunk *chunk;
-       struct mlx5dr_ste *ste_arr;
-       u8 *hw_ste_arr;
-
-       struct list_head *miss_list;
-
-       enum mlx5dr_icm_chunk_size chunk_size;
        struct mlx5dr_ste *pointing_ste;
-
        struct mlx5dr_ste_htbl_ctrl ctrl;
 };
 
@@ -555,7 +548,9 @@ struct mlx5dr_match_spec {
         */
        u32 tcp_dport:16;
 
-       u32 reserved_auto1:24;
+       u32 reserved_auto1:16;
+       u32 ipv4_ihl:4;
+       u32 reserved_auto2:4;
        u32 ttl_hoplimit:8;
 
        /* UDP source port.;tcp and udp sport/dport are mutually exclusive */
@@ -1094,16 +1089,12 @@ int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
 struct mlx5dr_icm_chunk {
        struct mlx5dr_icm_buddy_mem *buddy_mem;
        struct list_head chunk_list;
-       u32 rkey;
-       u32 num_of_entries;
-       u32 byte_size;
-       u64 icm_addr;
-       u64 mr_addr;
 
        /* indicates the index of this chunk in the whole memory,
         * used for deleting the chunk from the buddy
         */
        unsigned int seg;
+       enum mlx5dr_icm_chunk_size size;
 
        /* Memory optimisation */
        struct mlx5dr_ste *ste_arr;
@@ -1143,6 +1134,13 @@ int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
                                   enum mlx5dr_ipv outer_ipv,
                                   enum mlx5dr_ipv inner_ipv);
 
+u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk);
+u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk);
+u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk);
+u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk);
+u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk);
+u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste);
+
 static inline int
 mlx5dr_icm_pool_dm_type_to_entry_size(enum mlx5dr_icm_type icm_type)
 {
@@ -1175,7 +1173,7 @@ static inline int
 mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl *htbl)
 {
        int num_of_entries =
-               mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size);
+               mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk->size);
 
        /* Threshold is 50%, one is added to table of size 1 */
        return (num_of_entries + 1) / 2;
@@ -1184,7 +1182,7 @@ mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl *htbl)
 static inline bool
 mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl)
 {
-       if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
+       if (htbl->chunk->size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
                return false;
 
        return true;
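
For reference, the growth threshold above is 50% with rounding that keeps a single-entry table usable: (1 + 1) / 2 = 1, so a size-1 table may hold one entry before a resize is considered, and a 64-entry table grows once 32 entries are occupied. A quick check of the arithmetic (plain C, not kernel code):

#include <assert.h>

/* Mirrors mlx5dr_ste_htbl_increase_threshold() above */
static int increase_threshold(int num_of_entries)
{
	return (num_of_entries + 1) / 2;
}

int main(void)
{
	assert(increase_threshold(1) == 1);
	assert(increase_threshold(64) == 32);
	return 0;
}
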
index 57ffcab..045b0cf 100644 (file)
@@ -758,7 +758,7 @@ static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
                                        enum fs_flow_table_type ft_type)
 {
        if (ft_type != FS_FT_FDB ||
-           MLX5_CAP_GEN(ns->dev, steering_format_version) != MLX5_STEERING_FORMAT_CONNECTX_6DX)
+           MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5)
                return 0;
 
        return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX | MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX;
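
This flips the version test from an allow-list ("ConnectX-6DX only") to a deny-list: the VLAN push-on-RX / pop-on-TX capabilities are now advertised for every FDB steering format newer than ConnectX-5, which covers the ConnectX-7 support enabled below. Restated as a predicate (a sketch of the logic, assuming the enum gains members for newer devices):

static bool dr_vlan_caps_supported(enum fs_flow_table_type ft_type,
				   u8 steering_format_version)
{
	/* FDB only; every format except ConnectX-5 qualifies */
	return ft_type == FS_FT_FDB &&
	       steering_format_version != MLX5_STEERING_FORMAT_CONNECTX_5;
}
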
index dfa2234..ec5cbec 100644 (file)
@@ -53,6 +53,9 @@ void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
 struct mlx5dr_table *
 mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags);
 
+struct mlx5dr_table *
+mlx5dr_table_get_from_fs_ft(struct mlx5_flow_table *ft);
+
 int mlx5dr_table_destroy(struct mlx5dr_table *table);
 
 u32 mlx5dr_table_get_id(struct mlx5dr_table *table);
@@ -136,7 +139,7 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev)
               (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
                (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
                 (MLX5_CAP_GEN(dev, steering_format_version) <=
-                 MLX5_STEERING_FORMAT_CONNECTX_6DX)));
+                 MLX5_STEERING_FORMAT_CONNECTX_7)));
 }
 
 /* buddy functions & structure */
index 01e9c41..8455e79 100644 (file)
@@ -31,7 +31,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/io-mapping.h>
 #include <linux/mlx5/driver.h>
 #include "mlx5_core.h"
@@ -100,19 +99,21 @@ static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
        int err = -ENOMEM;
        phys_addr_t pfn;
        int bfregs;
+       int node;
        int i;
 
        bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
-       up = kzalloc(sizeof(*up), GFP_KERNEL);
+       node = mdev->priv.numa_node;
+       up = kzalloc_node(sizeof(*up), GFP_KERNEL, node);
        if (!up)
                return ERR_PTR(err);
 
        up->mdev = mdev;
-       up->reg_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
+       up->reg_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node);
        if (!up->reg_bitmap)
                goto error1;
 
-       up->fp_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
+       up->fp_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node);
        if (!up->fp_bitmap)
                goto error1;
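
The allocation changes above follow one pattern: place the UAR bookkeeping on the device's local NUMA node so datapath accesses stay node-local, with bitmap_zalloc_node() as the NUMA-aware counterpart of bitmap_zalloc(). A minimal sketch of the pattern (struct name hypothetical; without __GFP_THISNODE the node is a preference and the allocator may fall back to other nodes):

/* Allocate per-device state near the device (sketch) */
struct my_state *alloc_state(struct mlx5_core_dev *mdev)
{
	return kzalloc_node(sizeof(struct my_state), GFP_KERNEL,
			    mdev->priv.numa_node);
}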
 
index 0bf1d64..b13e0f8 100644 (file)
@@ -1217,36 +1217,37 @@ static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
                                  ARRAY_SIZE(mlxsw_core_fw_devlink_params));
 }
 
+static void *__dl_port(struct devlink_port *devlink_port)
+{
+       return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
+}
+
 static int mlxsw_devlink_port_split(struct devlink *devlink,
-                                   unsigned int port_index,
+                                   struct devlink_port *port,
                                    unsigned int count,
                                    struct netlink_ext_ack *extack)
 {
+       struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
        struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
 
-       if (port_index >= mlxsw_core->max_ports) {
-               NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
-               return -EINVAL;
-       }
        if (!mlxsw_core->driver->port_split)
                return -EOPNOTSUPP;
-       return mlxsw_core->driver->port_split(mlxsw_core, port_index, count,
-                                             extack);
+       return mlxsw_core->driver->port_split(mlxsw_core,
+                                             mlxsw_core_port->local_port,
+                                             count, extack);
 }
 
 static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
-                                     unsigned int port_index,
+                                     struct devlink_port *port,
                                      struct netlink_ext_ack *extack)
 {
+       struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
        struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
 
-       if (port_index >= mlxsw_core->max_ports) {
-               NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
-               return -EINVAL;
-       }
        if (!mlxsw_core->driver->port_unsplit)
                return -EOPNOTSUPP;
-       return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index,
+       return mlxsw_core->driver->port_unsplit(mlxsw_core,
+                                               mlxsw_core_port->local_port,
                                                extack);
 }
 
@@ -1280,11 +1281,6 @@ mlxsw_devlink_sb_pool_set(struct devlink *devlink,
                                         extack);
 }
 
-static void *__dl_port(struct devlink_port *devlink_port)
-{
-       return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
-}
-
 static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
                                       enum devlink_port_type port_type)
 {
@@ -2983,7 +2979,7 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
        attrs.switch_id.id_len = switch_id_len;
        mlxsw_core_port->local_port = local_port;
        devlink_port_attrs_set(devlink_port, &attrs);
-       err = devlink_port_register(devlink, devlink_port, local_port);
+       err = devl_port_register(devlink, devlink_port, local_port);
        if (err)
                memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
        return err;
@@ -2995,7 +2991,7 @@ static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port
                                        &mlxsw_core->ports[local_port];
        struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
 
-       devlink_port_unregister(devlink_port);
+       devl_port_unregister(devlink_port);
        memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
 }
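
For reference, the devlink_ops callback prototypes after this API change, as reconstructed from the signatures above; the split/unsplit hooks now receive the devlink_port object directly, which is why the driver can drop its own index bounds check and recover its private state with container_of() via __dl_port():

/* New-style devlink_ops hooks (prototypes as used above) */
int (*port_split)(struct devlink *devlink, struct devlink_port *port,
		  unsigned int count, struct netlink_ext_ack *extack);
int (*port_unsplit)(struct devlink *devlink, struct devlink_port *port,
		    struct netlink_ext_ack *extack);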
 
index 0602099..3bc012d 100644 (file)
@@ -422,6 +422,7 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
                        struct netlink_ext_ack *extack)
 {
        struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
+       struct devlink *devlink = priv_to_devlink(mlxsw_core);
        int err;
 
        mlxsw_m->core = mlxsw_core;
@@ -437,7 +438,9 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
                return err;
        }
 
+       devl_lock(devlink);
        err = mlxsw_m_ports_create(mlxsw_m);
+       devl_unlock(devlink);
        if (err) {
                dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n");
                return err;
@@ -449,8 +452,11 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
 static void mlxsw_m_fini(struct mlxsw_core *mlxsw_core)
 {
        struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
+       struct devlink *devlink = priv_to_devlink(mlxsw_core);
 
+       devl_lock(devlink);
        mlxsw_m_ports_remove(mlxsw_m);
+       devl_unlock(devlink);
 }
 
 static const struct mlxsw_config_profile mlxsw_m_config_profile;
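
The devl_port_register()/devl_port_unregister() calls used by the core above are the variants that expect the caller to already hold the devlink instance lock; that is why port creation and removal in this driver (and in spectrum below) are now bracketed by devl_lock()/devl_unlock(). Condensed, the pattern is:

/* Hold the devlink instance lock across port (un)registration */
devl_lock(devlink);
err = mlxsw_m_ports_create(mlxsw_m);	/* ends up in devl_port_register() */
devl_unlock(devlink);
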
index 7b7b171..8eb0509 100644 (file)
@@ -2818,6 +2818,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
                         struct netlink_ext_ack *extack)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+       struct devlink *devlink = priv_to_devlink(mlxsw_core);
        int err;
 
        mlxsw_sp->core = mlxsw_core;
@@ -2978,7 +2979,9 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
                goto err_sample_trigger_init;
        }
 
+       devl_lock(devlink);
        err = mlxsw_sp_ports_create(mlxsw_sp);
+       devl_unlock(devlink);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
                goto err_ports_create;
@@ -3159,8 +3162,12 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+       struct devlink *devlink = priv_to_devlink(mlxsw_core);
 
+       devl_lock(devlink);
        mlxsw_sp_ports_remove(mlxsw_sp);
+       devl_unlock(devlink);
+
        rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
        mlxsw_sp_port_module_info_fini(mlxsw_sp);
        mlxsw_sp_dpipe_fini(mlxsw_sp);
index d024983..2b3eb5e 100644 (file)
@@ -5225,7 +5225,6 @@ static irqreturn_t netdev_intr(int irq, void *dev_id)
  * Linux network device functions
  */
 
-static unsigned long next_jiffies;
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void netdev_netpoll(struct net_device *dev)
@@ -5411,10 +5410,12 @@ static int netdev_open(struct net_device *dev)
        struct dev_info *hw_priv = priv->adapter;
        struct ksz_hw *hw = &hw_priv->hw;
        struct ksz_port *port = &priv->port;
+       unsigned long next_jiffies;
        int i;
        int p;
        int rc = 0;
 
+       next_jiffies = jiffies + HZ * 2;
        priv->multicast = 0;
        priv->promiscuous = 0;
 
@@ -5428,10 +5429,7 @@ static int netdev_open(struct net_device *dev)
                if (rc)
                        return rc;
                for (i = 0; i < hw->mib_port_cnt; i++) {
-                       if (next_jiffies < jiffies)
-                               next_jiffies = jiffies + HZ * 2;
-                       else
-                               next_jiffies += HZ * 1;
+                       next_jiffies += HZ * 1;
                        hw_priv->counter[i].time = next_jiffies;
                        hw->port_mib[i].state = media_disconnected;
                        port_init_cnt(hw, i);
@@ -6563,6 +6561,7 @@ static void mib_read_work(struct work_struct *work)
        struct dev_info *hw_priv =
                container_of(work, struct dev_info, mib_read);
        struct ksz_hw *hw = &hw_priv->hw;
+       unsigned long next_jiffies;
        struct ksz_port_mib *mib;
        int i;
 
index 5f1e7b8..c8fe8b3 100644 (file)
@@ -7,6 +7,8 @@
 #include <linux/phy.h>
 #include "lan743x_main.h"
 #include "lan743x_ethtool.h"
+#include <linux/sched.h>
+#include <linux/iopoll.h>
 
 /* eeprom */
 #define LAN743X_EEPROM_MAGIC               (0x74A5)
 #define OTP_INDICATOR_1                            (0xF3)
 #define OTP_INDICATOR_2                            (0xF7)
 
+#define LOCK_TIMEOUT_MAX_CNT               (100) /* 1 sec (10 msec * 100) */
+
+#define LAN743X_CSR_READ_OP(offset)         lan743x_csr_read(adapter, offset)
+
 static int lan743x_otp_power_up(struct lan743x_adapter *adapter)
 {
        u32 reg_value;
@@ -149,6 +155,217 @@ static int lan743x_otp_write(struct lan743x_adapter *adapter, u32 offset,
        return 0;
 }
 
+static int lan743x_hs_syslock_acquire(struct lan743x_adapter *adapter,
+                                     u16 timeout)
+{
+       u16 timeout_cnt = 0;
+       u32 val;
+
+       do {
+               spin_lock(&adapter->eth_syslock_spinlock);
+               if (adapter->eth_syslock_acquire_cnt == 0) {
+                       lan743x_csr_write(adapter, ETH_SYSTEM_SYS_LOCK_REG,
+                                         SYS_LOCK_REG_ENET_SS_LOCK_);
+                       val = lan743x_csr_read(adapter,
+                                              ETH_SYSTEM_SYS_LOCK_REG);
+                       if (val & SYS_LOCK_REG_ENET_SS_LOCK_) {
+                               adapter->eth_syslock_acquire_cnt++;
+                               WARN_ON(adapter->eth_syslock_acquire_cnt == 0);
+                               spin_unlock(&adapter->eth_syslock_spinlock);
+                               break;
+                       }
+               } else {
+                       adapter->eth_syslock_acquire_cnt++;
+                       WARN_ON(adapter->eth_syslock_acquire_cnt == 0);
+                       spin_unlock(&adapter->eth_syslock_spinlock);
+                       break;
+               }
+
+               spin_unlock(&adapter->eth_syslock_spinlock);
+
+               if (timeout_cnt++ < timeout)
+                       usleep_range(10000, 11000);
+               else
+                       return -ETIMEDOUT;
+       } while (true);
+
+       return 0;
+}
+
+static void lan743x_hs_syslock_release(struct lan743x_adapter *adapter)
+{
+       u32 val;
+
+       spin_lock(&adapter->eth_syslock_spinlock);
+       WARN_ON(adapter->eth_syslock_acquire_cnt == 0);
+
+       if (adapter->eth_syslock_acquire_cnt) {
+               adapter->eth_syslock_acquire_cnt--;
+               if (adapter->eth_syslock_acquire_cnt == 0) {
+                       lan743x_csr_write(adapter, ETH_SYSTEM_SYS_LOCK_REG, 0);
+                       val = lan743x_csr_read(adapter,
+                                              ETH_SYSTEM_SYS_LOCK_REG);
+                       WARN_ON((val & SYS_LOCK_REG_ENET_SS_LOCK_) != 0);
+               }
+       }
+
+       spin_unlock(&adapter->eth_syslock_spinlock);
+}
+
+static void lan743x_hs_otp_power_up(struct lan743x_adapter *adapter)
+{
+       u32 reg_value;
+
+       reg_value = lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+       if (reg_value & OTP_PWR_DN_PWRDN_N_) {
+               reg_value &= ~OTP_PWR_DN_PWRDN_N_;
+               lan743x_csr_write(adapter, HS_OTP_PWR_DN, reg_value);
+               /* Read back to flush the posted write, so the delay below
+                * starts only after the write has reached the hardware
+                */
+               lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+               udelay(1);
+       }
+}
+
+static void lan743x_hs_otp_power_down(struct lan743x_adapter *adapter)
+{
+       u32 reg_value;
+
+       reg_value = lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+       if (!(reg_value & OTP_PWR_DN_PWRDN_N_)) {
+               reg_value |= OTP_PWR_DN_PWRDN_N_;
+               lan743x_csr_write(adapter, HS_OTP_PWR_DN, reg_value);
+               /* Read back to flush the posted write, so the delay below
+                * starts only after the write has reached the hardware
+                */
+               lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+               udelay(1);
+       }
+}
+
+static void lan743x_hs_otp_set_address(struct lan743x_adapter *adapter,
+                                      u32 address)
+{
+       lan743x_csr_write(adapter, HS_OTP_ADDR_HIGH, (address >> 8) & 0x03);
+       lan743x_csr_write(adapter, HS_OTP_ADDR_LOW, address & 0xFF);
+}
+
+static void lan743x_hs_otp_read_go(struct lan743x_adapter *adapter)
+{
+       lan743x_csr_write(adapter, HS_OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+       lan743x_csr_write(adapter, HS_OTP_CMD_GO, OTP_CMD_GO_GO_);
+}
+
+static int lan743x_hs_otp_cmd_cmplt_chk(struct lan743x_adapter *adapter)
+{
+       u32 val;
+
+       return readx_poll_timeout(LAN743X_CSR_READ_OP, HS_OTP_STATUS, val,
+                                 !(val & OTP_STATUS_BUSY_),
+                                 80, 10000);
+}
+
+static int lan743x_hs_otp_read(struct lan743x_adapter *adapter, u32 offset,
+                              u32 length, u8 *data)
+{
+       int ret;
+       int i;
+
+       ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+       if (ret < 0)
+               return ret;
+
+       lan743x_hs_otp_power_up(adapter);
+
+       ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+       if (ret < 0)
+               goto power_down;
+
+       lan743x_hs_syslock_release(adapter);
+
+       for (i = 0; i < length; i++) {
+               ret = lan743x_hs_syslock_acquire(adapter,
+                                                LOCK_TIMEOUT_MAX_CNT);
+               if (ret < 0)
+                       return ret;
+
+               lan743x_hs_otp_set_address(adapter, offset + i);
+
+               lan743x_hs_otp_read_go(adapter);
+               ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+               if (ret < 0)
+                       goto power_down;
+
+               data[i] = lan743x_csr_read(adapter, HS_OTP_READ_DATA);
+
+               lan743x_hs_syslock_release(adapter);
+       }
+
+       ret = lan743x_hs_syslock_acquire(adapter,
+                                        LOCK_TIMEOUT_MAX_CNT);
+       if (ret < 0)
+               return ret;
+
+power_down:
+       lan743x_hs_otp_power_down(adapter);
+       lan743x_hs_syslock_release(adapter);
+
+       return ret;
+}
+
+static int lan743x_hs_otp_write(struct lan743x_adapter *adapter, u32 offset,
+                               u32 length, u8 *data)
+{
+       int ret;
+       int i;
+
+       ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+       if (ret < 0)
+               return ret;
+
+       lan743x_hs_otp_power_up(adapter);
+
+       ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+       if (ret < 0)
+               goto power_down;
+
+       /* set to BYTE program mode */
+       lan743x_csr_write(adapter, HS_OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+
+       lan743x_hs_syslock_release(adapter);
+
+       for (i = 0; i < length; i++) {
+               ret = lan743x_hs_syslock_acquire(adapter,
+                                                LOCK_TIMEOUT_MAX_CNT);
+               if (ret < 0)
+                       return ret;
+
+               lan743x_hs_otp_set_address(adapter, offset + i);
+
+               lan743x_csr_write(adapter, HS_OTP_PRGM_DATA, data[i]);
+               lan743x_csr_write(adapter, HS_OTP_TST_CMD,
+                                 OTP_TST_CMD_PRGVRFY_);
+               lan743x_csr_write(adapter, HS_OTP_CMD_GO, OTP_CMD_GO_GO_);
+
+               ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+               if (ret < 0)
+                       goto power_down;
+
+               lan743x_hs_syslock_release(adapter);
+       }
+
+       ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+       if (ret < 0)
+               return ret;
+
+power_down:
+       lan743x_hs_otp_power_down(adapter);
+       lan743x_hs_syslock_release(adapter);
+
+       return ret;
+}
+
 static int lan743x_eeprom_wait(struct lan743x_adapter *adapter)
 {
        unsigned long start_time = jiffies;
@@ -263,6 +480,100 @@ static int lan743x_eeprom_write(struct lan743x_adapter *adapter,
        return 0;
 }
 
+static int lan743x_hs_eeprom_cmd_cmplt_chk(struct lan743x_adapter *adapter)
+{
+       u32 val;
+
+       return readx_poll_timeout(LAN743X_CSR_READ_OP, HS_E2P_CMD, val,
+                                 (!(val & HS_E2P_CMD_EPC_BUSY_) ||
+                                   (val & HS_E2P_CMD_EPC_TIMEOUT_)),
+                                 50, 10000);
+}
+
+static int lan743x_hs_eeprom_read(struct lan743x_adapter *adapter,
+                                 u32 offset, u32 length, u8 *data)
+{
+       int retval;
+       u32 val;
+       int i;
+
+       retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+       if (retval < 0)
+               return retval;
+
+       retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+       lan743x_hs_syslock_release(adapter);
+       if (retval < 0)
+               return retval;
+
+       for (i = 0; i < length; i++) {
+               retval = lan743x_hs_syslock_acquire(adapter,
+                                                   LOCK_TIMEOUT_MAX_CNT);
+               if (retval < 0)
+                       return retval;
+
+               val = HS_E2P_CMD_EPC_BUSY_ | HS_E2P_CMD_EPC_CMD_READ_;
+               val |= (offset & HS_E2P_CMD_EPC_ADDR_MASK_);
+               lan743x_csr_write(adapter, HS_E2P_CMD, val);
+               retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+               if (retval < 0) {
+                       lan743x_hs_syslock_release(adapter);
+                       return retval;
+               }
+
+               val = lan743x_csr_read(adapter, HS_E2P_DATA);
+
+               lan743x_hs_syslock_release(adapter);
+
+               data[i] = val & 0xFF;
+               offset++;
+       }
+
+       return 0;
+}
+
+static int lan743x_hs_eeprom_write(struct lan743x_adapter *adapter,
+                                  u32 offset, u32 length, u8 *data)
+{
+       int retval;
+       u32 val;
+       int i;
+
+       retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+       if (retval < 0)
+               return retval;
+
+       retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+       lan743x_hs_syslock_release(adapter);
+       if (retval < 0)
+               return retval;
+
+       for (i = 0; i < length; i++) {
+               retval = lan743x_hs_syslock_acquire(adapter,
+                                                   LOCK_TIMEOUT_MAX_CNT);
+               if (retval < 0)
+                       return retval;
+
+               /* Fill data register */
+               val = data[i];
+               lan743x_csr_write(adapter, HS_E2P_DATA, val);
+
+               /* Send "write" command */
+               val = HS_E2P_CMD_EPC_BUSY_ | HS_E2P_CMD_EPC_CMD_WRITE_;
+               val |= (offset & HS_E2P_CMD_EPC_ADDR_MASK_);
+               lan743x_csr_write(adapter, HS_E2P_CMD, val);
+
+               retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+               lan743x_hs_syslock_release(adapter);
+               if (retval < 0)
+                       return retval;
+
+               offset++;
+       }
+
+       return 0;
+}
+
 static void lan743x_ethtool_get_drvinfo(struct net_device *netdev,
                                        struct ethtool_drvinfo *info)
 {
@@ -304,10 +615,21 @@ static int lan743x_ethtool_get_eeprom(struct net_device *netdev,
        struct lan743x_adapter *adapter = netdev_priv(netdev);
        int ret = 0;
 
-       if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP)
-               ret = lan743x_otp_read(adapter, ee->offset, ee->len, data);
-       else
-               ret = lan743x_eeprom_read(adapter, ee->offset, ee->len, data);
+       if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) {
+               if (adapter->is_pci11x1x)
+                       ret = lan743x_hs_otp_read(adapter, ee->offset,
+                                                 ee->len, data);
+               else
+                       ret = lan743x_otp_read(adapter, ee->offset,
+                                              ee->len, data);
+       } else {
+               if (adapter->is_pci11x1x)
+                       ret = lan743x_hs_eeprom_read(adapter, ee->offset,
+                                                    ee->len, data);
+               else
+                       ret = lan743x_eeprom_read(adapter, ee->offset,
+                                                 ee->len, data);
+       }
 
        return ret;
 }
@@ -321,13 +643,22 @@ static int lan743x_ethtool_set_eeprom(struct net_device *netdev,
        if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) {
                /* Beware!  OTP is One Time Programming ONLY! */
                if (ee->magic == LAN743X_OTP_MAGIC) {
-                       ret = lan743x_otp_write(adapter, ee->offset,
-                                               ee->len, data);
+                       if (adapter->is_pci11x1x)
+                               ret = lan743x_hs_otp_write(adapter, ee->offset,
+                                                          ee->len, data);
+                       else
+                               ret = lan743x_otp_write(adapter, ee->offset,
+                                                       ee->len, data);
                }
        } else {
                if (ee->magic == LAN743X_EEPROM_MAGIC) {
-                       ret = lan743x_eeprom_write(adapter, ee->offset,
-                                                  ee->len, data);
+                       if (adapter->is_pci11x1x)
+                               ret = lan743x_hs_eeprom_write(adapter,
+                                                             ee->offset,
+                                                             ee->len, data);
+                       else
+                               ret = lan743x_eeprom_write(adapter, ee->offset,
+                                                          ee->len, data);
                }
        }
 
@@ -365,6 +696,14 @@ static const char lan743x_set1_sw_cnt_strings[][ETH_GSTRING_LEN] = {
        "RX Queue 3 Frames",
 };
 
+static const char lan743x_tx_queue_cnt_strings[][ETH_GSTRING_LEN] = {
+       "TX Queue 0 Frames",
+       "TX Queue 1 Frames",
+       "TX Queue 2 Frames",
+       "TX Queue 3 Frames",
+       "TX Total Queue Frames",
+};
+
 static const char lan743x_set2_hw_cnt_strings[][ETH_GSTRING_LEN] = {
        "RX Total Frames",
        "EEE RX LPI Transitions",
@@ -462,6 +801,8 @@ static const char lan743x_priv_flags_strings[][ETH_GSTRING_LEN] = {
 static void lan743x_ethtool_get_strings(struct net_device *netdev,
                                        u32 stringset, u8 *data)
 {
+       struct lan743x_adapter *adapter = netdev_priv(netdev);
+
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(data, lan743x_set0_hw_cnt_strings,
@@ -473,6 +814,13 @@ static void lan743x_ethtool_get_strings(struct net_device *netdev,
                       sizeof(lan743x_set1_sw_cnt_strings)],
                       lan743x_set2_hw_cnt_strings,
                       sizeof(lan743x_set2_hw_cnt_strings));
+               if (adapter->is_pci11x1x) {
+                       memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings) +
+                              sizeof(lan743x_set1_sw_cnt_strings) +
+                              sizeof(lan743x_set2_hw_cnt_strings)],
+                              lan743x_tx_queue_cnt_strings,
+                              sizeof(lan743x_tx_queue_cnt_strings));
+               }
                break;
        case ETH_SS_PRIV_FLAGS:
                memcpy(data, lan743x_priv_flags_strings,
@@ -486,7 +834,9 @@ static void lan743x_ethtool_get_ethtool_stats(struct net_device *netdev,
                                              u64 *data)
 {
        struct lan743x_adapter *adapter = netdev_priv(netdev);
+       u64 total_queue_count = 0;
        int data_index = 0;
+       u64 pkt_cnt;
        u32 buf;
        int i;
 
@@ -500,6 +850,14 @@ static void lan743x_ethtool_get_ethtool_stats(struct net_device *netdev,
                buf = lan743x_csr_read(adapter, lan743x_set2_hw_cnt_addr[i]);
                data[data_index++] = (u64)buf;
        }
+       if (adapter->is_pci11x1x) {
+               for (i = 0; i < ARRAY_SIZE(adapter->tx); i++) {
+                       pkt_cnt = (u64)(adapter->tx[i].frame_count);
+                       data[data_index++] = pkt_cnt;
+                       total_queue_count += pkt_cnt;
+               }
+               data[data_index++] = total_queue_count;
+       }
 }
 
 static u32 lan743x_ethtool_get_priv_flags(struct net_device *netdev)
@@ -520,6 +878,8 @@ static int lan743x_ethtool_set_priv_flags(struct net_device *netdev, u32 flags)
 
 static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
 {
+       struct lan743x_adapter *adapter = netdev_priv(netdev);
+
        switch (sset) {
        case ETH_SS_STATS:
        {
@@ -528,6 +888,8 @@ static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
                ret = ARRAY_SIZE(lan743x_set0_hw_cnt_strings);
                ret += ARRAY_SIZE(lan743x_set1_sw_cnt_strings);
                ret += ARRAY_SIZE(lan743x_set2_hw_cnt_strings);
+               if (adapter->is_pci11x1x)
+                       ret += ARRAY_SIZE(lan743x_tx_queue_cnt_strings);
                return ret;
        }
        case ETH_SS_PRIV_FLAGS:
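
The completion-check helpers above are built on readx_poll_timeout() from
<linux/iopoll.h>, which expects a single-argument read op; the
LAN743X_CSR_READ_OP(offset) wrapper makes the adapter pointer implicit by
picking it up from the caller's scope. A sketch of the shape, mirroring (not
replacing) the helpers in the patch and assuming the lan743x types above:

	#include <linux/iopoll.h>

	#define EXAMPLE_READ_OP(offset)	lan743x_csr_read(adapter, offset)

	static int example_wait_otp_idle(struct lan743x_adapter *adapter)
	{
		u32 val;

		/* Poll about every 80 us; give up with -ETIMEDOUT after 10 ms */
		return readx_poll_timeout(EXAMPLE_READ_OP, HS_OTP_STATUS, val,
					  !(val & OTP_STATUS_BUSY_), 80, 10000);
	}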
index 5282d25..9ac0c2b 100644 (file)
@@ -1776,6 +1776,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
                dev_kfree_skb_irq(skb);
                goto unlock;
        }
+       tx->frame_count++;
 
        if (gso)
                lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
@@ -2868,6 +2869,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
                adapter->used_tx_channels = PCI11X1X_USED_TX_CHANNELS;
                adapter->max_vector_count = PCI11X1X_MAX_VECTOR_COUNT;
                pci11x1x_strap_get_status(adapter);
+               spin_lock_init(&adapter->eth_syslock_spinlock);
        } else {
                adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
                adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
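
tx->frame_count above is incremented once per frame queued in the xmit hot
path and is consumed only by the ethtool stats code, which copies the
per-queue values out in order and appends their sum. A sketch of that consumer
side, assuming the fields this patch adds:

	static void example_copy_tx_queue_stats(struct lan743x_adapter *adapter,
						u64 *data, int *data_index)
	{
		u64 total = 0;
		int i;

		for (i = 0; i < ARRAY_SIZE(adapter->tx); i++) {
			data[(*data_index)++] = adapter->tx[i].frame_count;
			total += adapter->tx[i].frame_count;
		}
		/* Reported as "TX Total Queue Frames" */
		data[(*data_index)++] = total;
	}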
index 2c8e76b..1ca5f32 100644 (file)
 
 #define E2P_DATA                       (0x044)
 
+/* Hearthstone top-level and system register addresses */
+#define ETH_CTRL_REG_ADDR_BASE         (0x0000)
+#define ETH_SYS_REG_ADDR_BASE          (0x4000)
+#define CONFIG_REG_ADDR_BASE           (0x0000)
+#define ETH_EEPROM_REG_ADDR_BASE       (0x0E00)
+#define ETH_OTP_REG_ADDR_BASE          (0x1000)
+#define SYS_LOCK_REG                   (0x00A0)
+#define SYS_LOCK_REG_MAIN_LOCK_                BIT(7)
+#define SYS_LOCK_REG_GEN_PERI_LOCK_    BIT(5)
+#define SYS_LOCK_REG_SPI_PERI_LOCK_    BIT(4)
+#define SYS_LOCK_REG_SMBUS_PERI_LOCK_  BIT(3)
+#define SYS_LOCK_REG_UART_SS_LOCK_     BIT(2)
+#define SYS_LOCK_REG_ENET_SS_LOCK_     BIT(1)
+#define SYS_LOCK_REG_USB_SS_LOCK_      BIT(0)
+#define ETH_SYSTEM_SYS_LOCK_REG                (ETH_SYS_REG_ADDR_BASE + \
+                                        CONFIG_REG_ADDR_BASE + \
+                                        SYS_LOCK_REG)
+#define HS_EEPROM_REG_ADDR_BASE                (ETH_SYS_REG_ADDR_BASE + \
+                                        ETH_EEPROM_REG_ADDR_BASE)
+#define HS_E2P_CMD                     (HS_EEPROM_REG_ADDR_BASE + 0x0000)
+#define HS_E2P_CMD_EPC_BUSY_           BIT(31)
+#define HS_E2P_CMD_EPC_CMD_WRITE_      GENMASK(29, 28)
+#define HS_E2P_CMD_EPC_CMD_READ_       (0x0)
+#define HS_E2P_CMD_EPC_TIMEOUT_                BIT(17)
+#define HS_E2P_CMD_EPC_ADDR_MASK_      GENMASK(15, 0)
+#define HS_E2P_DATA                    (HS_EEPROM_REG_ADDR_BASE + 0x0004)
+#define HS_E2P_DATA_MASK_              GENMASK(7, 0)
+#define HS_E2P_CFG                     (HS_EEPROM_REG_ADDR_BASE + 0x0008)
+#define HS_E2P_CFG_I2C_PULSE_MASK_     GENMASK(19, 16)
+#define HS_E2P_CFG_EEPROM_SIZE_SEL_    BIT(12)
+#define HS_E2P_CFG_I2C_BAUD_RATE_MASK_ GENMASK(9, 8)
+#define HS_E2P_CFG_TEST_EEPR_TO_BYP_   BIT(0)
+#define HS_E2P_PAD_CTL                 (HS_EEPROM_REG_ADDR_BASE + 0x000C)
+
 #define GPIO_CFG0                      (0x050)
 #define GPIO_CFG0_GPIO_DIR_BIT_(bit)   BIT(16 + (bit))
 #define GPIO_CFG0_GPIO_DATA_BIT_(bit)  BIT(0 + (bit))
 #define INT_MOD_CFG9                   (0x7E4)
 
 #define PTP_CMD_CTL                                    (0x0A00)
+#define PTP_CMD_CTL_PTP_LTC_TARGET_READ_               BIT(13)
 #define PTP_CMD_CTL_PTP_CLK_STP_NSEC_                  BIT(6)
 #define PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_                        BIT(5)
 #define PTP_CMD_CTL_PTP_CLOCK_LOAD_                    BIT(4)
        (((value) & 0x7) << (1 + ((channel) << 2)))
 #define PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel)      (BIT((channel) << 2))
 
+#define HS_PTP_GENERAL_CONFIG                          (0x0A04)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(channel) \
+       (0xf << (4 + ((channel) << 2)))
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_       (0)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500NS_       (1)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1US_         (2)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5US_         (3)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_                (4)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50US_                (5)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_       (6)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500US_       (7)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_         (8)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5MS_         (9)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_                (10)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50MS_                (11)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100MS_       (12)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_       (13)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGG_                (14)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_INT_         (15)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_(channel, value) \
+       (((value) & 0xf) << (4 + ((channel) << 2)))
+#define HS_PTP_GENERAL_CONFIG_EVENT_POL_X_(channel)    (BIT(1 + ((channel) * 2)))
+#define HS_PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel)   (BIT((channel) * 2))
+
 #define PTP_INT_STS                            (0x0A08)
+#define PTP_INT_IO_FE_MASK_                    GENMASK(31, 24)
+#define PTP_INT_IO_FE_SHIFT_                   (24)
+#define PTP_INT_IO_FE_SET_(channel)            BIT(24 + (channel))
+#define PTP_INT_IO_RE_MASK_                    GENMASK(23, 16)
+#define PTP_INT_IO_RE_SHIFT_                   (16)
+#define PTP_INT_IO_RE_SET_(channel)            BIT(16 + (channel))
+#define PTP_INT_TX_TS_OVRFL_INT_               BIT(14)
+#define PTP_INT_TX_SWTS_ERR_INT_               BIT(13)
+#define PTP_INT_TX_TS_INT_                     BIT(12)
+#define PTP_INT_RX_TS_OVRFL_INT_               BIT(9)
+#define PTP_INT_RX_TS_INT_                     BIT(8)
+#define PTP_INT_TIMER_INT_B_                   BIT(1)
+#define PTP_INT_TIMER_INT_A_                   BIT(0)
 #define PTP_INT_EN_SET                         (0x0A0C)
+#define PTP_INT_EN_FE_EN_SET_(channel)         BIT(24 + (channel))
+#define PTP_INT_EN_RE_EN_SET_(channel)         BIT(16 + (channel))
+#define PTP_INT_EN_TIMER_SET_(channel)         BIT(channel)
 #define PTP_INT_EN_CLR                         (0x0A10)
+#define PTP_INT_EN_FE_EN_CLR_(channel)         BIT(24 + (channel))
+#define PTP_INT_EN_RE_EN_CLR_(channel)         BIT(16 + (channel))
 #define PTP_INT_BIT_TX_SWTS_ERR_               BIT(13)
 #define PTP_INT_BIT_TX_TS_                     BIT(12)
 #define PTP_INT_BIT_TIMER_B_                   BIT(1)
 #define PTP_CLOCK_TARGET_NS_X(channel)         (0x0A34 + ((channel) << 4))
 #define PTP_CLOCK_TARGET_RELOAD_SEC_X(channel) (0x0A38 + ((channel) << 4))
 #define PTP_CLOCK_TARGET_RELOAD_NS_X(channel)  (0x0A3C + ((channel) << 4))
+#define PTP_LTC_SET_SEC_HI                     (0x0A50)
+#define PTP_LTC_SET_SEC_HI_SEC_47_32_MASK_     GENMASK(15, 0)
+#define PTP_VERSION                            (0x0A54)
+#define PTP_VERSION_TX_UP_MASK_                        GENMASK(31, 24)
+#define PTP_VERSION_TX_LO_MASK_                        GENMASK(23, 16)
+#define PTP_VERSION_RX_UP_MASK_                        GENMASK(15, 8)
+#define PTP_VERSION_RX_LO_MASK_                        GENMASK(7, 0)
+#define PTP_IO_SEL                             (0x0A58)
+#define PTP_IO_SEL_MASK_                       GENMASK(10, 8)
+#define PTP_IO_SEL_SHIFT_                      (8)
 #define PTP_LATENCY                            (0x0A5C)
 #define PTP_LATENCY_TX_SET_(tx_latency)                (((u32)(tx_latency)) << 16)
 #define PTP_LATENCY_RX_SET_(rx_latency)                \
 #define PTP_TX_MSG_HEADER_MSG_TYPE_            (0x000F0000)
 #define PTP_TX_MSG_HEADER_MSG_TYPE_SYNC_       (0x00000000)
 
+#define PTP_TX_CAP_INFO                                (0x0AB8)
+#define PTP_TX_CAP_INFO_TX_CH_MASK_            GENMASK(1, 0)
+#define PTP_TX_DOMAIN                          (0x0ABC)
+#define PTP_TX_DOMAIN_MASK_                    GENMASK(23, 16)
+#define PTP_TX_DOMAIN_RANGE_EN_                        BIT(15)
+#define PTP_TX_DOMAIN_RANGE_MASK_              GENMASK(7, 0)
+#define PTP_TX_SDOID                           (0x0AC0)
+#define PTP_TX_SDOID_MASK_                     GENMASK(23, 16)
+#define PTP_TX_SDOID_RANGE_EN_                 BIT(15)
+#define PTP_TX_SDOID_11_0_MASK_                        GENMASK(7, 0)
+#define PTP_IO_CAP_CONFIG                      (0x0AC4)
+#define PTP_IO_CAP_CONFIG_LOCK_FE_(channel)    BIT(24 + (channel))
+#define PTP_IO_CAP_CONFIG_LOCK_RE_(channel)    BIT(16 + (channel))
+#define PTP_IO_CAP_CONFIG_FE_CAP_EN_(channel)  BIT(8 + (channel))
+#define PTP_IO_CAP_CONFIG_RE_CAP_EN_(channel)  BIT(0 + (channel))
+#define PTP_IO_RE_LTC_SEC_CAP_X                        (0x0AC8)
+#define PTP_IO_RE_LTC_NS_CAP_X                 (0x0ACC)
+#define PTP_IO_FE_LTC_SEC_CAP_X                        (0x0AD0)
+#define PTP_IO_FE_LTC_NS_CAP_X                 (0x0AD4)
+#define PTP_IO_EVENT_OUTPUT_CFG                        (0x0AD8)
+#define PTP_IO_EVENT_OUTPUT_CFG_SEL_(channel)  BIT(16 + (channel))
+#define PTP_IO_EVENT_OUTPUT_CFG_EN_(channel)   BIT(0 + (channel))
+#define PTP_IO_PIN_CFG                         (0x0ADC)
+#define PTP_IO_PIN_CFG_OBUF_TYPE_(channel)     BIT(0 + (channel))
+#define PTP_LTC_RD_SEC_HI                      (0x0AF0)
+#define PTP_LTC_RD_SEC_HI_SEC_47_32_MASK_      GENMASK(15, 0)
+#define PTP_LTC_RD_SEC_LO                      (0x0AF4)
+#define PTP_LTC_RD_NS                          (0x0AF8)
+#define PTP_LTC_RD_NS_29_0_MASK_               GENMASK(29, 0)
+#define PTP_LTC_RD_SUBNS                       (0x0AFC)
+#define PTP_RX_USER_MAC_HI                     (0x0B00)
+#define PTP_RX_USER_MAC_HI_47_32_MASK_         GENMASK(15, 0)
+#define PTP_RX_USER_MAC_LO                     (0x0B04)
+#define PTP_RX_USER_IP_ADDR_0                  (0x0B20)
+#define PTP_RX_USER_IP_ADDR_1                  (0x0B24)
+#define PTP_RX_USER_IP_ADDR_2                  (0x0B28)
+#define PTP_RX_USER_IP_ADDR_3                  (0x0B2C)
+#define PTP_RX_USER_IP_MASK_0                  (0x0B30)
+#define PTP_RX_USER_IP_MASK_1                  (0x0B34)
+#define PTP_RX_USER_IP_MASK_2                  (0x0B38)
+#define PTP_RX_USER_IP_MASK_3                  (0x0B3C)
+#define PTP_TX_USER_MAC_HI                     (0x0B40)
+#define PTP_TX_USER_MAC_HI_47_32_MASK_         GENMASK(15, 0)
+#define PTP_TX_USER_MAC_LO                     (0x0B44)
+#define PTP_TX_USER_IP_ADDR_0                  (0x0B60)
+#define PTP_TX_USER_IP_ADDR_1                  (0x0B64)
+#define PTP_TX_USER_IP_ADDR_2                  (0x0B68)
+#define PTP_TX_USER_IP_ADDR_3                  (0x0B6C)
+#define PTP_TX_USER_IP_MASK_0                  (0x0B70)
+#define PTP_TX_USER_IP_MASK_1                  (0x0B74)
+#define PTP_TX_USER_IP_MASK_2                  (0x0B78)
+#define PTP_TX_USER_IP_MASK_3                  (0x0B7C)
+
 #define DMAC_CFG                               (0xC00)
 #define DMAC_CFG_COAL_EN_                      BIT(16)
 #define DMAC_CFG_CH_ARB_SEL_RX_HIGH_           (0x00000000)
 #define OTP_STATUS                             (0x1030)
 #define OTP_STATUS_BUSY_                       BIT(0)
 
+/* Hearthstone OTP block registers */
+#define HS_OTP_BLOCK_BASE                      (ETH_SYS_REG_ADDR_BASE + \
+                                                ETH_OTP_REG_ADDR_BASE)
+#define HS_OTP_PWR_DN                          (HS_OTP_BLOCK_BASE + 0x0)
+#define HS_OTP_ADDR_HIGH                       (HS_OTP_BLOCK_BASE + 0x4)
+#define HS_OTP_ADDR_LOW                                (HS_OTP_BLOCK_BASE + 0x8)
+#define HS_OTP_PRGM_DATA                       (HS_OTP_BLOCK_BASE + 0x10)
+#define HS_OTP_PRGM_MODE                       (HS_OTP_BLOCK_BASE + 0x14)
+#define HS_OTP_READ_DATA                       (HS_OTP_BLOCK_BASE + 0x18)
+#define HS_OTP_FUNC_CMD                                (HS_OTP_BLOCK_BASE + 0x20)
+#define HS_OTP_TST_CMD                         (HS_OTP_BLOCK_BASE + 0x24)
+#define HS_OTP_CMD_GO                          (HS_OTP_BLOCK_BASE + 0x28)
+#define HS_OTP_STATUS                          (HS_OTP_BLOCK_BASE + 0x30)
+
 /* MAC statistics registers */
 #define STAT_RX_FCS_ERRORS                     (0x1200)
 #define STAT_RX_ALIGNMENT_ERRORS               (0x1204)
@@ -715,6 +869,7 @@ struct lan743x_tx {
        int             last_tail;
 
        struct napi_struct napi;
+       u32 frame_count;
 
        struct sk_buff *overflow_skb;
 };
@@ -772,6 +927,10 @@ struct lan743x_adapter {
        struct lan743x_rx       rx[LAN743X_USED_RX_CHANNELS];
        bool                    is_pci11x1x;
        bool                    is_sgmii_en;
+       /* protects eth_syslock_acquire_cnt and the shared system lock */
+       spinlock_t              eth_syslock_spinlock;
+       bool                    eth_syslock_en;
+       u32                     eth_syslock_acquire_cnt;
        u8                      max_tx_channels;
        u8                      used_tx_channels;
        u8                      max_vector_count;
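
The eth_syslock fields added above implement a counted claim on the shared
ENET system-lock register: only the 0 -> 1 acquire writes the lock bit and
only the final release clears it, with the spinlock guarding the counter. A
simplified sketch; the real acquire path also re-reads the register to verify
the claim and retries with a ~10 ms sleep per attempt, up to
LOCK_TIMEOUT_MAX_CNT tries:

	static void example_syslock_get(struct lan743x_adapter *adapter)
	{
		spin_lock(&adapter->eth_syslock_spinlock);
		if (adapter->eth_syslock_acquire_cnt++ == 0)
			lan743x_csr_write(adapter, ETH_SYSTEM_SYS_LOCK_REG,
					  SYS_LOCK_REG_ENET_SS_LOCK_);
		spin_unlock(&adapter->eth_syslock_spinlock);
	}

	static void example_syslock_put(struct lan743x_adapter *adapter)
	{
		spin_lock(&adapter->eth_syslock_spinlock);
		if (--adapter->eth_syslock_acquire_cnt == 0)
			lan743x_csr_write(adapter, ETH_SYSTEM_SYS_LOCK_REG, 0);
		spin_unlock(&adapter->eth_syslock_spinlock);
	}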
index ec08259..6a11e2c 100644 (file)
@@ -25,6 +25,18 @@ static void lan743x_ptp_clock_set(struct lan743x_adapter *adapter,
                                  u32 seconds, u32 nano_seconds,
                                  u32 sub_nano_seconds);
 
+static int lan743x_get_channel(u32 ch_map)
+{
+       int idx;
+
+       for (idx = 0; idx < 32; idx++) {
+               if (ch_map & (0x1 << idx))
+                       return idx;
+       }
+
+       return -EINVAL;
+}
+
 int lan743x_gpio_init(struct lan743x_adapter *adapter)
 {
        struct lan743x_gpio *gpio = &adapter->gpio;
@@ -179,6 +191,8 @@ static void lan743x_ptp_release_event_ch(struct lan743x_adapter *adapter,
 static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter,
                                  u32 *seconds, u32 *nano_seconds,
                                  u32 *sub_nano_seconds);
+static void lan743x_ptp_io_clock_get(struct lan743x_adapter *adapter,
+                                    u32 *sec, u32 *nsec, u32 *sub_nsec);
 static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
                                   s64 time_step_ns);
 
@@ -407,7 +421,11 @@ static int lan743x_ptpci_gettime64(struct ptp_clock_info *ptpci,
        u32 nano_seconds = 0;
        u32 seconds = 0;
 
-       lan743x_ptp_clock_get(adapter, &seconds, &nano_seconds, NULL);
+       if (adapter->is_pci11x1x)
+               lan743x_ptp_io_clock_get(adapter, &seconds, &nano_seconds,
+                                        NULL);
+       else
+               lan743x_ptp_clock_get(adapter, &seconds, &nano_seconds, NULL);
        ts->tv_sec = seconds;
        ts->tv_nsec = nano_seconds;
 
@@ -671,6 +689,322 @@ failed:
        return ret;
 }
 
+static void lan743x_ptp_io_perout_off(struct lan743x_adapter *adapter,
+                                     u32 index)
+{
+       struct lan743x_ptp *ptp = &adapter->ptp;
+       int perout_pin;
+       int event_ch;
+       u32 gen_cfg;
+       int val;
+
+       event_ch = ptp->ptp_io_perout[index];
+       if (event_ch >= 0) {
+               /* set the target far in the future, effectively disabling it */
+               lan743x_csr_write(adapter,
+                                 PTP_CLOCK_TARGET_SEC_X(event_ch),
+                                 0xFFFF0000);
+               lan743x_csr_write(adapter,
+                                 PTP_CLOCK_TARGET_NS_X(event_ch),
+                                 0);
+
+               gen_cfg = lan743x_csr_read(adapter, HS_PTP_GENERAL_CONFIG);
+               gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_
+                                   (event_ch));
+               gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_EVENT_POL_X_(event_ch));
+               gen_cfg |= HS_PTP_GENERAL_CONFIG_RELOAD_ADD_X_(event_ch);
+               lan743x_csr_write(adapter, HS_PTP_GENERAL_CONFIG, gen_cfg);
+               if (event_ch)
+                       lan743x_csr_write(adapter, PTP_INT_STS,
+                                         PTP_INT_TIMER_INT_B_);
+               else
+                       lan743x_csr_write(adapter, PTP_INT_STS,
+                                         PTP_INT_TIMER_INT_A_);
+               lan743x_ptp_release_event_ch(adapter, event_ch);
+               ptp->ptp_io_perout[index] = -1;
+       }
+
+       perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, index);
+
+       /* Deselect Event output */
+       val = lan743x_csr_read(adapter, PTP_IO_EVENT_OUTPUT_CFG);
+
+       /* Disables the output of Local Time Target compare events */
+       val &= ~PTP_IO_EVENT_OUTPUT_CFG_EN_(perout_pin);
+       lan743x_csr_write(adapter, PTP_IO_EVENT_OUTPUT_CFG, val);
+
+       /* Configure the pin as an open-drain driver */
+       val = lan743x_csr_read(adapter, PTP_IO_PIN_CFG);
+       val &= ~PTP_IO_PIN_CFG_OBUF_TYPE_(perout_pin);
+       lan743x_csr_write(adapter, PTP_IO_PIN_CFG, val);
+       /* Dummy read to make sure the write has taken effect */
+       val = lan743x_csr_read(adapter, PTP_IO_PIN_CFG);
+}
+
+static int lan743x_ptp_io_perout(struct lan743x_adapter *adapter, int on,
+                                struct ptp_perout_request *perout_request)
+{
+       struct lan743x_ptp *ptp = &adapter->ptp;
+       u32 period_sec, period_nsec;
+       u32 start_sec, start_nsec;
+       u32 pulse_sec, pulse_nsec;
+       int pulse_width;
+       int perout_pin;
+       int event_ch;
+       u32 gen_cfg;
+       u32 index;
+       int val;
+
+       index = perout_request->index;
+       event_ch = ptp->ptp_io_perout[index];
+
+       if (on) {
+               perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, index);
+               if (perout_pin < 0)
+                       return -EBUSY;
+       } else {
+               lan743x_ptp_io_perout_off(adapter, index);
+               return 0;
+       }
+
+       if (event_ch >= LAN743X_PTP_N_EVENT_CHAN) {
+               /* already on, turn off first */
+               lan743x_ptp_io_perout_off(adapter, index);
+       }
+
+       event_ch = lan743x_ptp_reserve_event_ch(adapter, index);
+       if (event_ch < 0) {
+               netif_warn(adapter, drv, adapter->netdev,
+                          "Failed to reserve event channel %d for PEROUT\n",
+                          index);
+               goto failed;
+       }
+       ptp->ptp_io_perout[index] = event_ch;
+
+       if (perout_request->flags & PTP_PEROUT_DUTY_CYCLE) {
+               pulse_sec = perout_request->on.sec;
+               pulse_sec += perout_request->on.nsec / 1000000000;
+               pulse_nsec = perout_request->on.nsec % 1000000000;
+       } else {
+               pulse_sec = perout_request->period.sec;
+               pulse_sec += perout_request->period.nsec / 1000000000;
+               pulse_nsec = perout_request->period.nsec % 1000000000;
+       }
+
+       if (pulse_sec == 0) {
+               if (pulse_nsec >= 400000000) {
+                       pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+               } else if (pulse_nsec >= 200000000) {
+                       pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100MS_;
+               } else if (pulse_nsec >= 100000000) {
+                       pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50MS_;
+               } else if (pulse_nsec >= 20000000) {
+                       pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_;
+               } else if (pulse_nsec >= 10000000) {
+                       pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5MS_;
+               } else if (pulse_nsec >= 2000000) {
+                       pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_;
+               } else if (pulse_nsec >= 1000000) {
+                       pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500US_;
+               } else if (pulse_nsec >= 200000) {
+                       pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_;
+               } else if (pulse_nsec >= 100000) {
+                       pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50US_;
+               } else if (pulse_nsec >= 20000) {
+                       pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_;
+               } else if (pulse_nsec >= 10000) {
+                       pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5US_;
+               } else if (pulse_nsec >= 2000) {
+                       pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1US_;
+               } else if (pulse_nsec >= 1000) {
+                       pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500NS_;
+               } else if (pulse_nsec >= 200) {
+                       pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_;
+               } else {
+                       netif_warn(adapter, drv, adapter->netdev,
+                                  "perout period too small, min is 200ns\n");
+                       goto failed;
+               }
+       } else {
+               pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+       }
+
+       /* turn off by setting the target far in the future */
+       lan743x_csr_write(adapter,
+                         PTP_CLOCK_TARGET_SEC_X(event_ch),
+                         0xFFFF0000);
+       lan743x_csr_write(adapter,
+                         PTP_CLOCK_TARGET_NS_X(event_ch), 0);
+
+       /* Configure to pulse every period */
+       gen_cfg = lan743x_csr_read(adapter, HS_PTP_GENERAL_CONFIG);
+       gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(event_ch));
+       gen_cfg |= HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_
+                         (event_ch, pulse_width);
+       gen_cfg |= HS_PTP_GENERAL_CONFIG_EVENT_POL_X_(event_ch);
+       gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_RELOAD_ADD_X_(event_ch));
+       lan743x_csr_write(adapter, HS_PTP_GENERAL_CONFIG, gen_cfg);
+
+       /* set the reload to one toggle cycle */
+       period_sec = perout_request->period.sec;
+       period_sec += perout_request->period.nsec / 1000000000;
+       period_nsec = perout_request->period.nsec % 1000000000;
+       lan743x_csr_write(adapter,
+                         PTP_CLOCK_TARGET_RELOAD_SEC_X(event_ch),
+                         period_sec);
+       lan743x_csr_write(adapter,
+                         PTP_CLOCK_TARGET_RELOAD_NS_X(event_ch),
+                         period_nsec);
+
+       start_sec = perout_request->start.sec;
+       start_sec += perout_request->start.nsec / 1000000000;
+       start_nsec = perout_request->start.nsec % 1000000000;
+
+       /* set the start time */
+       lan743x_csr_write(adapter,
+                         PTP_CLOCK_TARGET_SEC_X(event_ch),
+                         start_sec);
+       lan743x_csr_write(adapter,
+                         PTP_CLOCK_TARGET_NS_X(event_ch),
+                         start_nsec);
+
+       /* Enable LTC Target Read */
+       val = lan743x_csr_read(adapter, PTP_CMD_CTL);
+       val |= PTP_CMD_CTL_PTP_LTC_TARGET_READ_;
+       lan743x_csr_write(adapter, PTP_CMD_CTL, val);
+
+       /* Configure the pin as a push-pull driver */
+       val = lan743x_csr_read(adapter, PTP_IO_PIN_CFG);
+       val |= PTP_IO_PIN_CFG_OBUF_TYPE_(perout_pin);
+       lan743x_csr_write(adapter, PTP_IO_PIN_CFG, val);
+
+       /* Select Event output */
+       val = lan743x_csr_read(adapter, PTP_IO_EVENT_OUTPUT_CFG);
+       if (event_ch)
+               /* Channel B as the output */
+               val |= PTP_IO_EVENT_OUTPUT_CFG_SEL_(perout_pin);
+       else
+               /* Channel A as the output */
+               val &= ~PTP_IO_EVENT_OUTPUT_CFG_SEL_(perout_pin);
+
+       /* Enables the output of Local Time Target compare events */
+       val |= PTP_IO_EVENT_OUTPUT_CFG_EN_(perout_pin);
+       lan743x_csr_write(adapter, PTP_IO_EVENT_OUTPUT_CFG, val);
+
+       return 0;
+
+failed:
+       lan743x_ptp_io_perout_off(adapter, index);
+       return -ENODEV;
+}
+
+static void lan743x_ptp_io_extts_off(struct lan743x_adapter *adapter,
+                                    u32 index)
+{
+       struct lan743x_ptp *ptp = &adapter->ptp;
+       struct lan743x_extts *extts;
+       int val;
+
+       extts = &ptp->extts[index];
+       /* PTP Interrupt Enable Clear Register */
+       if (extts->flags & PTP_FALLING_EDGE)
+               val = PTP_INT_EN_FE_EN_CLR_(index);
+       else
+               val = PTP_INT_EN_RE_EN_CLR_(index);
+       lan743x_csr_write(adapter, PTP_INT_EN_CLR, val);
+
+       /* Disables PTP-IO edge lock */
+       val = lan743x_csr_read(adapter, PTP_IO_CAP_CONFIG);
+       if (extts->flags & PTP_FALLING_EDGE) {
+               val &= ~PTP_IO_CAP_CONFIG_LOCK_FE_(index);
+               val &= ~PTP_IO_CAP_CONFIG_FE_CAP_EN_(index);
+       } else {
+               val &= ~PTP_IO_CAP_CONFIG_LOCK_RE_(index);
+               val &= ~PTP_IO_CAP_CONFIG_RE_CAP_EN_(index);
+       }
+       lan743x_csr_write(adapter, PTP_IO_CAP_CONFIG, val);
+
+       /* PTP-IO De-select register */
+       val = lan743x_csr_read(adapter, PTP_IO_SEL);
+       val &= ~PTP_IO_SEL_MASK_;
+       lan743x_csr_write(adapter, PTP_IO_SEL, val);
+
+       /* Clear timestamp */
+       memset(&extts->ts, 0, sizeof(struct timespec64));
+       extts->flags = 0;
+}
+
+static int lan743x_ptp_io_event_cap_en(struct lan743x_adapter *adapter,
+                                      u32 flags, u32 channel)
+{
+       struct lan743x_ptp *ptp = &adapter->ptp;
+       int val;
+
+       if ((flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&ptp->command_lock);
+       /* PTP-IO Event Capture Enable */
+       val = lan743x_csr_read(adapter, PTP_IO_CAP_CONFIG);
+       if (flags & PTP_FALLING_EDGE) {
+               val &= ~PTP_IO_CAP_CONFIG_LOCK_RE_(channel);
+               val &= ~PTP_IO_CAP_CONFIG_RE_CAP_EN_(channel);
+               val |= PTP_IO_CAP_CONFIG_LOCK_FE_(channel);
+               val |= PTP_IO_CAP_CONFIG_FE_CAP_EN_(channel);
+       } else {
+               /* Rising edge as the default */
+               val &= ~PTP_IO_CAP_CONFIG_LOCK_FE_(channel);
+               val &= ~PTP_IO_CAP_CONFIG_FE_CAP_EN_(channel);
+               val |= PTP_IO_CAP_CONFIG_LOCK_RE_(channel);
+               val |= PTP_IO_CAP_CONFIG_RE_CAP_EN_(channel);
+       }
+       lan743x_csr_write(adapter, PTP_IO_CAP_CONFIG, val);
+
+       /* PTP-IO Select */
+       val = lan743x_csr_read(adapter, PTP_IO_SEL);
+       val &= ~PTP_IO_SEL_MASK_;
+       val |= channel << PTP_IO_SEL_SHIFT_;
+       lan743x_csr_write(adapter, PTP_IO_SEL, val);
+
+       /* PTP Interrupt Enable Register */
+       if (flags & PTP_FALLING_EDGE)
+               val = PTP_INT_EN_FE_EN_SET_(channel);
+       else
+               val = PTP_INT_EN_RE_EN_SET_(channel);
+       lan743x_csr_write(adapter, PTP_INT_EN_SET, val);
+
+       mutex_unlock(&ptp->command_lock);
+
+       return 0;
+}
+
+static int lan743x_ptp_io_extts(struct lan743x_adapter *adapter, int on,
+                               struct ptp_extts_request *extts_request)
+{
+       struct lan743x_ptp *ptp = &adapter->ptp;
+       u32 flags = extts_request->flags;
+       u32 index = extts_request->index;
+       struct lan743x_extts *extts;
+       int extts_pin;
+       int ret = 0;
+
+       extts = &ptp->extts[index];
+
+       if (on) {
+               extts_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, index);
+               if (extts_pin < 0)
+                       return -EBUSY;
+
+               ret = lan743x_ptp_io_event_cap_en(adapter, flags, index);
+               if (!ret)
+                       extts->flags = flags;
+       } else {
+               lan743x_ptp_io_extts_off(adapter, index);
+       }
+
+       return ret;
+}
+
 static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
                                struct ptp_clock_request *request, int on)
 {
@@ -682,11 +1016,19 @@ static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
        if (request) {
                switch (request->type) {
                case PTP_CLK_REQ_EXTTS:
+                       if (request->extts.index < ptpci->n_ext_ts)
+                               return lan743x_ptp_io_extts(adapter, on,
+                                                        &request->extts);
                        return -EINVAL;
                case PTP_CLK_REQ_PEROUT:
-                       if (request->perout.index < ptpci->n_per_out)
-                               return lan743x_ptp_perout(adapter, on,
+                       if (request->perout.index < ptpci->n_per_out) {
+                               if (adapter->is_pci11x1x)
+                                       return lan743x_ptp_io_perout(adapter, on,
+                                                            &request->perout);
+                               else
+                                       return lan743x_ptp_perout(adapter, on,
                                                          &request->perout);
+                       }
                        return -EINVAL;
                case PTP_CLK_REQ_PPS:
                        return -EINVAL;
@@ -715,8 +1057,8 @@ static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp,
        switch (func) {
        case PTP_PF_NONE:
        case PTP_PF_PEROUT:
-               break;
        case PTP_PF_EXTTS:
+               break;
        case PTP_PF_PHYSYNC:
        default:
                result = -1;
@@ -725,6 +1067,33 @@ static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp,
        return result;
 }
 
+static void lan743x_ptp_io_event_clock_get(struct lan743x_adapter *adapter,
+                                          bool fe, u8 channel,
+                                          struct timespec64 *ts)
+{
+       struct lan743x_ptp *ptp = &adapter->ptp;
+       struct lan743x_extts *extts;
+       u32 sec, nsec;
+
+       mutex_lock(&ptp->command_lock);
+       if (fe) {
+               sec = lan743x_csr_read(adapter, PTP_IO_FE_LTC_SEC_CAP_X);
+               nsec = lan743x_csr_read(adapter, PTP_IO_FE_LTC_NS_CAP_X);
+       } else {
+               sec = lan743x_csr_read(adapter, PTP_IO_RE_LTC_SEC_CAP_X);
+               nsec = lan743x_csr_read(adapter, PTP_IO_RE_LTC_NS_CAP_X);
+       }
+
+       mutex_unlock(&ptp->command_lock);
+
+       /* Update Local timestamp */
+       extts = &ptp->extts[channel];
+       extts->ts.tv_sec = sec;
+       extts->ts.tv_nsec = nsec;
+       ts->tv_sec = sec;
+       ts->tv_nsec = nsec;
+}
+
 static long lan743x_ptpci_do_aux_work(struct ptp_clock_info *ptpci)
 {
        struct lan743x_ptp *ptp =
@@ -733,41 +1102,121 @@ static long lan743x_ptpci_do_aux_work(struct ptp_clock_info *ptpci)
                container_of(ptp, struct lan743x_adapter, ptp);
        u32 cap_info, cause, header, nsec, seconds;
        bool new_timestamp_available = false;
+       struct ptp_clock_event ptp_event;
+       struct timespec64 ts;
+       int ptp_int_sts;
        int count = 0;
+       int channel;
+       s64 ns;
 
-       while ((count < 100) &&
-              (lan743x_csr_read(adapter, PTP_INT_STS) & PTP_INT_BIT_TX_TS_)) {
+       ptp_int_sts = lan743x_csr_read(adapter, PTP_INT_STS);
+       while ((count < 100) && ptp_int_sts) {
                count++;
-               cap_info = lan743x_csr_read(adapter, PTP_CAP_INFO);
-
-               if (PTP_CAP_INFO_TX_TS_CNT_GET_(cap_info) > 0) {
-                       seconds = lan743x_csr_read(adapter,
-                                                  PTP_TX_EGRESS_SEC);
-                       nsec = lan743x_csr_read(adapter, PTP_TX_EGRESS_NS);
-                       cause = (nsec &
-                                PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_);
-                       header = lan743x_csr_read(adapter,
-                                                 PTP_TX_MSG_HEADER);
-
-                       if (cause == PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_) {
-                               nsec &= PTP_TX_EGRESS_NS_TS_NS_MASK_;
-                               lan743x_ptp_tx_ts_enqueue_ts(adapter,
-                                                            seconds, nsec,
-                                                            header);
-                               new_timestamp_available = true;
-                       } else if (cause ==
-                               PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_) {
-                               netif_err(adapter, drv, adapter->netdev,
-                                         "Auto capture cause not supported\n");
+
+               if (ptp_int_sts & PTP_INT_BIT_TX_TS_) {
+                       cap_info = lan743x_csr_read(adapter, PTP_CAP_INFO);
+
+                       if (PTP_CAP_INFO_TX_TS_CNT_GET_(cap_info) > 0) {
+                               seconds = lan743x_csr_read(adapter,
+                                                          PTP_TX_EGRESS_SEC);
+                               nsec = lan743x_csr_read(adapter,
+                                                       PTP_TX_EGRESS_NS);
+                               cause = (nsec &
+                                        PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_);
+                               header = lan743x_csr_read(adapter,
+                                                         PTP_TX_MSG_HEADER);
+
+                               if (cause ==
+                                   PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_) {
+                                       nsec &= PTP_TX_EGRESS_NS_TS_NS_MASK_;
+                                       lan743x_ptp_tx_ts_enqueue_ts(adapter,
+                                                                    seconds,
+                                                                    nsec,
+                                                                    header);
+                                       new_timestamp_available = true;
+                               } else if (cause ==
+                                          PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_) {
+                                       netif_err(adapter, drv, adapter->netdev,
+                                                 "Auto capture cause not supported\n");
+                               } else {
+                                       netif_warn(adapter, drv, adapter->netdev,
+                                                  "unknown tx timestamp capture cause\n");
+                               }
                        } else {
                                netif_warn(adapter, drv, adapter->netdev,
-                                          "unknown tx timestamp capture cause\n");
+                                          "TX TS INT but no TX TS CNT\n");
                        }
-               } else {
-                       netif_warn(adapter, drv, adapter->netdev,
-                                  "TX TS INT but no TX TS CNT\n");
+                       lan743x_csr_write(adapter, PTP_INT_STS,
+                                         PTP_INT_BIT_TX_TS_);
                }
-               lan743x_csr_write(adapter, PTP_INT_STS, PTP_INT_BIT_TX_TS_);
+
+               if (ptp_int_sts & PTP_INT_IO_FE_MASK_) {
+                       do {
+                               channel = lan743x_get_channel((ptp_int_sts &
+                                                       PTP_INT_IO_FE_MASK_) >>
+                                                       PTP_INT_IO_FE_SHIFT_);
+                               if (channel >= 0 &&
+                                   channel < PCI11X1X_PTP_IO_MAX_CHANNELS) {
+                                       lan743x_ptp_io_event_clock_get(adapter,
+                                                                      true,
+                                                                      channel,
+                                                                      &ts);
+                                       /* Post the PTP falling-edge event */
+                                       ns = timespec64_to_ns(&ts);
+                                       ptp_event.timestamp = ns;
+                                       ptp_event.index = channel;
+                                       ptp_event.type = PTP_CLOCK_EXTTS;
+                                       ptp_clock_event(ptp->ptp_clock,
+                                                       &ptp_event);
+                                       lan743x_csr_write(adapter, PTP_INT_STS,
+                                                         PTP_INT_IO_FE_SET_
+                                                         (channel));
+                                       ptp_int_sts &= ~(1 <<
+                                                        (PTP_INT_IO_FE_SHIFT_ +
+                                                         channel));
+                               } else {
+                                       /* Clear falling event interrupts */
+                                       lan743x_csr_write(adapter, PTP_INT_STS,
+                                                         PTP_INT_IO_FE_MASK_);
+                                       ptp_int_sts &= ~PTP_INT_IO_FE_MASK_;
+                               }
+                       } while (ptp_int_sts & PTP_INT_IO_FE_MASK_);
+               }
+
+               if (ptp_int_sts & PTP_INT_IO_RE_MASK_) {
+                       do {
+                               channel = lan743x_get_channel((ptp_int_sts &
+                                                      PTP_INT_IO_RE_MASK_) >>
+                                                      PTP_INT_IO_RE_SHIFT_);
+                               if (channel >= 0 &&
+                                   channel < PCI11X1X_PTP_IO_MAX_CHANNELS) {
+                                       lan743x_ptp_io_event_clock_get(adapter,
+                                                                      false,
+                                                                      channel,
+                                                                      &ts);
+                                       /* PTP Rising Event post */
+                                       ns = timespec64_to_ns(&ts);
+                                       ptp_event.timestamp = ns;
+                                       ptp_event.index = channel;
+                                       ptp_event.type = PTP_CLOCK_EXTTS;
+                                       ptp_clock_event(ptp->ptp_clock,
+                                                       &ptp_event);
+                                       lan743x_csr_write(adapter, PTP_INT_STS,
+                                                         PTP_INT_IO_RE_SET_
+                                                         (channel));
+                                       ptp_int_sts &= ~(1 <<
+                                                        (PTP_INT_IO_RE_SHIFT_ +
+                                                         channel));
+                               } else {
+                                       /* Clear rising event interrupts */
+                                       lan743x_csr_write(adapter, PTP_INT_STS,
+                                                         PTP_INT_IO_RE_MASK_);
+                                       ptp_int_sts &= ~PTP_INT_IO_RE_MASK_;
+                               }
+                       } while (ptp_int_sts & PTP_INT_IO_RE_MASK_);
+               }
+
+               ptp_int_sts = lan743x_csr_read(adapter, PTP_INT_STS);
        }
 
        if (new_timestamp_available)
@@ -802,6 +1251,28 @@ static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter,
        mutex_unlock(&ptp->command_lock);
 }
 
+static void lan743x_ptp_io_clock_get(struct lan743x_adapter *adapter,
+                                    u32 *sec, u32 *nsec, u32 *sub_nsec)
+{
+       struct lan743x_ptp *ptp = &adapter->ptp;
+
+       mutex_lock(&ptp->command_lock);
+       lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
+       lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_CLOCK_READ_);
+
+       if (sec)
+               (*sec) = lan743x_csr_read(adapter, PTP_LTC_RD_SEC_LO);
+
+       if (nsec)
+               (*nsec) = lan743x_csr_read(adapter, PTP_LTC_RD_NS);
+
+       if (sub_nsec)
+               (*sub_nsec) =
+               lan743x_csr_read(adapter, PTP_LTC_RD_SUBNS);
+
+       mutex_unlock(&ptp->command_lock);
+}
+
 static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
                                   s64 time_step_ns)
 {
@@ -815,8 +1286,12 @@ static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
 
        if (time_step_ns >  15000000000LL) {
                /* convert to clock set */
-               lan743x_ptp_clock_get(adapter, &unsigned_seconds,
-                                     &nano_seconds, NULL);
+               if (adapter->is_pci11x1x)
+                       lan743x_ptp_io_clock_get(adapter, &unsigned_seconds,
+                                                &nano_seconds, NULL);
+               else
+                       lan743x_ptp_clock_get(adapter, &unsigned_seconds,
+                                             &nano_seconds, NULL);
                unsigned_seconds += div_u64_rem(time_step_ns, 1000000000LL,
                                                &remainder);
                nano_seconds += remainder;
@@ -831,8 +1306,13 @@ static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
                /* convert to clock set */
                time_step_ns = -time_step_ns;
 
-               lan743x_ptp_clock_get(adapter, &unsigned_seconds,
-                                     &nano_seconds, NULL);
+               if (adapter->is_pci11x1x) {
+                       lan743x_ptp_io_clock_get(adapter, &unsigned_seconds,
+                                                &nano_seconds, NULL);
+               } else {
+                       lan743x_ptp_clock_get(adapter, &unsigned_seconds,
+                                             &nano_seconds, NULL);
+               }
                unsigned_seconds -= div_u64_rem(time_step_ns, 1000000000LL,
                                                &remainder);
                nano_seconds_step = remainder;
@@ -1061,6 +1541,8 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
                n_pins = LAN7430_N_GPIO;
                break;
        case ID_REV_ID_LAN7431_:
+       case ID_REV_ID_A011_:
+       case ID_REV_ID_A041_:
                n_pins = LAN7431_N_GPIO;
                break;
        default:
@@ -1088,10 +1570,10 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
                 adapter->netdev->dev_addr);
        ptp->ptp_clock_info.max_adj = LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB;
        ptp->ptp_clock_info.n_alarm = 0;
-       ptp->ptp_clock_info.n_ext_ts = 0;
+       ptp->ptp_clock_info.n_ext_ts = LAN743X_PTP_N_EXTTS;
        ptp->ptp_clock_info.n_per_out = LAN743X_PTP_N_EVENT_CHAN;
        ptp->ptp_clock_info.n_pins = n_pins;
-       ptp->ptp_clock_info.pps = 0;
+       ptp->ptp_clock_info.pps = LAN743X_PTP_N_PPS;
        ptp->ptp_clock_info.pin_config = ptp->pin_config;
        ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine;
        ptp->ptp_clock_info.adjfreq = lan743x_ptpci_adjfreq;
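
The rising and falling edge captures queued in the interrupt handler above
are posted as PTP_CLOCK_EXTTS events, and n_ext_ts is now non-zero, so
userspace can consume them through the generic PTP character-device UAPI.
A minimal sketch, assuming the clock registered as /dev/ptp0 (the device
path and channel index are illustrative, not fixed by this patch):

        /* Arm EXTTS channel 0 for rising edges and print timestamps */
        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/ptp_clock.h>

        int main(void)
        {
                struct ptp_extts_request req = { .index = 0 };
                struct ptp_extts_event ev;
                int fd = open("/dev/ptp0", O_RDWR);

                if (fd < 0)
                        return 1;
                req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;
                if (ioctl(fd, PTP_EXTTS_REQUEST, &req))
                        return 1;
                while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
                        printf("extts[%u] %lld.%09u\n", ev.index,
                               (long long)ev.t.sec, ev.t.nsec);
                return 0;
        }
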
index 7663bf5..e26d4ef 100644 (file)
@@ -18,6 +18,9 @@
  */
 #define LAN743X_PTP_N_EVENT_CHAN       2
 #define LAN743X_PTP_N_PEROUT           LAN743X_PTP_N_EVENT_CHAN
+#define LAN743X_PTP_N_EXTTS            4
+#define LAN743X_PTP_N_PPS              0
+#define PCI11X1X_PTP_IO_MAX_CHANNELS   8
 
 struct lan743x_adapter;
 
@@ -60,6 +63,11 @@ struct lan743x_ptp_perout {
        int  gpio_pin;  /* GPIO pin where output appears */
 };
 
+struct lan743x_extts {
+       int flags;
+       struct timespec64 ts;
+};
+
 struct lan743x_ptp {
        int flags;
 
@@ -72,6 +80,8 @@ struct lan743x_ptp {
 
        unsigned long used_event_ch;
        struct lan743x_ptp_perout perout[LAN743X_PTP_N_PEROUT];
+       int ptp_io_perout[LAN743X_PTP_N_PEROUT]; /* PTP event channel (0=channel A, 1=channel B) */
+       struct lan743x_extts extts[LAN743X_PTP_N_EXTTS];
 
        bool leds_multiplexed;
        bool led_enabled[LAN7430_N_LED];
index ad310c9..e1bcb28 100644 (file)
@@ -185,6 +185,9 @@ static int lan966x_port_inj_ready(struct lan966x *lan966x, u8 grp)
 {
        u32 val;
 
+       if (lan_rd(lan966x, QS_INJ_STATUS) & QS_INJ_STATUS_FIFO_RDY_SET(BIT(grp)))
+               return 0;
+
        return readx_poll_timeout_atomic(lan966x_port_inj_status, lan966x, val,
                                         QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp),
                                         READL_SLEEP_US, READL_TIMEOUT_US);
@@ -318,6 +321,7 @@ static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp)
 static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct lan966x_port *port = netdev_priv(dev);
+       struct lan966x *lan966x = port->lan966x;
        __be32 ifh[IFH_LEN];
        int err;
 
@@ -338,7 +342,11 @@ static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
                lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id);
        }
 
-       return lan966x_port_ifh_xmit(skb, ifh, dev);
+       spin_lock(&lan966x->tx_lock);
+       err = lan966x_port_ifh_xmit(skb, ifh, dev);
+       spin_unlock(&lan966x->tx_lock);
+
+       return err;
 }
 
 static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
@@ -600,7 +608,9 @@ static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
                                skb->offload_fwd_mark = 0;
                }
 
-               netif_rx(skb);
+               if (!skb_defer_rx_timestamp(skb))
+                       netif_rx(skb);
+
                dev->stats.rx_bytes += len;
                dev->stats.rx_packets++;
 
@@ -883,6 +893,8 @@ static void lan966x_init(struct lan966x *lan966x)
        lan_rmw(ANA_ANAINTR_INTR_ENA_SET(1),
                ANA_ANAINTR_INTR_ENA,
                lan966x, ANA_ANAINTR);
+
+       spin_lock_init(&lan966x->tx_lock);
 }
 
 static int lan966x_ram_init(struct lan966x *lan966x)
index 058e435..ae282da 100644 (file)
@@ -108,6 +108,8 @@ struct lan966x {
 
        u8 base_mac[ETH_ALEN];
 
+       spinlock_t tx_lock; /* lock for frame transmission */
+
        struct net_device *bridge;
        u16 bridge_mask;
        u16 bridge_fwd_mask;
index e9dd348..4402c3e 100644 (file)
@@ -8,4 +8,4 @@ obj-$(CONFIG_SPARX5_SWITCH) += sparx5-switch.o
 sparx5-switch-objs  := sparx5_main.o sparx5_packet.o \
  sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \
  sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \
- sparx5_ptp.o
+ sparx5_ptp.o sparx5_pgid.o
index 9a8e4f2..35abb3d 100644 (file)
@@ -186,11 +186,11 @@ bool sparx5_mact_getnext(struct sparx5 *sparx5,
        return ret == 0;
 }
 
-static int sparx5_mact_lookup(struct sparx5 *sparx5,
-                             const unsigned char mac[ETH_ALEN],
-                             u16 vid)
+bool sparx5_mact_find(struct sparx5 *sparx5,
+                     const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2)
 {
        int ret;
+       u32 cfg2;
 
        mutex_lock(&sparx5->lock);
 
@@ -202,16 +202,29 @@ static int sparx5_mact_lookup(struct sparx5 *sparx5,
                sparx5, LRN_COMMON_ACCESS_CTRL);
 
        ret = sparx5_mact_wait_for_completion(sparx5);
-       if (ret)
-               goto out;
-
-       ret = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET
-               (spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2));
+       if (ret == 0) {
+               cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2);
+               if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2))
+                       *pcfg2 = cfg2;
+               else
+                       ret = -ENOENT;
+       }
 
-out:
        mutex_unlock(&sparx5->lock);
 
-       return ret;
+       return ret == 0;
+}
+
+static int sparx5_mact_lookup(struct sparx5 *sparx5,
+                             const unsigned char mac[ETH_ALEN],
+                             u16 vid)
+{
+       u32 pcfg2;
+
+       if (sparx5_mact_find(sparx5, mac, vid, &pcfg2))
+               return 1;
+
+       return 0;
 }
 
 int sparx5_mact_forget(struct sparx5 *sparx5,
@@ -286,7 +299,8 @@ static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type,
 }
 
 int sparx5_add_mact_entry(struct sparx5 *sparx5,
-                         struct sparx5_port *port,
+                         struct net_device *dev,
+                         u16 portno,
                          const unsigned char *addr, u16 vid)
 {
        struct sparx5_mact_entry *mact_entry;
@@ -302,14 +316,14 @@ int sparx5_add_mact_entry(struct sparx5 *sparx5,
         * mact thread to start the frame will reach CPU and the CPU will
         * add the entry but without the extern_learn flag.
         */
-       mact_entry = find_mact_entry(sparx5, addr, vid, port->portno);
+       mact_entry = find_mact_entry(sparx5, addr, vid, portno);
        if (mact_entry)
                goto update_hw;
 
        /* Add the entry in SW MAC table not to get the notification when
         * SW is pulling again
         */
-       mact_entry = alloc_mact_entry(sparx5, addr, vid, port->portno);
+       mact_entry = alloc_mact_entry(sparx5, addr, vid, portno);
        if (!mact_entry)
                return -ENOMEM;
 
@@ -318,13 +332,13 @@ int sparx5_add_mact_entry(struct sparx5 *sparx5,
        mutex_unlock(&sparx5->mact_lock);
 
 update_hw:
-       ret = sparx5_mact_learn(sparx5, port->portno, addr, vid);
+       ret = sparx5_mact_learn(sparx5, portno, addr, vid);
 
        /* New entry? */
        if (mact_entry->flags == 0) {
                mact_entry->flags |= MAC_ENT_LOCK; /* Don't age this */
                sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, addr, vid,
-                                         port->ndev, true);
+                                         dev, true);
        }
 
        return ret;
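
sparx5_mact_find() splits the old lookup into a reusable primitive: it
reports hit/miss as a bool and hands back the raw LRN_MAC_ACCESS_CFG_2
word for the caller to decode. A sketch of the intended calling pattern
(the helper name is illustrative, not part of the patch):

        static bool example_mact_dest(struct sparx5 *sparx5,
                                      const unsigned char mac[ETH_ALEN],
                                      u16 vid, u16 *dest)
        {
                u32 cfg2;

                if (!sparx5_mact_find(sparx5, mac, vid, &cfg2))
                        return false;   /* no valid entry for this mac/vid */

                /* CFG_2 carries the address/index field of the hit entry */
                *dest = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(cfg2);
                return true;
        }
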
index 5f7c703..01be7bd 100644 (file)
@@ -626,6 +626,9 @@ static int sparx5_start(struct sparx5 *sparx5)
        /* Init MAC table, ageing */
        sparx5_mact_init(sparx5);
 
+       /* Init PGID table arbitrator */
+       sparx5_pgid_init(sparx5);
+
        /* Setup VLANs */
        sparx5_vlan_init(sparx5);
 
index 33892df..7a04b8f 100644 (file)
@@ -66,6 +66,12 @@ enum sparx5_vlan_port_type {
 #define PGID_BCAST            (PGID_BASE + 6)
 #define PGID_CPU              (PGID_BASE + 7)
 
+#define PGID_TABLE_SIZE               3290
+
+#define PGID_MCAST_START 65
+#define PGID_GLAG_START 833
+#define PGID_GLAG_END 1088
+
 #define IFH_LEN                9 /* 36 bytes */
 #define NULL_VID               0
 #define SPX5_MACT_PULL_DELAY   (2 * HZ)
@@ -271,6 +277,8 @@ struct sparx5 {
        struct mutex ptp_lock; /* lock for ptp interface state */
        u16 ptp_skbs;
        int ptp_irq;
+       /* PGID allocation map */
+       u8 pgid_map[PGID_TABLE_SIZE];
 };
 
 /* sparx5_switchdev.c */
@@ -302,10 +310,13 @@ int sparx5_mact_learn(struct sparx5 *sparx5, int port,
                      const unsigned char mac[ETH_ALEN], u16 vid);
 bool sparx5_mact_getnext(struct sparx5 *sparx5,
                         unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2);
+bool sparx5_mact_find(struct sparx5 *sparx5,
+                     const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2);
 int sparx5_mact_forget(struct sparx5 *sparx5,
                       const unsigned char mac[ETH_ALEN], u16 vid);
 int sparx5_add_mact_entry(struct sparx5 *sparx5,
-                         struct sparx5_port *port,
+                         struct net_device *dev,
+                         u16 portno,
                          const unsigned char *addr, u16 vid);
 int sparx5_del_mact_entry(struct sparx5 *sparx5,
                          const unsigned char *addr,
@@ -358,6 +369,19 @@ void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
                                 struct sk_buff *skb);
 irqreturn_t sparx5_ptp_irq_handler(int irq, void *args);
 
+/* sparx5_pgid.c */
+enum sparx5_pgid_type {
+       SPX5_PGID_FREE,
+       SPX5_PGID_RESERVED,
+       SPX5_PGID_MULTICAST,
+       SPX5_PGID_GLAG
+};
+
+void sparx5_pgid_init(struct sparx5 *spx5);
+int sparx5_pgid_alloc_glag(struct sparx5 *spx5, u16 *idx);
+int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx);
+int sparx5_pgid_free(struct sparx5 *spx5, u16 idx);
+
 /* Clock period in picoseconds */
 static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock)
 {
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c
new file mode 100644 (file)
index 0000000..90366fc
--- /dev/null
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include "sparx5_main.h"
+
+void sparx5_pgid_init(struct sparx5 *spx5)
+{
+       int i;
+
+       for (i = 0; i < PGID_TABLE_SIZE; i++)
+               spx5->pgid_map[i] = SPX5_PGID_FREE;
+
+       /* Reserved for unicast, flood control, broadcast, and CPU.
+        * These cannot be freed.
+        */
+       for (i = 0; i <= PGID_CPU; i++)
+               spx5->pgid_map[i] = SPX5_PGID_RESERVED;
+}
+
+int sparx5_pgid_alloc_glag(struct sparx5 *spx5, u16 *idx)
+{
+       int i;
+
+       for (i = PGID_GLAG_START; i <= PGID_GLAG_END; i++)
+               if (spx5->pgid_map[i] == SPX5_PGID_FREE) {
+                       spx5->pgid_map[i] = SPX5_PGID_GLAG;
+                       *idx = i;
+                       return 0;
+               }
+
+       return -EBUSY;
+}
+
+int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx)
+{
+       int i;
+
+       for (i = PGID_MCAST_START; i < PGID_TABLE_SIZE; i++) {
+               if (i == PGID_GLAG_START)
+                       i = PGID_GLAG_END + 1;
+
+               if (spx5->pgid_map[i] == SPX5_PGID_FREE) {
+                       spx5->pgid_map[i] = SPX5_PGID_MULTICAST;
+                       *idx = i;
+                       return 0;
+               }
+       }
+
+       return -EBUSY;
+}
+
+int sparx5_pgid_free(struct sparx5 *spx5, u16 idx)
+{
+       if (idx <= PGID_CPU || idx >= PGID_TABLE_SIZE)
+               return -EINVAL;
+
+       if (spx5->pgid_map[idx] == SPX5_PGID_FREE)
+               return -EINVAL;
+
+       spx5->pgid_map[idx] = SPX5_PGID_FREE;
+       return 0;
+}
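
The allocator is a first-fit scan over the pgid_map byte array, with the
GLAG window [PGID_GLAG_START, PGID_GLAG_END] carved out of the multicast
range. A sketch of the expected alloc/program/free lifecycle (the helper
is illustrative, not driver code):

        static int example_mcast_pgid_user(struct sparx5 *spx5)
        {
                u16 idx;
                int err;

                err = sparx5_pgid_alloc_mcast(spx5, &idx);
                if (err)
                        return err;     /* -EBUSY: no free row left */

                /* ... program the port mask for row 'idx' here ... */

                return sparx5_pgid_free(spx5, idx); /* -EINVAL for reserved rows */
        }
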
index cd110c3..0ed1ea7 100644 (file)
@@ -45,7 +45,7 @@ static u64 sparx5_ptp_get_1ppm(struct sparx5 *sparx5)
                res =  920535763834;
                break;
        default:
-               WARN_ON("Invalid core clock");
+               WARN(1, "Invalid core clock");
                break;
        }
 
@@ -67,7 +67,7 @@ static u64 sparx5_ptp_get_nominal_value(struct sparx5 *sparx5)
                res = 0x0CC6666666666666;
                break;
        default:
-               WARN_ON("Invalid core clock");
+               WARN(1, "Invalid core clock");
                break;
        }
 
index dacb87f..2d8e0b8 100644 (file)
@@ -16,6 +16,7 @@ struct sparx5_switchdev_event_work {
        struct work_struct work;
        struct switchdev_notifier_fdb_info fdb_info;
        struct net_device *dev;
+       struct sparx5 *sparx5;
        unsigned long event;
 };
 
@@ -101,6 +102,11 @@ static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
                sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+               /* Use PVID 1 when default_pvid is 0, to avoid
+                * collision with non-bridged ports.
+                */
+               if (port->pvid == 0)
+                       port->pvid = 1;
                port->vlan_aware = attr->u.vlan_filtering;
                sparx5_vlan_port_apply(port->sparx5, port);
                break;
@@ -136,6 +142,9 @@ static int sparx5_port_bridge_join(struct sparx5_port *port,
        if (err)
                goto err_switchdev_offload;
 
+       /* Remove standalone port entry */
+       sparx5_mact_forget(sparx5, ndev->dev_addr, 0);
+
        /* Port enters bridge mode, so it no longer needs multicast frames
         * copied to the CPU unless the bridge requests them
         */
@@ -164,6 +173,9 @@ static void sparx5_port_bridge_leave(struct sparx5_port *port,
        port->pvid = NULL_VID;
        port->vid = NULL_VID;
 
+       /* Forward frames to CPU */
+       sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, 0);
+
        /* Port enters host mode, therefore restore the mc list */
        __dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
 }
@@ -247,31 +259,43 @@ static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
        struct switchdev_notifier_fdb_info *fdb_info;
        struct sparx5_port *port;
        struct sparx5 *sparx5;
+       bool host_addr;
+       u16 vid;
 
        rtnl_lock();
-       if (!sparx5_netdevice_check(dev))
-               goto out;
-
-       port = netdev_priv(dev);
-       sparx5 = port->sparx5;
+       if (!sparx5_netdevice_check(dev)) {
+               host_addr = true;
+               sparx5 = switchdev_work->sparx5;
+       } else {
+               host_addr = false;
+               sparx5 = switchdev_work->sparx5;
+               port = netdev_priv(dev);
+       }
 
        fdb_info = &switchdev_work->fdb_info;
 
+       /* Use PVID 1 when default_pvid is 0, to avoid
+        * collision with non-bridged ports.
+        */
+       if (fdb_info->vid == 0)
+               vid = 1;
+       else
+               vid = fdb_info->vid;
+
        switch (switchdev_work->event) {
        case SWITCHDEV_FDB_ADD_TO_DEVICE:
-               if (!fdb_info->added_by_user)
-                       break;
-               sparx5_add_mact_entry(sparx5, port, fdb_info->addr,
-                                     fdb_info->vid);
+               if (host_addr)
+                       sparx5_add_mact_entry(sparx5, dev, PGID_CPU,
+                                             fdb_info->addr, vid);
+               else
+                       sparx5_add_mact_entry(sparx5, port->ndev, port->portno,
+                                             fdb_info->addr, vid);
                break;
        case SWITCHDEV_FDB_DEL_TO_DEVICE:
-               if (!fdb_info->added_by_user)
-                       break;
-               sparx5_del_mact_entry(sparx5, fdb_info->addr, fdb_info->vid);
+               sparx5_del_mact_entry(sparx5, fdb_info->addr, vid);
                break;
        }
 
-out:
        rtnl_unlock();
        kfree(switchdev_work->fdb_info.addr);
        kfree(switchdev_work);
@@ -283,15 +307,18 @@ static void sparx5_schedule_work(struct work_struct *work)
        queue_work(sparx5_owq, work);
 }
 
-static int sparx5_switchdev_event(struct notifier_block *unused,
+static int sparx5_switchdev_event(struct notifier_block *nb,
                                  unsigned long event, void *ptr)
 {
        struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
        struct sparx5_switchdev_event_work *switchdev_work;
        struct switchdev_notifier_fdb_info *fdb_info;
        struct switchdev_notifier_info *info = ptr;
+       struct sparx5 *spx5;
        int err;
 
+       spx5 = container_of(nb, struct sparx5, switchdev_nb);
+
        switch (event) {
        case SWITCHDEV_PORT_ATTR_SET:
                err = switchdev_handle_port_attr_set(dev, ptr,
@@ -307,6 +334,7 @@ static int sparx5_switchdev_event(struct notifier_block *unused,
 
                switchdev_work->dev = dev;
                switchdev_work->event = event;
+               switchdev_work->sparx5 = spx5;
 
                fdb_info = container_of(info,
                                        struct switchdev_notifier_fdb_info,
@@ -333,54 +361,6 @@ err_addr_alloc:
        return NOTIFY_BAD;
 }
 
-static void sparx5_sync_port_dev_addr(struct sparx5 *sparx5,
-                                     struct sparx5_port *port,
-                                     u16 vid, bool add)
-{
-       if (!port ||
-           !test_bit(port->portno, sparx5->bridge_mask))
-               return; /* Skip null/host interfaces */
-
-       /* Bridge connects to vid? */
-       if (add) {
-               /* Add port MAC address from the VLAN */
-               sparx5_mact_learn(sparx5, PGID_CPU,
-                                 port->ndev->dev_addr, vid);
-       } else {
-               /* Control port addr visibility depending on
-                * port VLAN connectivity.
-                */
-               if (test_bit(port->portno, sparx5->vlan_mask[vid]))
-                       sparx5_mact_learn(sparx5, PGID_CPU,
-                                         port->ndev->dev_addr, vid);
-               else
-                       sparx5_mact_forget(sparx5,
-                                          port->ndev->dev_addr, vid);
-       }
-}
-
-static void sparx5_sync_bridge_dev_addr(struct net_device *dev,
-                                       struct sparx5 *sparx5,
-                                       u16 vid, bool add)
-{
-       int i;
-
-       /* First, handle bridge address'es */
-       if (add) {
-               sparx5_mact_learn(sparx5, PGID_CPU, dev->dev_addr,
-                                 vid);
-               sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
-                                 vid);
-       } else {
-               sparx5_mact_forget(sparx5, dev->dev_addr, vid);
-               sparx5_mact_forget(sparx5, dev->broadcast, vid);
-       }
-
-       /* Now look at bridged ports */
-       for (i = 0; i < SPX5_PORTS; i++)
-               sparx5_sync_port_dev_addr(sparx5, sparx5->ports[i], vid, add);
-}
-
 static int sparx5_handle_port_vlan_add(struct net_device *dev,
                                       struct notifier_block *nb,
                                       const struct switchdev_obj_port_vlan *v)
@@ -392,7 +372,9 @@ static int sparx5_handle_port_vlan_add(struct net_device *dev,
                        container_of(nb, struct sparx5,
                                     switchdev_blocking_nb);
 
-               sparx5_sync_bridge_dev_addr(dev, sparx5, v->vid, true);
+               /* Flood broadcast to CPU */
+               sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
+                                 v->vid);
                return 0;
        }
 
@@ -404,6 +386,109 @@ static int sparx5_handle_port_vlan_add(struct net_device *dev,
                                  v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
 }
 
+static int sparx5_handle_port_mdb_add(struct net_device *dev,
+                                     struct notifier_block *nb,
+                                     const struct switchdev_obj_port_mdb *v)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+       struct sparx5 *spx5 = port->sparx5;
+       u16 pgid_idx, vid;
+       u32 mact_entry;
+       int res, err;
+
+       /* When the bridge is VLAN unaware, the VLAN value is not parsed and
+        * we receive vid 0. Fall back to bridge vid 1.
+        */
+       if (!br_vlan_enabled(spx5->hw_bridge_dev))
+               vid = 1;
+       else
+               vid = v->vid;
+
+       res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);
+
+       if (res) {
+               pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);
+
+               /* MC_IDX has an offset of 65 in the PGID table. */
+               pgid_idx += PGID_MCAST_START;
+               sparx5_pgid_update_mask(port, pgid_idx, true);
+       } else {
+               err = sparx5_pgid_alloc_mcast(spx5, &pgid_idx);
+               if (err) {
+                       netdev_warn(dev, "multicast pgid table full\n");
+                       return err;
+               }
+               sparx5_pgid_update_mask(port, pgid_idx, true);
+               err = sparx5_mact_learn(spx5, pgid_idx, v->addr, vid);
+               if (err) {
+                       netdev_warn(dev, "could not learn mac address %pM\n", v->addr);
+                       sparx5_pgid_update_mask(port, pgid_idx, false);
+                       return err;
+               }
+       }
+
+       return 0;
+}
+
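+/* Worked example (illustrative): a MACT hit whose MAC_ENTRY_ADDR field
+ * decodes to 3 selects PGID row 3 + PGID_MCAST_START = 68, the row whose
+ * port mask sparx5_pgid_update_mask() edits.
+ */
+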
+static int sparx5_mdb_del_entry(struct net_device *dev,
+                               struct sparx5 *spx5,
+                               const unsigned char mac[ETH_ALEN],
+                               const u16 vid,
+                               u16 pgid_idx)
+{
+       int err;
+
+       err = sparx5_mact_forget(spx5, mac, vid);
+       if (err) {
+               netdev_warn(dev, "could not forget mac address %pM\n", mac);
+               return err;
+       }
+       err = sparx5_pgid_free(spx5, pgid_idx);
+       if (err) {
+               netdev_err(dev, "attempted to free already freed pgid\n");
+               return err;
+       }
+       return 0;
+}
+
+static int sparx5_handle_port_mdb_del(struct net_device *dev,
+                                     struct notifier_block *nb,
+                                     const struct switchdev_obj_port_mdb *v)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+       struct sparx5 *spx5 = port->sparx5;
+       u16 pgid_idx, vid;
+       u32 mact_entry, res, pgid_entry[3];
+       int err;
+
+       if (!br_vlan_enabled(spx5->hw_bridge_dev))
+               vid = 1;
+       else
+               vid = v->vid;
+
+       res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);
+
+       if (res) {
+               pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);
+
+               /* MC_IDX has an offset of 65 in the PGID table. */
+               pgid_idx += PGID_MCAST_START;
+               sparx5_pgid_update_mask(port, pgid_idx, false);
+
+               pgid_entry[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid_idx));
+               pgid_entry[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid_idx));
+               pgid_entry[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid_idx));
+               if (pgid_entry[0] == 0 && pgid_entry[1] == 0 && pgid_entry[2] == 0) {
+                       /* No ports are in MC group. Remove entry */
+                       err = sparx5_mdb_del_entry(dev, spx5, v->addr, vid, pgid_idx);
+                       if (err)
+                               return err;
+               }
+       }
+
+       return 0;
+}
+
 static int sparx5_handle_port_obj_add(struct net_device *dev,
                                      struct notifier_block *nb,
                                      struct switchdev_notifier_port_obj_info *info)
@@ -416,6 +501,10 @@ static int sparx5_handle_port_obj_add(struct net_device *dev,
                err = sparx5_handle_port_vlan_add(dev, nb,
                                                  SWITCHDEV_OBJ_PORT_VLAN(obj));
                break;
+       case SWITCHDEV_OBJ_ID_PORT_MDB:
+               err = sparx5_handle_port_mdb_add(dev, nb,
+                                                SWITCHDEV_OBJ_PORT_MDB(obj));
+               break;
        default:
                err = -EOPNOTSUPP;
                break;
@@ -438,7 +527,7 @@ static int sparx5_handle_port_vlan_del(struct net_device *dev,
                        container_of(nb, struct sparx5,
                                     switchdev_blocking_nb);
 
-               sparx5_sync_bridge_dev_addr(dev, sparx5, vid, false);
+               sparx5_mact_forget(sparx5, dev->broadcast, vid);
                return 0;
        }
 
@@ -449,9 +538,6 @@ static int sparx5_handle_port_vlan_del(struct net_device *dev,
        if (ret)
                return ret;
 
-       /* Delete the port MAC address with the matching VLAN information */
-       sparx5_mact_forget(port->sparx5, port->ndev->dev_addr, vid);
-
        return 0;
 }
 
@@ -467,6 +553,10 @@ static int sparx5_handle_port_obj_del(struct net_device *dev,
                err = sparx5_handle_port_vlan_del(dev, nb,
                                                  SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
                break;
+       case SWITCHDEV_OBJ_ID_PORT_MDB:
+               err = sparx5_handle_port_mdb_del(dev, nb,
+                                                SWITCHDEV_OBJ_PORT_MDB(obj));
+               break;
        default:
                err = -EOPNOTSUPP;
                break;
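
On SWITCHDEV_OBJ_ID_PORT_MDB deletion, the MACT entry and its PGID row are
torn down only once no port remains in the group, i.e. once all three
ANA_AC_PGID_CFG words read back as zero. That test, pulled out as a sketch
(the helper name is illustrative):

        static bool example_pgid_group_empty(struct sparx5 *spx5, u16 pgid_idx)
        {
                /* 3 x 32-bit words cover the full port mask of one row */
                return !spx5_rd(spx5, ANA_AC_PGID_CFG(pgid_idx)) &&
                       !spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid_idx)) &&
                       !spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid_idx));
        }
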
index 2113412..e443bd8 100644 (file)
@@ -2907,6 +2907,198 @@ void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
 }
 EXPORT_SYMBOL(ocelot_port_bridge_flags);
 
+int ocelot_port_get_default_prio(struct ocelot *ocelot, int port)
+{
+       int val = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
+
+       return ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(val);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_default_prio);
+
+int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio)
+{
+       if (prio >= OCELOT_NUM_TC)
+               return -ERANGE;
+
+       ocelot_rmw_gix(ocelot,
+                      ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL(prio),
+                      ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_M,
+                      ANA_PORT_QOS_CFG,
+                      port);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_set_default_prio);
+
+int ocelot_port_get_dscp_prio(struct ocelot *ocelot, int port, u8 dscp)
+{
+       int qos_cfg = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
+       int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
+
+       /* Return error if DSCP prioritization isn't enabled */
+       if (!(qos_cfg & ANA_PORT_QOS_CFG_QOS_DSCP_ENA))
+               return -EOPNOTSUPP;
+
+       if (qos_cfg & ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA) {
+               dscp = ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_X(dscp_cfg);
+               /* Re-read ANA_DSCP_CFG for the translated DSCP */
+               dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
+       }
+
+       /* If the DSCP value is not trusted, the QoS classification falls back
+        * to VLAN PCP or port-based default.
+        */
+       if (!(dscp_cfg & ANA_DSCP_CFG_DSCP_TRUST_ENA))
+               return -EOPNOTSUPP;
+
+       return ANA_DSCP_CFG_QOS_DSCP_VAL_X(dscp_cfg);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_dscp_prio);
+
+int ocelot_port_add_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio)
+{
+       int mask, val;
+
+       if (prio >= OCELOT_NUM_TC)
+               return -ERANGE;
+
+       /* There is at least one app table priority (this one), so we need to
+        * make sure DSCP prioritization is enabled on the port.
+        * Also make sure DSCP translation is disabled
+        * (dcbnl doesn't support it).
+        */
+       mask = ANA_PORT_QOS_CFG_QOS_DSCP_ENA |
+              ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA;
+
+       ocelot_rmw_gix(ocelot, ANA_PORT_QOS_CFG_QOS_DSCP_ENA, mask,
+                      ANA_PORT_QOS_CFG, port);
+
+       /* Trust this DSCP value and map it to the given QoS class */
+       val = ANA_DSCP_CFG_DSCP_TRUST_ENA | ANA_DSCP_CFG_QOS_DSCP_VAL(prio);
+
+       ocelot_write_rix(ocelot, val, ANA_DSCP_CFG, dscp);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_add_dscp_prio);
+
+int ocelot_port_del_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio)
+{
+       int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
+       int mask, i;
+
+       /* During a "dcb app replace" command, the new app table entry will be
+        * added first, then the old one will be deleted. But the hardware only
+        * supports one QoS class per DSCP value (duh), so if we blindly delete
+        * the app table entry for this DSCP value, we end up deleting the
+        * entry with the new priority. Avoid that by checking whether user
+        * space wants to delete the priority which is currently configured, or
+        * something else which is no longer current.
+        */
+       if (ANA_DSCP_CFG_QOS_DSCP_VAL_X(dscp_cfg) != prio)
+               return 0;
+
+       /* Untrust this DSCP value */
+       ocelot_write_rix(ocelot, 0, ANA_DSCP_CFG, dscp);
+
+       for (i = 0; i < 64; i++) {
+               int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, i);
+
+               /* There are still app table entries on the port, so we need to
+                * keep DSCP enabled, nothing to do.
+                */
+               if (dscp_cfg & ANA_DSCP_CFG_DSCP_TRUST_ENA)
+                       return 0;
+       }
+
+       /* Disable DSCP QoS classification if there isn't any trusted
+        * DSCP value left.
+        */
+       mask = ANA_PORT_QOS_CFG_QOS_DSCP_ENA |
+              ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA;
+
+       ocelot_rmw_gix(ocelot, 0, mask, ANA_PORT_QOS_CFG, port);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_del_dscp_prio);
+
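+/* Worked example (illustrative): "dcb app replace" adds the new entry
+ * before deleting the old one, e.g. remapping DSCP 46 from prio 5 to 7:
+ *
+ *     ocelot_port_add_dscp_prio(ocelot, port, 46, 7);
+ *     ocelot_port_del_dscp_prio(ocelot, port, 46, 5);
+ *
+ * The delete is a no-op because ANA_DSCP_CFG now maps DSCP 46 to prio 7,
+ * not 5, which is exactly the stale-entry case the guard above handles.
+ */
+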
+struct ocelot_mirror *ocelot_mirror_get(struct ocelot *ocelot, int to,
+                                       struct netlink_ext_ack *extack)
+{
+       struct ocelot_mirror *m = ocelot->mirror;
+
+       if (m) {
+               if (m->to != to) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Mirroring already configured towards different egress port");
+                       return ERR_PTR(-EBUSY);
+               }
+
+               refcount_inc(&m->refcount);
+               return m;
+       }
+
+       m = kzalloc(sizeof(*m), GFP_KERNEL);
+       if (!m)
+               return ERR_PTR(-ENOMEM);
+
+       m->to = to;
+       refcount_set(&m->refcount, 1);
+       ocelot->mirror = m;
+
+       /* Program the mirror port to hardware */
+       ocelot_write(ocelot, BIT(to), ANA_MIRRORPORTS);
+
+       return m;
+}
+
+void ocelot_mirror_put(struct ocelot *ocelot)
+{
+       struct ocelot_mirror *m = ocelot->mirror;
+
+       if (!refcount_dec_and_test(&m->refcount))
+               return;
+
+       ocelot_write(ocelot, 0, ANA_MIRRORPORTS);
+       ocelot->mirror = NULL;
+       kfree(m);
+}
+
+int ocelot_port_mirror_add(struct ocelot *ocelot, int from, int to,
+                          bool ingress, struct netlink_ext_ack *extack)
+{
+       struct ocelot_mirror *m = ocelot_mirror_get(ocelot, to, extack);
+
+       if (IS_ERR(m))
+               return PTR_ERR(m);
+
+       if (ingress) {
+               ocelot_rmw_gix(ocelot, ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
+                              ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
+                              ANA_PORT_PORT_CFG, from);
+       } else {
+               ocelot_rmw(ocelot, BIT(from), BIT(from),
+                          ANA_EMIRRORPORTS);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_mirror_add);
+
+void ocelot_port_mirror_del(struct ocelot *ocelot, int from, bool ingress)
+{
+       if (ingress) {
+               ocelot_rmw_gix(ocelot, 0, ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
+                              ANA_PORT_PORT_CFG, from);
+       } else {
+               ocelot_rmw(ocelot, 0, BIT(from), ANA_EMIRRORPORTS);
+       }
+
+       ocelot_mirror_put(ocelot);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_mirror_del);
+
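+/* Usage sketch (illustrative): matchall and VCAP users share the one
+ * hardware mirror destination via the refcounted pair:
+ *
+ *     m = ocelot_mirror_get(ocelot, to, extack);
+ *     if (IS_ERR(m))
+ *             return PTR_ERR(m);      // -EBUSY: mirroring to another port
+ *     ...
+ *     ocelot_mirror_put(ocelot);      // clears ANA_MIRRORPORTS on last put
+ */
+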
 void ocelot_init_port(struct ocelot *ocelot, int port)
 {
        struct ocelot_port *ocelot_port = ocelot->ports[port];
index f8dc0d7..d0fa8ab 100644 (file)
@@ -38,7 +38,8 @@
 struct ocelot_port_tc {
        bool block_shared;
        unsigned long offload_cnt;
-
+       unsigned long ingress_mirred_id;
+       unsigned long egress_mirred_id;
        unsigned long police_id;
 };
 
@@ -111,6 +112,10 @@ int ocelot_trap_add(struct ocelot *ocelot, int port,
                    void (*populate)(struct ocelot_vcap_filter *f));
 int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie);
 
+struct ocelot_mirror *ocelot_mirror_get(struct ocelot *ocelot, int to,
+                                       struct netlink_ext_ack *extack);
+void ocelot_mirror_put(struct ocelot *ocelot);
+
 extern struct notifier_block ocelot_netdevice_nb;
 extern struct notifier_block ocelot_switchdev_nb;
 extern struct notifier_block ocelot_switchdev_blocking_nb;
index b3f5418..03b5e59 100644 (file)
@@ -61,6 +61,12 @@ static int ocelot_chain_to_block(int chain, bool ingress)
  */
 static int ocelot_chain_to_lookup(int chain)
 {
+       /* Backwards compatibility with older, single-chain tc-flower
+        * offload support in Ocelot
+        */
+       if (chain == 0)
+               return 0;
+
        return (chain / VCAP_LOOKUP) % 10;
 }
 
@@ -69,7 +75,15 @@ static int ocelot_chain_to_lookup(int chain)
  */
 static int ocelot_chain_to_pag(int chain)
 {
-       int lookup = ocelot_chain_to_lookup(chain);
+       int lookup;
+
+       /* Backwards compatibility with older, single-chain tc-flower
+        * offload support in Ocelot
+        */
+       if (chain == 0)
+               return 0;
+
+       lookup = ocelot_chain_to_lookup(chain);
 
        /* calculate PAG value as chain index relative to the first PAG */
        return chain - VCAP_IS2_CHAIN(lookup, 0);
@@ -345,6 +359,27 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
                        filter->action.port_mask = BIT(egress_port);
                        filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
                        break;
+               case FLOW_ACTION_MIRRED:
+                       if (filter->block_id != VCAP_IS2) {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "Mirror action can only be offloaded to VCAP IS2");
+                               return -EOPNOTSUPP;
+                       }
+                       if (filter->goto_target != -1) {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "Last action must be GOTO");
+                               return -EOPNOTSUPP;
+                       }
+                       egress_port = ocelot->ops->netdev_to_port(a->dev);
+                       if (egress_port < 0) {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "Destination not an ocelot port");
+                               return -EOPNOTSUPP;
+                       }
+                       filter->egress_port.value = egress_port;
+                       filter->action.mirror_ena = true;
+                       filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+                       break;
                case FLOW_ACTION_VLAN_POP:
                        if (filter->block_id != VCAP_IS1) {
                                NL_SET_ERR_MSG_MOD(extack,
index 5767e38..247bc10 100644 (file)
@@ -20,6 +20,8 @@
 
 #define OCELOT_MAC_QUIRKS      OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP
 
+static bool ocelot_netdevice_dev_check(const struct net_device *dev);
+
 static struct ocelot *devlink_port_to_ocelot(struct devlink_port *dlp)
 {
        return devlink_priv(dlp->devlink);
@@ -216,14 +218,14 @@ int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
        }
 }
 
-static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
-                                       struct tc_cls_matchall_offload *f,
-                                       bool ingress)
+static int ocelot_setup_tc_cls_matchall_police(struct ocelot_port_private *priv,
+                                              struct tc_cls_matchall_offload *f,
+                                              bool ingress,
+                                              struct netlink_ext_ack *extack)
 {
-       struct netlink_ext_ack *extack = f->common.extack;
+       struct flow_action_entry *action = &f->rule->action.entries[0];
        struct ocelot *ocelot = priv->port.ocelot;
        struct ocelot_policer pol = { 0 };
-       struct flow_action_entry *action;
        int port = priv->chip_port;
        int err;
 
@@ -232,6 +234,119 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
                return -EOPNOTSUPP;
        }
 
+       if (priv->tc.police_id && priv->tc.police_id != f->cookie) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Only one policer per port is supported");
+               return -EEXIST;
+       }
+
+       err = ocelot_policer_validate(&f->rule->action, action, extack);
+       if (err)
+               return err;
+
+       pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8;
+       pol.burst = action->police.burst;
+
+       err = ocelot_port_policer_add(ocelot, port, &pol);
+       if (err) {
+               NL_SET_ERR_MSG_MOD(extack, "Could not add policer");
+               return err;
+       }
+
+       priv->tc.police_id = f->cookie;
+       priv->tc.offload_cnt++;
+
+       return 0;
+}
+
+static int ocelot_setup_tc_cls_matchall_mirred(struct ocelot_port_private *priv,
+                                              struct tc_cls_matchall_offload *f,
+                                              bool ingress,
+                                              struct netlink_ext_ack *extack)
+{
+       struct flow_action *action = &f->rule->action;
+       struct ocelot *ocelot = priv->port.ocelot;
+       struct ocelot_port_private *other_priv;
+       const struct flow_action_entry *a;
+       int err;
+
+       if (f->common.protocol != htons(ETH_P_ALL))
+               return -EOPNOTSUPP;
+
+       if (!flow_action_basic_hw_stats_check(action, extack))
+               return -EOPNOTSUPP;
+
+       a = &action->entries[0];
+       if (!a->dev)
+               return -EINVAL;
+
+       if (!ocelot_netdevice_dev_check(a->dev)) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Destination not an ocelot port");
+               return -EOPNOTSUPP;
+       }
+
+       other_priv = netdev_priv(a->dev);
+
+       err = ocelot_port_mirror_add(ocelot, priv->chip_port,
+                                    other_priv->chip_port, ingress, extack);
+       if (err)
+               return err;
+
+       if (ingress)
+               priv->tc.ingress_mirred_id = f->cookie;
+       else
+               priv->tc.egress_mirred_id = f->cookie;
+       priv->tc.offload_cnt++;
+
+       return 0;
+}
+
+static int ocelot_del_tc_cls_matchall_police(struct ocelot_port_private *priv,
+                                            struct netlink_ext_ack *extack)
+{
+       struct ocelot *ocelot = priv->port.ocelot;
+       int port = priv->chip_port;
+       int err;
+
+       err = ocelot_port_policer_del(ocelot, port);
+       if (err) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Could not delete policer");
+               return err;
+       }
+
+       priv->tc.police_id = 0;
+       priv->tc.offload_cnt--;
+
+       return 0;
+}
+
+static int ocelot_del_tc_cls_matchall_mirred(struct ocelot_port_private *priv,
+                                            bool ingress,
+                                            struct netlink_ext_ack *extack)
+{
+       struct ocelot *ocelot = priv->port.ocelot;
+       int port = priv->chip_port;
+
+       ocelot_port_mirror_del(ocelot, port, ingress);
+
+       if (ingress)
+               priv->tc.ingress_mirred_id = 0;
+       else
+               priv->tc.egress_mirred_id = 0;
+       priv->tc.offload_cnt--;
+
+       return 0;
+}
+
+static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
+                                       struct tc_cls_matchall_offload *f,
+                                       bool ingress)
+{
+       struct netlink_ext_ack *extack = f->common.extack;
+       struct flow_action_entry *action;
+
        switch (f->command) {
        case TC_CLSMATCHALL_REPLACE:
                if (!flow_offload_has_one_action(&f->rule->action)) {
@@ -242,53 +357,41 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
 
                if (priv->tc.block_shared) {
                        NL_SET_ERR_MSG_MOD(extack,
-                                          "Rate limit is not supported on shared blocks");
+                                          "Matchall offloads not supported on shared blocks");
                        return -EOPNOTSUPP;
                }
 
                action = &f->rule->action.entries[0];
 
-               if (action->id != FLOW_ACTION_POLICE) {
+               switch (action->id) {
+               case FLOW_ACTION_POLICE:
+                       return ocelot_setup_tc_cls_matchall_police(priv, f,
+                                                                  ingress,
+                                                                  extack);
+               case FLOW_ACTION_MIRRED:
+                       return ocelot_setup_tc_cls_matchall_mirred(priv, f,
+                                                                  ingress,
+                                                                  extack);
+               default:
                        NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
                        return -EOPNOTSUPP;
                }
 
-               if (priv->tc.police_id && priv->tc.police_id != f->cookie) {
-                       NL_SET_ERR_MSG_MOD(extack,
-                                          "Only one policer per port is supported");
-                       return -EEXIST;
-               }
-
-               err = ocelot_policer_validate(&f->rule->action, action,
-                                             extack);
-               if (err)
-                       return err;
-
-               pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8;
-               pol.burst = action->police.burst;
-
-               err = ocelot_port_policer_add(ocelot, port, &pol);
-               if (err) {
-                       NL_SET_ERR_MSG_MOD(extack, "Could not add policer");
-                       return err;
-               }
-
-               priv->tc.police_id = f->cookie;
-               priv->tc.offload_cnt++;
-               return 0;
+               break;
        case TC_CLSMATCHALL_DESTROY:
-               if (priv->tc.police_id != f->cookie)
+               action = &f->rule->action.entries[0];
+
+               if (f->cookie == priv->tc.police_id)
+                       return ocelot_del_tc_cls_matchall_police(priv, extack);
+               else if (f->cookie == priv->tc.ingress_mirred_id ||
+                        f->cookie == priv->tc.egress_mirred_id)
+                       return ocelot_del_tc_cls_matchall_mirred(priv, ingress,
+                                                                extack);
+               else
                        return -ENOENT;
 
-               err = ocelot_port_policer_del(ocelot, port);
-               if (err) {
-                       NL_SET_ERR_MSG_MOD(extack,
-                                          "Could not delete policer");
-                       return err;
-               }
-               priv->tc.police_id = 0;
-               priv->tc.offload_cnt--;
-               return 0;
+               break;
        case TC_CLSMATCHALL_STATS:
        default:
                return -EOPNOTSUPP;
index b976d48..c8701ac 100644 (file)
@@ -335,6 +335,7 @@ static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data,
 
        vcap_action_set(vcap, data, VCAP_IS2_ACT_MASK_MODE, a->mask_mode);
        vcap_action_set(vcap, data, VCAP_IS2_ACT_PORT_MASK, a->port_mask);
+       vcap_action_set(vcap, data, VCAP_IS2_ACT_MIRROR_ENA, a->mirror_ena);
        vcap_action_set(vcap, data, VCAP_IS2_ACT_POLICE_ENA, a->police_ena);
        vcap_action_set(vcap, data, VCAP_IS2_ACT_POLICE_IDX, a->pol_ix);
        vcap_action_set(vcap, data, VCAP_IS2_ACT_CPU_QU_NUM, a->cpu_qu_num);
@@ -955,14 +956,21 @@ int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix)
 }
 EXPORT_SYMBOL(ocelot_vcap_policer_del);
 
-static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
-                                          struct ocelot_vcap_block *block,
-                                          struct ocelot_vcap_filter *filter)
+static int
+ocelot_vcap_filter_add_aux_resources(struct ocelot *ocelot,
+                                    struct ocelot_vcap_filter *filter,
+                                    struct netlink_ext_ack *extack)
 {
-       struct ocelot_vcap_filter *tmp;
-       struct list_head *pos, *n;
+       struct ocelot_mirror *m;
        int ret;
 
+       if (filter->block_id == VCAP_IS2 && filter->action.mirror_ena) {
+               m = ocelot_mirror_get(ocelot, filter->egress_port.value,
+                                     extack);
+               if (IS_ERR(m))
+                       return PTR_ERR(m);
+       }
+
        if (filter->block_id == VCAP_IS2 && filter->action.police_ena) {
                ret = ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
                                              &filter->action.pol);
@@ -970,6 +978,33 @@ static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
                        return ret;
        }
 
+       return 0;
+}
+
+static void
+ocelot_vcap_filter_del_aux_resources(struct ocelot *ocelot,
+                                    struct ocelot_vcap_filter *filter)
+{
+       if (filter->block_id == VCAP_IS2 && filter->action.police_ena)
+               ocelot_vcap_policer_del(ocelot, filter->action.pol_ix);
+
+       if (filter->block_id == VCAP_IS2 && filter->action.mirror_ena)
+               ocelot_mirror_put(ocelot);
+}
+
+static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
+                                          struct ocelot_vcap_block *block,
+                                          struct ocelot_vcap_filter *filter,
+                                          struct netlink_ext_ack *extack)
+{
+       struct ocelot_vcap_filter *tmp;
+       struct list_head *pos, *n;
+       int ret;
+
+       ret = ocelot_vcap_filter_add_aux_resources(ocelot, filter, extack);
+       if (ret)
+               return ret;
+
        block->count++;
 
        if (list_empty(&block->rules)) {
@@ -1168,7 +1203,7 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
        }
 
        /* Add filter to the linked list */
-       ret = ocelot_vcap_filter_add_to_block(ocelot, block, filter);
+       ret = ocelot_vcap_filter_add_to_block(ocelot, block, filter, extack);
        if (ret)
                return ret;
 
@@ -1199,11 +1234,7 @@ static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot,
 
        list_for_each_entry_safe(tmp, n, &block->rules, list) {
                if (ocelot_vcap_filter_equal(filter, tmp)) {
-                       if (tmp->block_id == VCAP_IS2 &&
-                           tmp->action.police_ena)
-                               ocelot_vcap_policer_del(ocelot,
-                                                       tmp->action.pol_ix);
-
+                       ocelot_vcap_filter_del_aux_resources(ocelot, tmp);
                        list_del(&tmp->list);
                        kfree(tmp);
                }
index 9c72b43..9c0861d 100644 (file)
@@ -5,6 +5,7 @@ nfp-objs := \
            nfpcore/nfp6000_pcie.o \
            nfpcore/nfp_cppcore.o \
            nfpcore/nfp_cpplib.o \
+           nfpcore/nfp_dev.o \
            nfpcore/nfp_hwinfo.o \
            nfpcore/nfp_mip.o \
            nfpcore/nfp_mutex.o \
@@ -19,12 +20,18 @@ nfp-objs := \
            ccm_mbox.o \
            devlink_param.o \
            nfp_asm.o \
+           nfd3/dp.o \
+           nfd3/rings.o \
+           nfd3/xsk.o \
+           nfdk/dp.o \
+           nfdk/rings.o \
            nfp_app.o \
            nfp_app_nic.o \
            nfp_devlink.o \
            nfp_hwmon.o \
            nfp_main.o \
            nfp_net_common.o \
+           nfp_net_dp.o \
            nfp_net_ctrl.o \
            nfp_net_debugdump.o \
            nfp_net_ethtool.o \
index 2c40a39..1b9421e 100644 (file)
@@ -956,7 +956,7 @@ nfp_flower_meter_action(struct nfp_app *app,
        meter_id = action->hw_index;
        if (!nfp_flower_search_meter_entry(app, meter_id)) {
                NL_SET_ERR_MSG_MOD(extack,
-                                  "can not offload flow table with unsupported police action.\n");
+                                  "cannot offload flow table with unsupported police action.");
                return -EOPNOTSUPP;
        }
 
index ac1dcfa..4d960a9 100644 (file)
@@ -266,7 +266,7 @@ nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
        int i, err, count = 0;
 
        reprs = rcu_dereference_protected(app->reprs[type],
-                                         lockdep_is_held(&app->pf->lock));
+                                         nfp_app_is_locked(app));
        if (!reprs)
                return 0;
 
@@ -295,7 +295,7 @@ nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
        if (!tot_repl)
                return 0;
 
-       lockdep_assert_held(&app->pf->lock);
+       assert_nfp_app_locked(app);
        if (!wait_event_timeout(priv->reify_wait_queue,
                                atomic_read(replies) >= tot_repl,
                                NFP_FL_REPLY_TIMEOUT)) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
new file mode 100644 (file)
index 0000000..7db56ab
--- /dev/null
@@ -0,0 +1,1350 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#include <linux/bpf_trace.h>
+#include <linux/netdevice.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "../nfp_net_xsk.h"
+#include "../crypto/crypto.h"
+#include "../crypto/fw.h"
+#include "nfd3.h"
+
+/* Transmit processing
+ *
+ * One queue controller peripheral queue is used for transmit.  The
+ * driver en-queues packets for transmit by advancing the write
+ * pointer.  The device indicates that packets have transmitted by
+ * advancing the read pointer.  The driver maintains a local copy of
+ * the read and write pointer in @struct nfp_net_tx_ring.  The driver
+ * keeps @wr_p in sync with the queue controller write pointer and can
+ * determine how many packets have been transmitted by comparing its
+ * copy of the read pointer @rd_p with the read pointer maintained by
+ * the queue controller peripheral.
+ */
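
A minimal sketch (not part of this commit) of the pointer arithmetic the
comment above describes: with free-running u32 pointers and a power-of-two
ring size (which the driver's D_IDX() masking relies on), occupancy is a
plain subtraction and the array index is a mask.

        /* illustrative helpers; names are hypothetical */
        static inline u32 ring_used(u32 wr_p, u32 rd_p)
        {
                return wr_p - rd_p;     /* well-defined across u32 wrap-around */
        }

        static inline u32 ring_idx(u32 ptr, u32 cnt)
        {
                return ptr & (cnt - 1); /* cnt must be a power of two */
        }
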
+
+/* Wrappers for deciding when to stop and restart TX queues */
+static int nfp_nfd3_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
+{
+       return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
+}
+
+static int nfp_nfd3_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
+{
+       return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
+}
+
+/**
+ * nfp_nfd3_tx_ring_stop() - stop tx ring
+ * @nd_q:    netdev queue
+ * @tx_ring: driver tx queue structure
+ *
+ * Safely stop TX ring.  Remember that while we are running .start_xmit()
+ * someone else may be cleaning the TX ring completions so we need to be
+ * extra careful here.
+ */
+static void
+nfp_nfd3_tx_ring_stop(struct netdev_queue *nd_q,
+                     struct nfp_net_tx_ring *tx_ring)
+{
+       netif_tx_stop_queue(nd_q);
+
+       /* We can race with the TX completion out of NAPI so recheck */
+       smp_mb();
+       if (unlikely(nfp_nfd3_tx_ring_should_wake(tx_ring)))
+               netif_tx_start_queue(nd_q);
+}
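
The smp_mb() above pairs with the barrier in nfp_nfd3_tx_complete() below;
together they form the usual lost-wakeup-free stop/wake protocol. A sketch
of the pairing (not part of the commit):

        /* producer (.start_xmit)          consumer (TX completion)
         *   netif_tx_stop_queue()           advance tx_ring->rd_p
         *   smp_mb()                        smp_mb()
         *   recheck space, maybe restart    if stopped and space, wake
         *
         * Each side publishes its update before testing the other side's
         * state, so at least one of them observes the condition that lets
         * the queue run again.
         */
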
+
+/**
+ * nfp_nfd3_tx_tso() - Set up Tx descriptor for LSO
+ * @r_vec: per-ring structure
+ * @txbuf: Pointer to driver soft TX descriptor
+ * @txd: Pointer to HW TX descriptor
+ * @skb: Pointer to SKB
+ * @md_bytes: Prepend length
+ *
+ * Set up the Tx descriptor for LSO; do nothing for non-LSO skbs.
+ */
+static void
+nfp_nfd3_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfd3_tx_buf *txbuf,
+               struct nfp_nfd3_tx_desc *txd, struct sk_buff *skb, u32 md_bytes)
+{
+       u32 l3_offset, l4_offset, hdrlen;
+       u16 mss;
+
+       if (!skb_is_gso(skb))
+               return;
+
+       if (!skb->encapsulation) {
+               l3_offset = skb_network_offset(skb);
+               l4_offset = skb_transport_offset(skb);
+               hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       } else {
+               l3_offset = skb_inner_network_offset(skb);
+               l4_offset = skb_inner_transport_offset(skb);
+               hdrlen = skb_inner_transport_header(skb) - skb->data +
+                       inner_tcp_hdrlen(skb);
+       }
+
+       txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
+       txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
+
+       mss = skb_shinfo(skb)->gso_size & NFD3_DESC_TX_MSS_MASK;
+       txd->l3_offset = l3_offset - md_bytes;
+       txd->l4_offset = l4_offset - md_bytes;
+       txd->lso_hdrlen = hdrlen - md_bytes;
+       txd->mss = cpu_to_le16(mss);
+       txd->flags |= NFD3_DESC_TX_LSO;
+
+       u64_stats_update_begin(&r_vec->tx_sync);
+       r_vec->tx_lso++;
+       u64_stats_update_end(&r_vec->tx_sync);
+}
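
The real_len adjustment above accounts for header replication: the device
emits gso_segs packets, each carrying its own copy of the hdrlen bytes of
headers, so the transmitted byte count grows by hdrlen * (gso_segs - 1).
A worked example with illustrative numbers:

        /* skb->len = 64066 (64000 bytes of payload + 66 bytes of headers),
         * gso_size = 1448  =>  gso_segs = 45
         * real_len = 64066 + 66 * 44 = 66970
         * i.e. 2904 extra bytes of replicated Ethernet/IP/TCP headers
         */
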
+
+/**
+ * nfp_nfd3_tx_csum() - Set TX CSUM offload flags in TX descriptor
+ * @dp:  NFP Net data path struct
+ * @r_vec: per-ring structure
+ * @txbuf: Pointer to driver soft TX descriptor
+ * @txd: Pointer to TX descriptor
+ * @skb: Pointer to SKB
+ *
+ * This function sets the TX checksum flags in the TX descriptor based
+ * on the configuration and the protocol of the packet to be transmitted.
+ */
+static void
+nfp_nfd3_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+                struct nfp_nfd3_tx_buf *txbuf, struct nfp_nfd3_tx_desc *txd,
+                struct sk_buff *skb)
+{
+       struct ipv6hdr *ipv6h;
+       struct iphdr *iph;
+       u8 l4_hdr;
+
+       if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
+               return;
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return;
+
+       txd->flags |= NFD3_DESC_TX_CSUM;
+       if (skb->encapsulation)
+               txd->flags |= NFD3_DESC_TX_ENCAP;
+
+       iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+       ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+       if (iph->version == 4) {
+               txd->flags |= NFD3_DESC_TX_IP4_CSUM;
+               l4_hdr = iph->protocol;
+       } else if (ipv6h->version == 6) {
+               l4_hdr = ipv6h->nexthdr;
+       } else {
+               nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
+               return;
+       }
+
+       switch (l4_hdr) {
+       case IPPROTO_TCP:
+               txd->flags |= NFD3_DESC_TX_TCP_CSUM;
+               break;
+       case IPPROTO_UDP:
+               txd->flags |= NFD3_DESC_TX_UDP_CSUM;
+               break;
+       default:
+               nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
+               return;
+       }
+
+       u64_stats_update_begin(&r_vec->tx_sync);
+       if (skb->encapsulation)
+               r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
+       else
+               r_vec->hw_csum_tx += txbuf->pkt_cnt;
+       u64_stats_update_end(&r_vec->tx_sync);
+}
+
+static int nfp_nfd3_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
+{
+       struct metadata_dst *md_dst = skb_metadata_dst(skb);
+       unsigned char *data;
+       u32 meta_id = 0;
+       int md_bytes;
+
+       if (likely(!md_dst && !tls_handle))
+               return 0;
+       if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) {
+               if (!tls_handle)
+                       return 0;
+               md_dst = NULL;
+       }
+
+       md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8;
+
+       if (unlikely(skb_cow_head(skb, md_bytes)))
+               return -ENOMEM;
+
+       meta_id = 0;
+       data = skb_push(skb, md_bytes) + md_bytes;
+       if (md_dst) {
+               data -= 4;
+               put_unaligned_be32(md_dst->u.port_info.port_id, data);
+               meta_id = NFP_NET_META_PORTID;
+       }
+       if (tls_handle) {
+               /* conn handle is opaque, we just use u64 to be able to quickly
+                * compare it to zero
+                */
+               data -= 8;
+               memcpy(data, &tls_handle, sizeof(tls_handle));
+               meta_id <<= NFP_NET_META_FIELD_SIZE;
+               meta_id |= NFP_NET_META_CONN_HANDLE;
+       }
+
+       data -= 4;
+       put_unaligned_be32(meta_id, data);
+
+       return md_bytes;
+}
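
Since the prepend above is built back to front, a packet carrying both a
port ID and a TLS handle ends up with md_bytes = 16 laid out as follows
(derived from the code; values are illustrative):

        /* offset  0: meta_id word, big-endian:
         *            (NFP_NET_META_PORTID << 4) | NFP_NET_META_CONN_HANDLE
         * offset  4: TLS connection handle, 8 bytes
         * offset 12: mux port ID, 4 bytes, big-endian
         * offset 16: original packet data
         */
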
+
+/**
+ * nfp_nfd3_tx() - Main transmit entry point
+ * @skb:    SKB to transmit
+ * @netdev: netdev structure
+ *
+ * Return: NETDEV_TX_OK on success.
+ */
+netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+       struct nfp_net *nn = netdev_priv(netdev);
+       int f, nr_frags, wr_idx, md_bytes;
+       struct nfp_net_tx_ring *tx_ring;
+       struct nfp_net_r_vector *r_vec;
+       struct nfp_nfd3_tx_buf *txbuf;
+       struct nfp_nfd3_tx_desc *txd;
+       struct netdev_queue *nd_q;
+       const skb_frag_t *frag;
+       struct nfp_net_dp *dp;
+       dma_addr_t dma_addr;
+       unsigned int fsize;
+       u64 tls_handle = 0;
+       u16 qidx;
+
+       dp = &nn->dp;
+       qidx = skb_get_queue_mapping(skb);
+       tx_ring = &dp->tx_rings[qidx];
+       r_vec = tx_ring->r_vec;
+
+       nr_frags = skb_shinfo(skb)->nr_frags;
+
+       if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
+               nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
+                          qidx, tx_ring->wr_p, tx_ring->rd_p);
+               nd_q = netdev_get_tx_queue(dp->netdev, qidx);
+               netif_tx_stop_queue(nd_q);
+               nfp_net_tx_xmit_more_flush(tx_ring);
+               u64_stats_update_begin(&r_vec->tx_sync);
+               r_vec->tx_busy++;
+               u64_stats_update_end(&r_vec->tx_sync);
+               return NETDEV_TX_BUSY;
+       }
+
+       skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
+       if (unlikely(!skb)) {
+               nfp_net_tx_xmit_more_flush(tx_ring);
+               return NETDEV_TX_OK;
+       }
+
+       md_bytes = nfp_nfd3_prep_tx_meta(skb, tls_handle);
+       if (unlikely(md_bytes < 0))
+               goto err_flush;
+
+       /* Start with the head skbuf */
+       dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
+                                 DMA_TO_DEVICE);
+       if (dma_mapping_error(dp->dev, dma_addr))
+               goto err_dma_err;
+
+       wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+       /* Stash the soft descriptor of the head then initialize it */
+       txbuf = &tx_ring->txbufs[wr_idx];
+       txbuf->skb = skb;
+       txbuf->dma_addr = dma_addr;
+       txbuf->fidx = -1;
+       txbuf->pkt_cnt = 1;
+       txbuf->real_len = skb->len;
+
+       /* Build TX descriptor */
+       txd = &tx_ring->txds[wr_idx];
+       txd->offset_eop = (nr_frags ? 0 : NFD3_DESC_TX_EOP) | md_bytes;
+       txd->dma_len = cpu_to_le16(skb_headlen(skb));
+       nfp_desc_set_dma_addr(txd, dma_addr);
+       txd->data_len = cpu_to_le16(skb->len);
+
+       txd->flags = 0;
+       txd->mss = 0;
+       txd->lso_hdrlen = 0;
+
+       /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
+       nfp_nfd3_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
+       nfp_nfd3_tx_csum(dp, r_vec, txbuf, txd, skb);
+       if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
+               txd->flags |= NFD3_DESC_TX_VLAN;
+               txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
+       }
+
+       /* Gather DMA */
+       if (nr_frags > 0) {
+               __le64 second_half;
+
+               /* all descs must match except for the addr, length and EOP */
+               second_half = txd->vals8[1];
+
+               for (f = 0; f < nr_frags; f++) {
+                       frag = &skb_shinfo(skb)->frags[f];
+                       fsize = skb_frag_size(frag);
+
+                       dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
+                                                   fsize, DMA_TO_DEVICE);
+                       if (dma_mapping_error(dp->dev, dma_addr))
+                               goto err_unmap;
+
+                       wr_idx = D_IDX(tx_ring, wr_idx + 1);
+                       tx_ring->txbufs[wr_idx].skb = skb;
+                       tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
+                       tx_ring->txbufs[wr_idx].fidx = f;
+
+                       txd = &tx_ring->txds[wr_idx];
+                       txd->dma_len = cpu_to_le16(fsize);
+                       nfp_desc_set_dma_addr(txd, dma_addr);
+                       txd->offset_eop = md_bytes |
+                               ((f == nr_frags - 1) ? NFD3_DESC_TX_EOP : 0);
+                       txd->vals8[1] = second_half;
+               }
+
+               u64_stats_update_begin(&r_vec->tx_sync);
+               r_vec->tx_gather++;
+               u64_stats_update_end(&r_vec->tx_sync);
+       }
+
+       skb_tx_timestamp(skb);
+
+       nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+
+       tx_ring->wr_p += nr_frags + 1;
+       if (nfp_nfd3_tx_ring_should_stop(tx_ring))
+               nfp_nfd3_tx_ring_stop(nd_q, tx_ring);
+
+       tx_ring->wr_ptr_add += nr_frags + 1;
+       if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
+               nfp_net_tx_xmit_more_flush(tx_ring);
+
+       return NETDEV_TX_OK;
+
+err_unmap:
+       while (--f >= 0) {
+               frag = &skb_shinfo(skb)->frags[f];
+               dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
+                              skb_frag_size(frag), DMA_TO_DEVICE);
+               tx_ring->txbufs[wr_idx].skb = NULL;
+               tx_ring->txbufs[wr_idx].dma_addr = 0;
+               tx_ring->txbufs[wr_idx].fidx = -2;
+               wr_idx = wr_idx - 1;
+               if (wr_idx < 0)
+                       wr_idx += tx_ring->cnt;
+       }
+       dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
+                        skb_headlen(skb), DMA_TO_DEVICE);
+       tx_ring->txbufs[wr_idx].skb = NULL;
+       tx_ring->txbufs[wr_idx].dma_addr = 0;
+       tx_ring->txbufs[wr_idx].fidx = -2;
+err_dma_err:
+       nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+err_flush:
+       nfp_net_tx_xmit_more_flush(tx_ring);
+       u64_stats_update_begin(&r_vec->tx_sync);
+       r_vec->tx_errors++;
+       u64_stats_update_end(&r_vec->tx_sync);
+       nfp_net_tls_tx_undo(skb, tls_handle);
+       dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
+}
+
+/**
+ * nfp_nfd3_tx_complete() - Handle completed TX packets
+ * @tx_ring:   TX ring structure
+ * @budget:    NAPI budget (only used as bool to determine if in NAPI context)
+ */
+void nfp_nfd3_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
+{
+       struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+       u32 done_pkts = 0, done_bytes = 0;
+       struct netdev_queue *nd_q;
+       u32 qcp_rd_p;
+       int todo;
+
+       if (tx_ring->wr_p == tx_ring->rd_p)
+               return;
+
+       /* Work out how many descriptors have been transmitted */
+       qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
+
+       if (qcp_rd_p == tx_ring->qcp_rd_p)
+               return;
+
+       todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+
+       while (todo--) {
+               const skb_frag_t *frag;
+               struct nfp_nfd3_tx_buf *tx_buf;
+               struct sk_buff *skb;
+               int fidx, nr_frags;
+               int idx;
+
+               idx = D_IDX(tx_ring, tx_ring->rd_p++);
+               tx_buf = &tx_ring->txbufs[idx];
+
+               skb = tx_buf->skb;
+               if (!skb)
+                       continue;
+
+               nr_frags = skb_shinfo(skb)->nr_frags;
+               fidx = tx_buf->fidx;
+
+               if (fidx == -1) {
+                       /* unmap head */
+                       dma_unmap_single(dp->dev, tx_buf->dma_addr,
+                                        skb_headlen(skb), DMA_TO_DEVICE);
+
+                       done_pkts += tx_buf->pkt_cnt;
+                       done_bytes += tx_buf->real_len;
+               } else {
+                       /* unmap fragment */
+                       frag = &skb_shinfo(skb)->frags[fidx];
+                       dma_unmap_page(dp->dev, tx_buf->dma_addr,
+                                      skb_frag_size(frag), DMA_TO_DEVICE);
+               }
+
+               /* check for last gather fragment */
+               if (fidx == nr_frags - 1)
+                       napi_consume_skb(skb, budget);
+
+               tx_buf->dma_addr = 0;
+               tx_buf->skb = NULL;
+               tx_buf->fidx = -2;
+       }
+
+       tx_ring->qcp_rd_p = qcp_rd_p;
+
+       u64_stats_update_begin(&r_vec->tx_sync);
+       r_vec->tx_bytes += done_bytes;
+       r_vec->tx_pkts += done_pkts;
+       u64_stats_update_end(&r_vec->tx_sync);
+
+       if (!dp->netdev)
+               return;
+
+       nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+       netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
+       if (nfp_nfd3_tx_ring_should_wake(tx_ring)) {
+               /* Make sure TX thread will see updated tx_ring->rd_p */
+               smp_mb();
+
+               if (unlikely(netif_tx_queue_stopped(nd_q)))
+                       netif_tx_wake_queue(nd_q);
+       }
+
+       WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+                 "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+                 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+}
+
+static bool nfp_nfd3_xdp_complete(struct nfp_net_tx_ring *tx_ring)
+{
+       struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+       u32 done_pkts = 0, done_bytes = 0;
+       bool done_all;
+       int idx, todo;
+       u32 qcp_rd_p;
+
+       /* Work out how many descriptors have been transmitted */
+       qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
+
+       if (qcp_rd_p == tx_ring->qcp_rd_p)
+               return true;
+
+       todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+
+       done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
+       todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
+
+       tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
+
+       done_pkts = todo;
+       while (todo--) {
+               idx = D_IDX(tx_ring, tx_ring->rd_p);
+               tx_ring->rd_p++;
+
+               done_bytes += tx_ring->txbufs[idx].real_len;
+       }
+
+       u64_stats_update_begin(&r_vec->tx_sync);
+       r_vec->tx_bytes += done_bytes;
+       r_vec->tx_pkts += done_pkts;
+       u64_stats_update_end(&r_vec->tx_sync);
+
+       WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+                 "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+                 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+
+       return done_all;
+}
+
+/* Receive processing
+ */
+
+static void *
+nfp_nfd3_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
+{
+       void *frag;
+
+       if (!dp->xdp_prog) {
+               frag = napi_alloc_frag(dp->fl_bufsz);
+               if (unlikely(!frag))
+                       return NULL;
+       } else {
+               struct page *page;
+
+               page = dev_alloc_page();
+               if (unlikely(!page))
+                       return NULL;
+               frag = page_address(page);
+       }
+
+       *dma_addr = nfp_net_dma_map_rx(dp, frag);
+       if (dma_mapping_error(dp->dev, *dma_addr)) {
+               nfp_net_free_frag(frag, dp->xdp_prog);
+               nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
+               return NULL;
+       }
+
+       return frag;
+}
+
+/**
+ * nfp_nfd3_rx_give_one() - Put a mapped buffer on the software and hardware rings
+ * @dp:                NFP Net data path struct
+ * @rx_ring:   RX ring structure
+ * @frag:      page fragment buffer
+ * @dma_addr:  DMA address of the buffer mapping
+ */
+static void
+nfp_nfd3_rx_give_one(const struct nfp_net_dp *dp,
+                    struct nfp_net_rx_ring *rx_ring,
+                    void *frag, dma_addr_t dma_addr)
+{
+       unsigned int wr_idx;
+
+       wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
+
+       nfp_net_dma_sync_dev_rx(dp, dma_addr);
+
+       /* Stash SKB and DMA address away */
+       rx_ring->rxbufs[wr_idx].frag = frag;
+       rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
+
+       /* Fill freelist descriptor */
+       rx_ring->rxds[wr_idx].fld.reserved = 0;
+       rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
+       nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
+                             dma_addr + dp->rx_dma_off);
+
+       rx_ring->wr_p++;
+       if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
+               /* Update write pointer of the freelist queue. Make
+                * sure all writes are flushed before telling the hardware.
+                */
+               wmb();
+               nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
+       }
+}
+
+/**
+ * nfp_nfd3_rx_ring_fill_freelist() - Give buffers from the ring to FW
+ * @dp:             NFP Net data path struct
+ * @rx_ring: RX ring to fill
+ */
+void nfp_nfd3_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+                                   struct nfp_net_rx_ring *rx_ring)
+{
+       unsigned int i;
+
+       if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
+               return nfp_net_xsk_rx_ring_fill_freelist(rx_ring);
+
+       for (i = 0; i < rx_ring->cnt - 1; i++)
+               nfp_nfd3_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
+                                    rx_ring->rxbufs[i].dma_addr);
+}
+
+/**
+ * nfp_nfd3_rx_csum_has_errors() - check all RX descriptor csum flags for errors at once
+ * @flags: RX descriptor flags field in CPU byte order
+ */
+static int nfp_nfd3_rx_csum_has_errors(u16 flags)
+{
+       u16 csum_all_checked, csum_all_ok;
+
+       csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
+       csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
+
+       return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
+}
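
The comparison above works because each "checksum checked" flag sits
PCIE_DESC_RX_CSUM_OK_SHIFT bits above its "checksum OK" counterpart, so
shifting the OK mask left must reproduce the checked mask bit for bit.
A hedged worked example (the bit positions are an assumption):

        /* say TCP_CSUM is bit 5 and TCP_CSUM_OK is bit 4 (shift of 1):
         *
         * checked, csum good:  checked = 0b100000, ok = 0b010000,
         *                      ok << 1 = 0b100000  ->  equal, no error
         * checked, csum bad:   checked = 0b100000, ok = 0b000000,
         *                      ok << 1 = 0b000000  ->  differ, error
         */
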
+
+/**
+ * nfp_nfd3_rx_csum() - set SKB checksum field based on RX descriptor flags
+ * @dp:  NFP Net data path struct
+ * @r_vec: per-ring structure
+ * @rxd: Pointer to RX descriptor
+ * @meta: Parsed metadata prepend
+ * @skb: Pointer to SKB
+ */
+void
+nfp_nfd3_rx_csum(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+                const struct nfp_net_rx_desc *rxd,
+                const struct nfp_meta_parsed *meta, struct sk_buff *skb)
+{
+       skb_checksum_none_assert(skb);
+
+       if (!(dp->netdev->features & NETIF_F_RXCSUM))
+               return;
+
+       if (meta->csum_type) {
+               skb->ip_summed = meta->csum_type;
+               skb->csum = meta->csum;
+               u64_stats_update_begin(&r_vec->rx_sync);
+               r_vec->hw_csum_rx_complete++;
+               u64_stats_update_end(&r_vec->rx_sync);
+               return;
+       }
+
+       if (nfp_nfd3_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
+               u64_stats_update_begin(&r_vec->rx_sync);
+               r_vec->hw_csum_rx_error++;
+               u64_stats_update_end(&r_vec->rx_sync);
+               return;
+       }
+
+       /* Assume that the firmware will never report inner CSUM_OK unless outer
+        * L4 headers were successfully parsed. FW will always report zero UDP
+        * checksum as CSUM_OK.
+        */
+       if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
+           rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
+               __skb_incr_checksum_unnecessary(skb);
+               u64_stats_update_begin(&r_vec->rx_sync);
+               r_vec->hw_csum_rx_ok++;
+               u64_stats_update_end(&r_vec->rx_sync);
+       }
+
+       if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
+           rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
+               __skb_incr_checksum_unnecessary(skb);
+               u64_stats_update_begin(&r_vec->rx_sync);
+               r_vec->hw_csum_rx_inner_ok++;
+               u64_stats_update_end(&r_vec->rx_sync);
+       }
+}
+
+static void
+nfp_nfd3_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
+                 unsigned int type, __be32 *hash)
+{
+       if (!(netdev->features & NETIF_F_RXHASH))
+               return;
+
+       switch (type) {
+       case NFP_NET_RSS_IPV4:
+       case NFP_NET_RSS_IPV6:
+       case NFP_NET_RSS_IPV6_EX:
+               meta->hash_type = PKT_HASH_TYPE_L3;
+               break;
+       default:
+               meta->hash_type = PKT_HASH_TYPE_L4;
+               break;
+       }
+
+       meta->hash = get_unaligned_be32(hash);
+}
+
+static void
+nfp_nfd3_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
+                      void *data, struct nfp_net_rx_desc *rxd)
+{
+       struct nfp_net_rx_hash *rx_hash = data;
+
+       if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
+               return;
+
+       nfp_nfd3_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
+                         &rx_hash->hash);
+}
+
+bool
+nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
+                   void *data, void *pkt, unsigned int pkt_len, int meta_len)
+{
+       u32 meta_info;
+
+       meta_info = get_unaligned_be32(data);
+       data += 4;
+
+       while (meta_info) {
+               switch (meta_info & NFP_NET_META_FIELD_MASK) {
+               case NFP_NET_META_HASH:
+                       meta_info >>= NFP_NET_META_FIELD_SIZE;
+                       nfp_nfd3_set_hash(netdev, meta,
+                                         meta_info & NFP_NET_META_FIELD_MASK,
+                                         (__be32 *)data);
+                       data += 4;
+                       break;
+               case NFP_NET_META_MARK:
+                       meta->mark = get_unaligned_be32(data);
+                       data += 4;
+                       break;
+               case NFP_NET_META_PORTID:
+                       meta->portid = get_unaligned_be32(data);
+                       data += 4;
+                       break;
+               case NFP_NET_META_CSUM:
+                       meta->csum_type = CHECKSUM_COMPLETE;
+                       meta->csum =
+                               (__force __wsum)__get_unaligned_cpu32(data);
+                       data += 4;
+                       break;
+               case NFP_NET_META_RESYNC_INFO:
+                       if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
+                                                     pkt_len))
+                               return false;
+                       data += sizeof(struct nfp_net_tls_resync_req);
+                       break;
+               default:
+                       return true;
+               }
+
+               meta_info >>= NFP_NET_META_FIELD_SIZE;
+       }
+
+       return data != pkt;
+}
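
To make the walk above concrete: the leading 32-bit word is a chain of
4-bit field types consumed low nibble first, and each type owns the next
chunk of the metadata area. A sketch with illustrative values:

        /* meta_info = (NFP_NET_META_MARK << 8) | (rss_type << 4) |
         *             NFP_NET_META_HASH
         *
         * pass 1: low nibble is HASH; the hash case shifts once more,
         *         takes the next nibble as the RSS hash type, then
         *         consumes a 4-byte hash value from data.
         * pass 2: low nibble is now MARK; consumes a 4-byte mark.
         *
         * The final "data != pkt" flags an error unless the cursor lands
         * exactly on the start of the packet.
         */
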
+
+static void
+nfp_nfd3_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+                struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
+                struct sk_buff *skb)
+{
+       u64_stats_update_begin(&r_vec->rx_sync);
+       r_vec->rx_drops++;
+       /* If we have both skb and rxbuf the replacement buffer allocation
+        * must have failed, count this as an alloc failure.
+        */
+       if (skb && rxbuf)
+               r_vec->rx_replace_buf_alloc_fail++;
+       u64_stats_update_end(&r_vec->rx_sync);
+
+       /* The skb is built on top of the frag; freeing the skb would also
+        * free the frag, so take an extra page ref to be able to reuse it.
+        */
+       if (skb && rxbuf && skb->head == rxbuf->frag)
+               page_ref_inc(virt_to_head_page(rxbuf->frag));
+       if (rxbuf)
+               nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
+       if (skb)
+               dev_kfree_skb_any(skb);
+}
+
+static bool
+nfp_nfd3_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
+                   struct nfp_net_tx_ring *tx_ring,
+                   struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
+                   unsigned int pkt_len, bool *completed)
+{
+       unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
+       struct nfp_nfd3_tx_buf *txbuf;
+       struct nfp_nfd3_tx_desc *txd;
+       int wr_idx;
+
+       /* Reject if xdp_adjust_tail grew the packet beyond the DMA-mapped area */
+       if (pkt_len + dma_off > dma_map_sz)
+               return false;
+
+       if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+               if (!*completed) {
+                       nfp_nfd3_xdp_complete(tx_ring);
+                       *completed = true;
+               }
+
+               if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+                       nfp_nfd3_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
+                                        NULL);
+                       return false;
+               }
+       }
+
+       wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+       /* Stash the soft descriptor of the head then initialize it */
+       txbuf = &tx_ring->txbufs[wr_idx];
+
+       nfp_nfd3_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);
+
+       txbuf->frag = rxbuf->frag;
+       txbuf->dma_addr = rxbuf->dma_addr;
+       txbuf->fidx = -1;
+       txbuf->pkt_cnt = 1;
+       txbuf->real_len = pkt_len;
+
+       dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
+                                  pkt_len, DMA_BIDIRECTIONAL);
+
+       /* Build TX descriptor */
+       txd = &tx_ring->txds[wr_idx];
+       txd->offset_eop = NFD3_DESC_TX_EOP;
+       txd->dma_len = cpu_to_le16(pkt_len);
+       nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
+       txd->data_len = cpu_to_le16(pkt_len);
+
+       txd->flags = 0;
+       txd->mss = 0;
+       txd->lso_hdrlen = 0;
+
+       tx_ring->wr_p++;
+       tx_ring->wr_ptr_add++;
+       return true;
+}
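
One subtlety worth spelling out: the TX slot being reused still owns a
frag from an earlier XDP_TX, so the code above swaps buffers instead of
allocating, keeping the number of DMA-mapped frags constant:

        /* rxbuf's frag     ------------->  txbuf (queued for transmit)
         * txbuf's old frag ------------->  RX freelist (rx_give_one)
         */
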
+
+/**
+ * nfp_nfd3_rx() - receive up to @budget packets on @rx_ring
+ * @rx_ring:   RX ring to receive from
+ * @budget:    NAPI budget
+ *
+ * Note: this function is factored out of the napi poll function to keep
+ * the packet receive path cleanly separated from the other bookkeeping
+ * the poll function performs.
+ *
+ * Return: Number of packets received.
+ */
+static int nfp_nfd3_rx(struct nfp_net_rx_ring *rx_ring, int budget)
+{
+       struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+       struct nfp_net_tx_ring *tx_ring;
+       struct bpf_prog *xdp_prog;
+       bool xdp_tx_cmpl = false;
+       unsigned int true_bufsz;
+       struct sk_buff *skb;
+       int pkts_polled = 0;
+       struct xdp_buff xdp;
+       int idx;
+
+       xdp_prog = READ_ONCE(dp->xdp_prog);
+       true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
+       xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
+                     &rx_ring->xdp_rxq);
+       tx_ring = r_vec->xdp_ring;
+
+       while (pkts_polled < budget) {
+               unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+               struct nfp_net_rx_buf *rxbuf;
+               struct nfp_net_rx_desc *rxd;
+               struct nfp_meta_parsed meta;
+               bool redir_egress = false;
+               struct net_device *netdev;
+               dma_addr_t new_dma_addr;
+               u32 meta_len_xdp = 0;
+               void *new_frag;
+
+               idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+               rxd = &rx_ring->rxds[idx];
+               if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+                       break;
+
+               /* Memory barrier to ensure that we won't do other reads
+                * before the DD bit.
+                */
+               dma_rmb();
+
+               memset(&meta, 0, sizeof(meta));
+
+               rx_ring->rd_p++;
+               pkts_polled++;
+
+               rxbuf = &rx_ring->rxbufs[idx];
+               /*         < meta_len >
+                *  <-- [rx_offset] -->
+                *  ---------------------------------------------------------
+                * | [XX] |  metadata  |             packet           | XXXX |
+                *  ---------------------------------------------------------
+                *         <---------------- data_len --------------->
+                *
+                * The rx_offset is fixed for all packets, the meta_len can vary
+                * on a packet by packet basis. If rx_offset is set to zero
+                * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
+                * buffer and is immediately followed by the packet (no [XX]).
+                */
+               meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+               data_len = le16_to_cpu(rxd->rxd.data_len);
+               pkt_len = data_len - meta_len;
+
+               pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+               if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+                       pkt_off += meta_len;
+               else
+                       pkt_off += dp->rx_offset;
+               meta_off = pkt_off - meta_len;
+
+               /* Stats update */
+               u64_stats_update_begin(&r_vec->rx_sync);
+               r_vec->rx_pkts++;
+               r_vec->rx_bytes += pkt_len;
+               u64_stats_update_end(&r_vec->rx_sync);
+
+               if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
+                            (dp->rx_offset && meta_len > dp->rx_offset))) {
+                       nn_dp_warn(dp, "oversized RX packet metadata %u\n",
+                                  meta_len);
+                       nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+                       continue;
+               }
+
+               nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
+                                       data_len);
+
+               if (!dp->chained_metadata_format) {
+                       nfp_nfd3_set_hash_desc(dp->netdev, &meta,
+                                              rxbuf->frag + meta_off, rxd);
+               } else if (meta_len) {
+                       if (unlikely(nfp_nfd3_parse_meta(dp->netdev, &meta,
+                                                        rxbuf->frag + meta_off,
+                                                        rxbuf->frag + pkt_off,
+                                                        pkt_len, meta_len))) {
+                               nn_dp_warn(dp, "invalid RX packet metadata\n");
+                               nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf,
+                                                NULL);
+                               continue;
+                       }
+               }
+
+               if (xdp_prog && !meta.portid) {
+                       void *orig_data = rxbuf->frag + pkt_off;
+                       unsigned int dma_off;
+                       int act;
+
+                       xdp_prepare_buff(&xdp,
+                                        rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
+                                        pkt_off - NFP_NET_RX_BUF_HEADROOM,
+                                        pkt_len, true);
+
+                       act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+                       pkt_len = xdp.data_end - xdp.data;
+                       pkt_off += xdp.data - orig_data;
+
+                       switch (act) {
+                       case XDP_PASS:
+                               meta_len_xdp = xdp.data - xdp.data_meta;
+                               break;
+                       case XDP_TX:
+                               dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
+                               if (unlikely(!nfp_nfd3_tx_xdp_buf(dp, rx_ring,
+                                                                 tx_ring,
+                                                                 rxbuf,
+                                                                 dma_off,
+                                                                 pkt_len,
+                                                                 &xdp_tx_cmpl)))
+                                       trace_xdp_exception(dp->netdev,
+                                                           xdp_prog, act);
+                               continue;
+                       default:
+                               bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
+                               fallthrough;
+                       case XDP_ABORTED:
+                               trace_xdp_exception(dp->netdev, xdp_prog, act);
+                               fallthrough;
+                       case XDP_DROP:
+                               nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag,
+                                                    rxbuf->dma_addr);
+                               continue;
+                       }
+               }
+
+               if (likely(!meta.portid)) {
+                       netdev = dp->netdev;
+               } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
+                       struct nfp_net *nn = netdev_priv(dp->netdev);
+
+                       nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
+                                           pkt_len);
+                       nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag,
+                                            rxbuf->dma_addr);
+                       continue;
+               } else {
+                       struct nfp_net *nn;
+
+                       nn = netdev_priv(dp->netdev);
+                       netdev = nfp_app_dev_get(nn->app, meta.portid,
+                                                &redir_egress);
+                       if (unlikely(!netdev)) {
+                               nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf,
+                                                NULL);
+                               continue;
+                       }
+
+                       if (nfp_netdev_is_nfp_repr(netdev))
+                               nfp_repr_inc_rx_stats(netdev, pkt_len);
+               }
+
+               skb = build_skb(rxbuf->frag, true_bufsz);
+               if (unlikely(!skb)) {
+                       nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+                       continue;
+               }
+               new_frag = nfp_nfd3_napi_alloc_one(dp, &new_dma_addr);
+               if (unlikely(!new_frag)) {
+                       nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+                       continue;
+               }
+
+               nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+               nfp_nfd3_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+               skb_reserve(skb, pkt_off);
+               skb_put(skb, pkt_len);
+
+               skb->mark = meta.mark;
+               skb_set_hash(skb, meta.hash, meta.hash_type);
+
+               skb_record_rx_queue(skb, rx_ring->idx);
+               skb->protocol = eth_type_trans(skb, netdev);
+
+               nfp_nfd3_rx_csum(dp, r_vec, rxd, &meta, skb);
+
+#ifdef CONFIG_TLS_DEVICE
+               if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
+                       skb->decrypted = true;
+                       u64_stats_update_begin(&r_vec->rx_sync);
+                       r_vec->hw_tls_rx++;
+                       u64_stats_update_end(&r_vec->rx_sync);
+               }
+#endif
+
+               if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                              le16_to_cpu(rxd->rxd.vlan));
+               if (meta_len_xdp)
+                       skb_metadata_set(skb, meta_len_xdp);
+
+               if (likely(!redir_egress)) {
+                       napi_gro_receive(&rx_ring->r_vec->napi, skb);
+               } else {
+                       skb->dev = netdev;
+                       skb_reset_network_header(skb);
+                       __skb_push(skb, ETH_HLEN);
+                       dev_queue_xmit(skb);
+               }
+       }
+
+       if (xdp_prog) {
+               if (tx_ring->wr_ptr_add)
+                       nfp_net_tx_xmit_more_flush(tx_ring);
+               else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
+                        !xdp_tx_cmpl)
+                       if (!nfp_nfd3_xdp_complete(tx_ring))
+                               pkts_polled = budget;
+       }
+
+       return pkts_polled;
+}
+
+/**
+ * nfp_nfd3_poll() - napi poll function
+ * @napi:    NAPI structure
+ * @budget:  NAPI budget
+ *
+ * Return: number of packets polled.
+ */
+int nfp_nfd3_poll(struct napi_struct *napi, int budget)
+{
+       struct nfp_net_r_vector *r_vec =
+               container_of(napi, struct nfp_net_r_vector, napi);
+       unsigned int pkts_polled = 0;
+
+       if (r_vec->tx_ring)
+               nfp_nfd3_tx_complete(r_vec->tx_ring, budget);
+       if (r_vec->rx_ring)
+               pkts_polled = nfp_nfd3_rx(r_vec->rx_ring, budget);
+
+       if (pkts_polled < budget)
+               if (napi_complete_done(napi, pkts_polled))
+                       nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+
+       if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
+               struct dim_sample dim_sample = {};
+               unsigned int start;
+               u64 pkts, bytes;
+
+               do {
+                       start = u64_stats_fetch_begin(&r_vec->rx_sync);
+                       pkts = r_vec->rx_pkts;
+                       bytes = r_vec->rx_bytes;
+               } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+
+               dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+               net_dim(&r_vec->rx_dim, dim_sample);
+       }
+
+       if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
+               struct dim_sample dim_sample = {};
+               unsigned int start;
+               u64 pkts, bytes;
+
+               do {
+                       start = u64_stats_fetch_begin(&r_vec->tx_sync);
+                       pkts = r_vec->tx_pkts;
+                       bytes = r_vec->tx_bytes;
+               } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+
+               dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+               net_dim(&r_vec->tx_dim, dim_sample);
+       }
+
+       return pkts_polled;
+}
+
+/* Control device data path
+ */
+
+bool
+nfp_nfd3_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+                    struct sk_buff *skb, bool old)
+{
+       unsigned int real_len = skb->len, meta_len = 0;
+       struct nfp_net_tx_ring *tx_ring;
+       struct nfp_nfd3_tx_buf *txbuf;
+       struct nfp_nfd3_tx_desc *txd;
+       struct nfp_net_dp *dp;
+       dma_addr_t dma_addr;
+       int wr_idx;
+
+       dp = &r_vec->nfp_net->dp;
+       tx_ring = r_vec->tx_ring;
+
+       if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
+               nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
+               goto err_free;
+       }
+
+       if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+               u64_stats_update_begin(&r_vec->tx_sync);
+               r_vec->tx_busy++;
+               u64_stats_update_end(&r_vec->tx_sync);
+               if (!old)
+                       __skb_queue_tail(&r_vec->queue, skb);
+               else
+                       __skb_queue_head(&r_vec->queue, skb);
+               return true;
+       }
+
+       if (nfp_app_ctrl_has_meta(nn->app)) {
+               if (unlikely(skb_headroom(skb) < 8)) {
+                       nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
+                       goto err_free;
+               }
+               meta_len = 8;
+               put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
+               put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
+       }
+
+       /* Start with the head skbuf */
+       dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
+                                 DMA_TO_DEVICE);
+       if (dma_mapping_error(dp->dev, dma_addr))
+               goto err_dma_warn;
+
+       wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+       /* Stash the soft descriptor of the head then initialize it */
+       txbuf = &tx_ring->txbufs[wr_idx];
+       txbuf->skb = skb;
+       txbuf->dma_addr = dma_addr;
+       txbuf->fidx = -1;
+       txbuf->pkt_cnt = 1;
+       txbuf->real_len = real_len;
+
+       /* Build TX descriptor */
+       txd = &tx_ring->txds[wr_idx];
+       txd->offset_eop = meta_len | NFD3_DESC_TX_EOP;
+       txd->dma_len = cpu_to_le16(skb_headlen(skb));
+       nfp_desc_set_dma_addr(txd, dma_addr);
+       txd->data_len = cpu_to_le16(skb->len);
+
+       txd->flags = 0;
+       txd->mss = 0;
+       txd->lso_hdrlen = 0;
+
+       tx_ring->wr_p++;
+       tx_ring->wr_ptr_add++;
+       nfp_net_tx_xmit_more_flush(tx_ring);
+
+       return false;
+
+err_dma_warn:
+       nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
+err_free:
+       u64_stats_update_begin(&r_vec->tx_sync);
+       r_vec->tx_errors++;
+       u64_stats_update_end(&r_vec->tx_sync);
+       dev_kfree_skb_any(skb);
+       return false;
+}
+
+static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
+{
+       struct sk_buff *skb;
+
+       while ((skb = __skb_dequeue(&r_vec->queue)))
+               if (nfp_nfd3_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
+                       return;
+}
+
+static bool
+nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
+{
+       u32 meta_type, meta_tag;
+
+       if (!nfp_app_ctrl_has_meta(nn->app))
+               return !meta_len;
+
+       if (meta_len != 8)
+               return false;
+
+       meta_type = get_unaligned_be32(data);
+       meta_tag = get_unaligned_be32(data + 4);
+
+       return (meta_type == NFP_NET_META_PORTID &&
+               meta_tag == NFP_META_PORT_ID_CTRL);
+}
+
+static bool
+nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
+               struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
+{
+       unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+       struct nfp_net_rx_buf *rxbuf;
+       struct nfp_net_rx_desc *rxd;
+       dma_addr_t new_dma_addr;
+       struct sk_buff *skb;
+       void *new_frag;
+       int idx;
+
+       idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+       rxd = &rx_ring->rxds[idx];
+       if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+               return false;
+
+       /* Memory barrier to ensure that we won't do other reads
+        * before the DD bit.
+        */
+       dma_rmb();
+
+       rx_ring->rd_p++;
+
+       rxbuf = &rx_ring->rxbufs[idx];
+       meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+       data_len = le16_to_cpu(rxd->rxd.data_len);
+       pkt_len = data_len - meta_len;
+
+       pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+       if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+               pkt_off += meta_len;
+       else
+               pkt_off += dp->rx_offset;
+       meta_off = pkt_off - meta_len;
+
+       /* Stats update */
+       u64_stats_update_begin(&r_vec->rx_sync);
+       r_vec->rx_pkts++;
+       r_vec->rx_bytes += pkt_len;
+       u64_stats_update_end(&r_vec->rx_sync);
+
+       nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
+
+       if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
+               nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
+                          meta_len);
+               nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+               return true;
+       }
+
+       skb = build_skb(rxbuf->frag, dp->fl_bufsz);
+       if (unlikely(!skb)) {
+               nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+               return true;
+       }
+       new_frag = nfp_nfd3_napi_alloc_one(dp, &new_dma_addr);
+       if (unlikely(!new_frag)) {
+               nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+               return true;
+       }
+
+       nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+       nfp_nfd3_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+       skb_reserve(skb, pkt_off);
+       skb_put(skb, pkt_len);
+
+       nfp_app_ctrl_rx(nn->app, skb);
+
+       return true;
+}
+
+static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+{
+       struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
+       struct nfp_net *nn = r_vec->nfp_net;
+       struct nfp_net_dp *dp = &nn->dp;
+       unsigned int budget = 512;
+
+       while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
+               continue;
+
+       return budget;
+}
+
+void nfp_nfd3_ctrl_poll(struct tasklet_struct *t)
+{
+       struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
+
+       spin_lock(&r_vec->lock);
+       nfp_nfd3_tx_complete(r_vec->tx_ring, 0);
+       __nfp_ctrl_tx_queued(r_vec);
+       spin_unlock(&r_vec->lock);
+
+       if (nfp_ctrl_rx(r_vec)) {
+               nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+       } else {
+               tasklet_schedule(&r_vec->tasklet);
+               nn_dp_warn(&r_vec->nfp_net->dp,
+                          "control message budget exceeded!\n");
+       }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h b/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h
new file mode 100644 (file)
index 0000000..7a0df9e
--- /dev/null
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#ifndef _NFP_DP_NFD3_H_
+#define _NFP_DP_NFD3_H_
+
+struct sk_buff;
+struct net_device;
+
+/* TX descriptor format */
+
+#define NFD3_DESC_TX_EOP               BIT(7)
+#define NFD3_DESC_TX_OFFSET_MASK       GENMASK(6, 0)
+#define NFD3_DESC_TX_MSS_MASK          GENMASK(13, 0)
+
+/* Flags in the host TX descriptor */
+#define NFD3_DESC_TX_CSUM              BIT(7)
+#define NFD3_DESC_TX_IP4_CSUM          BIT(6)
+#define NFD3_DESC_TX_TCP_CSUM          BIT(5)
+#define NFD3_DESC_TX_UDP_CSUM          BIT(4)
+#define NFD3_DESC_TX_VLAN              BIT(3)
+#define NFD3_DESC_TX_LSO               BIT(2)
+#define NFD3_DESC_TX_ENCAP             BIT(1)
+#define NFD3_DESC_TX_O_IP4_CSUM        BIT(0)
+
+struct nfp_nfd3_tx_desc {
+       union {
+               struct {
+                       u8 dma_addr_hi; /* High bits of host buf address */
+                       __le16 dma_len; /* Length to DMA for this desc */
+                       u8 offset_eop;  /* Offset in buf where pkt starts +
+                                        * highest bit is eop flag.
+                                        */
+                       __le32 dma_addr_lo; /* Low 32bit of host buf addr */
+
+                       __le16 mss;     /* MSS to be used for LSO */
+                       u8 lso_hdrlen;  /* LSO, TCP payload offset */
+                       u8 flags;       /* TX Flags, see @NFD3_DESC_TX_* */
+                       union {
+                               struct {
+                                       u8 l3_offset; /* L3 header offset */
+                                       u8 l4_offset; /* L4 header offset */
+                               };
+                               __le16 vlan; /* VLAN tag to add if indicated */
+                       };
+                       __le16 data_len; /* Length of frame + meta data */
+               } __packed;
+               __le32 vals[4];
+               __le64 vals8[2];
+       };
+};
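
The three union views only line up if the packed layout above is exactly
16 bytes; a compile-time check along these lines (not in this commit)
would pin that down:

        static_assert(sizeof(struct nfp_nfd3_tx_desc) == 16,
                      "NFD3 TX descriptor must be 16 bytes");
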
+
+/**
+ * struct nfp_nfd3_tx_buf - software TX buffer descriptor
+ * @skb:       normal ring, sk_buff associated with this buffer
+ * @frag:      XDP ring, page frag associated with this buffer
+ * @xdp:       XSK buffer pool handle (for AF_XDP)
+ * @dma_addr:  DMA mapping address of the buffer
+ * @fidx:      Fragment index (-1 for the head and [0..nr_frags-1] for frags)
+ * @pkt_cnt:   Number of packets to be produced out of the skb associated
+ *             with this buffer (valid only on the head's buffer).
+ *             Will be 1 for all non-TSO packets.
+ * @is_xsk_tx: Flag if the buffer is an RX buffer after an XDP_TX action and
+ *             not a buffer from the TX queue (for AF_XDP).
+ * @real_len:  Number of bytes to be produced out of the skb (valid only
+ *             on the head's buffer). Equal to skb->len for non-TSO packets.
+ */
+struct nfp_nfd3_tx_buf {
+       union {
+               struct sk_buff *skb;
+               void *frag;
+               struct xdp_buff *xdp;
+       };
+       dma_addr_t dma_addr;
+       union {
+               struct {
+                       short int fidx;
+                       u16 pkt_cnt;
+               };
+               struct {
+                       bool is_xsk_tx;
+               };
+       };
+       u32 real_len;
+};
+
+void
+nfp_nfd3_rx_csum(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+                const struct nfp_net_rx_desc *rxd,
+                const struct nfp_meta_parsed *meta, struct sk_buff *skb);
+bool
+nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
+                   void *data, void *pkt, unsigned int pkt_len, int meta_len);
+void nfp_nfd3_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget);
+int nfp_nfd3_poll(struct napi_struct *napi, int budget);
+netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev);
+bool
+nfp_nfd3_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+                    struct sk_buff *skb, bool old);
+void nfp_nfd3_ctrl_poll(struct tasklet_struct *t);
+void nfp_nfd3_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+                                   struct nfp_net_rx_ring *rx_ring);
+void nfp_nfd3_xsk_tx_free(struct nfp_nfd3_tx_buf *txbuf);
+int nfp_nfd3_xsk_poll(struct napi_struct *napi, int budget);
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/rings.c b/drivers/net/ethernet/netronome/nfp/nfd3/rings.c
new file mode 100644 (file)
index 0000000..47604d5
--- /dev/null
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#include <linux/seq_file.h>
+
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "../nfp_net_xsk.h"
+#include "nfd3.h"
+
+static void nfp_nfd3_xsk_tx_bufs_free(struct nfp_net_tx_ring *tx_ring)
+{
+       struct nfp_nfd3_tx_buf *txbuf;
+       unsigned int idx;
+
+       while (tx_ring->rd_p != tx_ring->wr_p) {
+               idx = D_IDX(tx_ring, tx_ring->rd_p);
+               txbuf = &tx_ring->txbufs[idx];
+
+               txbuf->real_len = 0;
+
+               tx_ring->qcp_rd_p++;
+               tx_ring->rd_p++;
+
+               if (tx_ring->r_vec->xsk_pool) {
+                       if (txbuf->is_xsk_tx)
+                               nfp_nfd3_xsk_tx_free(txbuf);
+
+                       xsk_tx_completed(tx_ring->r_vec->xsk_pool, 1);
+               }
+       }
+}
+
+/**
+ * nfp_nfd3_tx_ring_reset() - Free any untransmitted buffers and reset pointers
+ * @dp:                NFP Net data path struct
+ * @tx_ring:   TX ring structure
+ *
+ * Assumes that the device is stopped, must be idempotent.
+ */
+static void
+nfp_nfd3_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+       struct netdev_queue *nd_q;
+       const skb_frag_t *frag;
+
+       while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
+               struct nfp_nfd3_tx_buf *tx_buf;
+               struct sk_buff *skb;
+               int idx, nr_frags;
+
+               idx = D_IDX(tx_ring, tx_ring->rd_p);
+               tx_buf = &tx_ring->txbufs[idx];
+
+               skb = tx_ring->txbufs[idx].skb;
+               nr_frags = skb_shinfo(skb)->nr_frags;
+
+               if (tx_buf->fidx == -1) {
+                       /* unmap head */
+                       dma_unmap_single(dp->dev, tx_buf->dma_addr,
+                                        skb_headlen(skb), DMA_TO_DEVICE);
+               } else {
+                       /* unmap fragment */
+                       frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
+                       dma_unmap_page(dp->dev, tx_buf->dma_addr,
+                                      skb_frag_size(frag), DMA_TO_DEVICE);
+               }
+
+               /* check for last gather fragment */
+               if (tx_buf->fidx == nr_frags - 1)
+                       dev_kfree_skb_any(skb);
+
+               tx_buf->dma_addr = 0;
+               tx_buf->skb = NULL;
+               tx_buf->fidx = -2;
+
+               tx_ring->qcp_rd_p++;
+               tx_ring->rd_p++;
+       }
+
+       if (tx_ring->is_xdp)
+               nfp_nfd3_xsk_tx_bufs_free(tx_ring);
+
+       memset(tx_ring->txds, 0, tx_ring->size);
+       tx_ring->wr_p = 0;
+       tx_ring->rd_p = 0;
+       tx_ring->qcp_rd_p = 0;
+       tx_ring->wr_ptr_add = 0;
+
+       if (tx_ring->is_xdp || !dp->netdev)
+               return;
+
+       nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+       netdev_tx_reset_queue(nd_q);
+}
+
+/**
+ * nfp_nfd3_tx_ring_free() - Free resources allocated to a TX ring
+ * @tx_ring:   TX ring to free
+ */
+static void nfp_nfd3_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
+{
+       struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+
+       kvfree(tx_ring->txbufs);
+
+       if (tx_ring->txds)
+               dma_free_coherent(dp->dev, tx_ring->size,
+                                 tx_ring->txds, tx_ring->dma);
+
+       tx_ring->cnt = 0;
+       tx_ring->txbufs = NULL;
+       tx_ring->txds = NULL;
+       tx_ring->dma = 0;
+       tx_ring->size = 0;
+}
+
+/**
+ * nfp_nfd3_tx_ring_alloc() - Allocate resources for a TX ring
+ * @dp:        NFP Net data path struct
+ * @tx_ring:   TX Ring structure to allocate
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int
+nfp_nfd3_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+       struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+
+       tx_ring->cnt = dp->txd_cnt;
+
+       tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
+       tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
+                                          &tx_ring->dma,
+                                          GFP_KERNEL | __GFP_NOWARN);
+       if (!tx_ring->txds) {
+               netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
+                           tx_ring->cnt);
+               goto err_alloc;
+       }
+
+       tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
+                                  GFP_KERNEL);
+       if (!tx_ring->txbufs)
+               goto err_alloc;
+
+       if (!tx_ring->is_xdp && dp->netdev)
+               netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
+                                   tx_ring->idx);
+
+       return 0;
+
+err_alloc:
+       nfp_nfd3_tx_ring_free(tx_ring);
+       return -ENOMEM;
+}
+
+static void
+nfp_nfd3_tx_ring_bufs_free(struct nfp_net_dp *dp,
+                          struct nfp_net_tx_ring *tx_ring)
+{
+       unsigned int i;
+
+       if (!tx_ring->is_xdp)
+               return;
+
+       for (i = 0; i < tx_ring->cnt; i++) {
+               if (!tx_ring->txbufs[i].frag)
+                       return;
+
+               nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
+               __free_page(virt_to_page(tx_ring->txbufs[i].frag));
+       }
+}
+
+static int
+nfp_nfd3_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
+                           struct nfp_net_tx_ring *tx_ring)
+{
+       struct nfp_nfd3_tx_buf *txbufs = tx_ring->txbufs;
+       unsigned int i;
+
+       if (!tx_ring->is_xdp)
+               return 0;
+
+       for (i = 0; i < tx_ring->cnt; i++) {
+               txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
+               if (!txbufs[i].frag) {
+                       nfp_nfd3_tx_ring_bufs_free(dp, tx_ring);
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
+static void
+nfp_nfd3_print_tx_descs(struct seq_file *file,
+                       struct nfp_net_r_vector *r_vec,
+                       struct nfp_net_tx_ring *tx_ring,
+                       u32 d_rd_p, u32 d_wr_p)
+{
+       struct nfp_nfd3_tx_desc *txd;
+       u32 txd_cnt = tx_ring->cnt;
+       int i;
+
+       for (i = 0; i < txd_cnt; i++) {
+               struct xdp_buff *xdp;
+               struct sk_buff *skb;
+
+               txd = &tx_ring->txds[i];
+               seq_printf(file, "%04d: 0x%08x 0x%08x 0x%08x 0x%08x", i,
+                          txd->vals[0], txd->vals[1],
+                          txd->vals[2], txd->vals[3]);
+
+               if (!tx_ring->is_xdp) {
+                       skb = READ_ONCE(tx_ring->txbufs[i].skb);
+                       if (skb)
+                               seq_printf(file, " skb->head=%p skb->data=%p",
+                                          skb->head, skb->data);
+               } else {
+                       xdp = READ_ONCE(tx_ring->txbufs[i].xdp);
+                       if (xdp)
+                               seq_printf(file, " xdp->data=%p", xdp->data);
+               }
+
+               if (tx_ring->txbufs[i].dma_addr)
+                       seq_printf(file, " dma_addr=%pad",
+                                  &tx_ring->txbufs[i].dma_addr);
+
+               if (i == tx_ring->rd_p % txd_cnt)
+                       seq_puts(file, " H_RD");
+               if (i == tx_ring->wr_p % txd_cnt)
+                       seq_puts(file, " H_WR");
+               if (i == d_rd_p % txd_cnt)
+                       seq_puts(file, " D_RD");
+               if (i == d_wr_p % txd_cnt)
+                       seq_puts(file, " D_WR");
+
+               seq_putc(file, '\n');
+       }
+}
+
+#define NFP_NFD3_CFG_CTRL_SUPPORTED                                    \
+       (NFP_NET_CFG_CTRL_ENABLE | NFP_NET_CFG_CTRL_PROMISC |           \
+        NFP_NET_CFG_CTRL_L2BC | NFP_NET_CFG_CTRL_L2MC |                \
+        NFP_NET_CFG_CTRL_RXCSUM | NFP_NET_CFG_CTRL_TXCSUM |            \
+        NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_TXVLAN |            \
+        NFP_NET_CFG_CTRL_GATHER | NFP_NET_CFG_CTRL_LSO |               \
+        NFP_NET_CFG_CTRL_CTAG_FILTER | NFP_NET_CFG_CTRL_CMSG_DATA |    \
+        NFP_NET_CFG_CTRL_RINGCFG | NFP_NET_CFG_CTRL_RSS |              \
+        NFP_NET_CFG_CTRL_IRQMOD | NFP_NET_CFG_CTRL_TXRWB |             \
+        NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE |              \
+        NFP_NET_CFG_CTRL_BPF | NFP_NET_CFG_CTRL_LSO2 |                 \
+        NFP_NET_CFG_CTRL_RSS2 | NFP_NET_CFG_CTRL_CSUM_COMPLETE |       \
+        NFP_NET_CFG_CTRL_LIVE_ADDR)
+
+const struct nfp_dp_ops nfp_nfd3_ops = {
+       .version                = NFP_NFD_VER_NFD3,
+       .tx_min_desc_per_pkt    = 1,
+       .cap_mask               = NFP_NFD3_CFG_CTRL_SUPPORTED,
+       .poll                   = nfp_nfd3_poll,
+       .xsk_poll               = nfp_nfd3_xsk_poll,
+       .ctrl_poll              = nfp_nfd3_ctrl_poll,
+       .xmit                   = nfp_nfd3_tx,
+       .ctrl_tx_one            = nfp_nfd3_ctrl_tx_one,
+       .rx_ring_fill_freelist  = nfp_nfd3_rx_ring_fill_freelist,
+       .tx_ring_alloc          = nfp_nfd3_tx_ring_alloc,
+       .tx_ring_reset          = nfp_nfd3_tx_ring_reset,
+       .tx_ring_free           = nfp_nfd3_tx_ring_free,
+       .tx_ring_bufs_alloc     = nfp_nfd3_tx_ring_bufs_alloc,
+       .tx_ring_bufs_free      = nfp_nfd3_tx_ring_bufs_free,
+       .print_tx_descs         = nfp_nfd3_print_tx_descs
+};
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
new file mode 100644 (file)
index 0000000..c16c4b4
--- /dev/null
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc */
+/* Copyright (C) 2021 Corigine, Inc */
+
+#include <linux/bpf_trace.h>
+#include <linux/netdevice.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "../nfp_net_xsk.h"
+#include "nfd3.h"
+
+static bool
+nfp_nfd3_xsk_tx_xdp(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+                   struct nfp_net_rx_ring *rx_ring,
+                   struct nfp_net_tx_ring *tx_ring,
+                   struct nfp_net_xsk_rx_buf *xrxbuf, unsigned int pkt_len,
+                   int pkt_off)
+{
+       struct xsk_buff_pool *pool = r_vec->xsk_pool;
+       struct nfp_nfd3_tx_buf *txbuf;
+       struct nfp_nfd3_tx_desc *txd;
+       unsigned int wr_idx;
+
+       if (nfp_net_tx_space(tx_ring) < 1)
+               return false;
+
+       xsk_buff_raw_dma_sync_for_device(pool, xrxbuf->dma_addr + pkt_off,
+                                        pkt_len);
+
+       wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+       txbuf = &tx_ring->txbufs[wr_idx];
+       txbuf->xdp = xrxbuf->xdp;
+       txbuf->real_len = pkt_len;
+       txbuf->is_xsk_tx = true;
+
+       /* Build TX descriptor */
+       txd = &tx_ring->txds[wr_idx];
+       txd->offset_eop = NFD3_DESC_TX_EOP;
+       txd->dma_len = cpu_to_le16(pkt_len);
+       nfp_desc_set_dma_addr(txd, xrxbuf->dma_addr + pkt_off);
+       txd->data_len = cpu_to_le16(pkt_len);
+
+       txd->flags = 0;
+       txd->mss = 0;
+       txd->lso_hdrlen = 0;
+
+       tx_ring->wr_ptr_add++;
+       tx_ring->wr_p++;
+
+       return true;
+}
+
+static void nfp_nfd3_xsk_rx_skb(struct nfp_net_rx_ring *rx_ring,
+                               const struct nfp_net_rx_desc *rxd,
+                               struct nfp_net_xsk_rx_buf *xrxbuf,
+                               const struct nfp_meta_parsed *meta,
+                               unsigned int pkt_len,
+                               bool meta_xdp,
+                               unsigned int *skbs_polled)
+{
+       struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+       struct net_device *netdev;
+       struct sk_buff *skb;
+
+       if (likely(!meta->portid)) {
+               netdev = dp->netdev;
+       } else {
+               struct nfp_net *nn = netdev_priv(dp->netdev);
+
+               netdev = nfp_app_dev_get(nn->app, meta->portid, NULL);
+               if (unlikely(!netdev)) {
+                       nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+                       return;
+               }
+               nfp_repr_inc_rx_stats(netdev, pkt_len);
+       }
+
+       skb = napi_alloc_skb(&r_vec->napi, pkt_len);
+       if (!skb) {
+               nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+               return;
+       }
+       memcpy(skb_put(skb, pkt_len), xrxbuf->xdp->data, pkt_len);
+
+       skb->mark = meta->mark;
+       skb_set_hash(skb, meta->hash, meta->hash_type);
+
+       skb_record_rx_queue(skb, rx_ring->idx);
+       skb->protocol = eth_type_trans(skb, netdev);
+
+       nfp_nfd3_rx_csum(dp, r_vec, rxd, meta, skb);
+
+       if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                      le16_to_cpu(rxd->rxd.vlan));
+       if (meta_xdp)
+               skb_metadata_set(skb,
+                                xrxbuf->xdp->data - xrxbuf->xdp->data_meta);
+
+       napi_gro_receive(&rx_ring->r_vec->napi, skb);
+
+       nfp_net_xsk_rx_free(xrxbuf);
+
+       (*skbs_polled)++;
+}
+
+static unsigned int
+nfp_nfd3_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget,
+               unsigned int *skbs_polled)
+{
+       struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+       struct nfp_net_tx_ring *tx_ring;
+       struct bpf_prog *xdp_prog;
+       bool xdp_redir = false;
+       int pkts_polled = 0;
+
+       xdp_prog = READ_ONCE(dp->xdp_prog);
+       tx_ring = r_vec->xdp_ring;
+
+       while (pkts_polled < budget) {
+               unsigned int meta_len, data_len, pkt_len, pkt_off;
+               struct nfp_net_xsk_rx_buf *xrxbuf;
+               struct nfp_net_rx_desc *rxd;
+               struct nfp_meta_parsed meta;
+               int idx, act;
+
+               idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+               rxd = &rx_ring->rxds[idx];
+               if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+                       break;
+
+               rx_ring->rd_p++;
+               pkts_polled++;
+
+               xrxbuf = &rx_ring->xsk_rxbufs[idx];
+
+               /* If starved of buffers "drop" it and scream. */
+               if (rx_ring->rd_p >= rx_ring->wr_p) {
+                       nn_dp_warn(dp, "Starved of RX buffers\n");
+                       nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+                       break;
+               }
+
+               /* Memory barrier to ensure that we won't do other reads
+                * before the DD bit.
+                */
+               dma_rmb();
+
+               memset(&meta, 0, sizeof(meta));
+
+               /* Only AF_XDP with dynamic metadata is supported, so the
+                * buffer layout is always:
+                *
+                *  ---------------------------------------------------------
+                * |  off | metadata  |             packet           | XXXX  |
+                *  ---------------------------------------------------------
+                */
+               meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+               data_len = le16_to_cpu(rxd->rxd.data_len);
+               pkt_len = data_len - meta_len;
+
+               if (unlikely(meta_len > NFP_NET_MAX_PREPEND)) {
+                       nn_dp_warn(dp, "Oversized RX packet metadata %u\n",
+                                  meta_len);
+                       nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+                       continue;
+               }
+
+               /* Stats update. */
+               u64_stats_update_begin(&r_vec->rx_sync);
+               r_vec->rx_pkts++;
+               r_vec->rx_bytes += pkt_len;
+               u64_stats_update_end(&r_vec->rx_sync);
+
+               xrxbuf->xdp->data += meta_len;
+               xrxbuf->xdp->data_end = xrxbuf->xdp->data + pkt_len;
+               xdp_set_data_meta_invalid(xrxbuf->xdp);
+               xsk_buff_dma_sync_for_cpu(xrxbuf->xdp, r_vec->xsk_pool);
+               net_prefetch(xrxbuf->xdp->data);
+
+               if (meta_len) {
+                       if (unlikely(nfp_nfd3_parse_meta(dp->netdev, &meta,
+                                                        xrxbuf->xdp->data -
+                                                        meta_len,
+                                                        xrxbuf->xdp->data,
+                                                        pkt_len, meta_len))) {
+                               nn_dp_warn(dp, "Invalid RX packet metadata\n");
+                               nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+                               continue;
+                       }
+
+                       if (unlikely(meta.portid)) {
+                               struct nfp_net *nn = netdev_priv(dp->netdev);
+
+                               if (meta.portid != NFP_META_PORT_ID_CTRL) {
+                                       nfp_nfd3_xsk_rx_skb(rx_ring, rxd,
+                                                           xrxbuf, &meta,
+                                                           pkt_len, false,
+                                                           skbs_polled);
+                                       continue;
+                               }
+
+                               nfp_app_ctrl_rx_raw(nn->app, xrxbuf->xdp->data,
+                                                   pkt_len);
+                               nfp_net_xsk_rx_free(xrxbuf);
+                               continue;
+                       }
+               }
+
+               act = bpf_prog_run_xdp(xdp_prog, xrxbuf->xdp);
+
+               pkt_len = xrxbuf->xdp->data_end - xrxbuf->xdp->data;
+               pkt_off = xrxbuf->xdp->data - xrxbuf->xdp->data_hard_start;
+
+               switch (act) {
+               case XDP_PASS:
+                       nfp_nfd3_xsk_rx_skb(rx_ring, rxd, xrxbuf, &meta, pkt_len,
+                                           true, skbs_polled);
+                       break;
+               case XDP_TX:
+                       if (!nfp_nfd3_xsk_tx_xdp(dp, r_vec, rx_ring, tx_ring,
+                                                xrxbuf, pkt_len, pkt_off))
+                               nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+                       else
+                               nfp_net_xsk_rx_unstash(xrxbuf);
+                       break;
+               case XDP_REDIRECT:
+                       if (xdp_do_redirect(dp->netdev, xrxbuf->xdp, xdp_prog)) {
+                               nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+                       } else {
+                               nfp_net_xsk_rx_unstash(xrxbuf);
+                               xdp_redir = true;
+                       }
+                       break;
+               default:
+                       bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
+                       fallthrough;
+               case XDP_ABORTED:
+                       trace_xdp_exception(dp->netdev, xdp_prog, act);
+                       fallthrough;
+               case XDP_DROP:
+                       nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+                       break;
+               }
+       }
+
+       nfp_net_xsk_rx_ring_fill_freelist(r_vec->rx_ring);
+
+       if (xdp_redir)
+               xdp_do_flush_map();
+
+       if (tx_ring->wr_ptr_add)
+               nfp_net_tx_xmit_more_flush(tx_ring);
+
+       return pkts_polled;
+}
+
+void nfp_nfd3_xsk_tx_free(struct nfp_nfd3_tx_buf *txbuf)
+{
+       xsk_buff_free(txbuf->xdp);
+
+       txbuf->dma_addr = 0;
+       txbuf->xdp = NULL;
+}
+
+static bool nfp_nfd3_xsk_complete(struct nfp_net_tx_ring *tx_ring)
+{
+       struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+       u32 done_pkts = 0, done_bytes = 0, reused = 0;
+       bool done_all;
+       int idx, todo;
+       u32 qcp_rd_p;
+
+       if (tx_ring->wr_p == tx_ring->rd_p)
+               return true;
+
+       /* Work out how many descriptors have been transmitted. */
+       qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
+
+       if (qcp_rd_p == tx_ring->qcp_rd_p)
+               return true;
+
+       todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+
+       done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
+       todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
+
+       tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
+
+       done_pkts = todo;
+       while (todo--) {
+               struct nfp_nfd3_tx_buf *txbuf;
+
+               idx = D_IDX(tx_ring, tx_ring->rd_p);
+               tx_ring->rd_p++;
+
+               txbuf = &tx_ring->txbufs[idx];
+               if (unlikely(!txbuf->real_len))
+                       continue;
+
+               done_bytes += txbuf->real_len;
+               txbuf->real_len = 0;
+
+               if (txbuf->is_xsk_tx) {
+                       nfp_nfd3_xsk_tx_free(txbuf);
+                       reused++;
+               }
+       }
+
+       u64_stats_update_begin(&r_vec->tx_sync);
+       r_vec->tx_bytes += done_bytes;
+       r_vec->tx_pkts += done_pkts;
+       u64_stats_update_end(&r_vec->tx_sync);
+
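+       /* Buffers flagged is_xsk_tx were RX buffers echoed back out by
+        * XDP_TX rather than descriptors taken from the XSK TX queue, so
+        * don't report them as completed to the pool.
+        */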
+       xsk_tx_completed(r_vec->xsk_pool, done_pkts - reused);
+
+       WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+                 "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+                 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+
+       return done_all;
+}
+
+static void nfp_nfd3_xsk_tx(struct nfp_net_tx_ring *tx_ring)
+{
+       struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+       struct xdp_desc desc[NFP_NET_XSK_TX_BATCH];
+       struct xsk_buff_pool *xsk_pool;
+       struct nfp_nfd3_tx_desc *txd;
+       u32 pkts = 0, wr_idx;
+       u32 i, got;
+
+       xsk_pool = r_vec->xsk_pool;
+
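+       /* Drain the XSK TX queue in batches: peek up to a batch of
+        * descriptors, DMA-sync their buffers, then build one HW
+        * descriptor per entry before advancing the write pointer.
+        */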
+       while (nfp_net_tx_space(tx_ring) >= NFP_NET_XSK_TX_BATCH) {
+               for (i = 0; i < NFP_NET_XSK_TX_BATCH; i++)
+                       if (!xsk_tx_peek_desc(xsk_pool, &desc[i]))
+                               break;
+               got = i;
+               if (!got)
+                       break;
+
+               wr_idx = D_IDX(tx_ring, tx_ring->wr_p + i);
+               prefetchw(&tx_ring->txds[wr_idx]);
+
+               for (i = 0; i < got; i++)
+                       xsk_buff_raw_dma_sync_for_device(xsk_pool, desc[i].addr,
+                                                        desc[i].len);
+
+               for (i = 0; i < got; i++) {
+                       wr_idx = D_IDX(tx_ring, tx_ring->wr_p + i);
+
+                       tx_ring->txbufs[wr_idx].real_len = desc[i].len;
+                       tx_ring->txbufs[wr_idx].is_xsk_tx = false;
+
+                       /* Build TX descriptor. */
+                       txd = &tx_ring->txds[wr_idx];
+                       nfp_desc_set_dma_addr(txd,
+                                             xsk_buff_raw_get_dma(xsk_pool,
+                                                                  desc[i].addr
+                                                                  ));
+                       txd->offset_eop = NFD3_DESC_TX_EOP;
+                       txd->dma_len = cpu_to_le16(desc[i].len);
+                       txd->data_len = cpu_to_le16(desc[i].len);
+               }
+
+               tx_ring->wr_p += got;
+               pkts += got;
+       }
+
+       if (!pkts)
+               return;
+
+       xsk_tx_release(xsk_pool);
+       /* Ensure all records are visible before incrementing write counter. */
+       wmb();
+       nfp_qcp_wr_ptr_add(tx_ring->qcp_q, pkts);
+}
+
+int nfp_nfd3_xsk_poll(struct napi_struct *napi, int budget)
+{
+       struct nfp_net_r_vector *r_vec =
+               container_of(napi, struct nfp_net_r_vector, napi);
+       unsigned int pkts_polled, skbs = 0;
+
+       pkts_polled = nfp_nfd3_xsk_rx(r_vec->rx_ring, budget, &skbs);
+
+       if (pkts_polled < budget) {
+               if (r_vec->tx_ring)
+                       nfp_nfd3_tx_complete(r_vec->tx_ring, budget);
+
+               if (!nfp_nfd3_xsk_complete(r_vec->xdp_ring))
+                       pkts_polled = budget;
+
+               nfp_nfd3_xsk_tx(r_vec->xdp_ring);
+
+               if (pkts_polled < budget && napi_complete_done(napi, skbs))
+                       nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+       }
+
+       return pkts_polled;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
new file mode 100644 (file)
index 0000000..e3da9ac
--- /dev/null
@@ -0,0 +1,1524 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#include <linux/bpf_trace.h>
+#include <linux/netdevice.h>
+#include <linux/overflow.h>
+#include <linux/sizes.h>
+#include <linux/bitfield.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "../crypto/crypto.h"
+#include "../crypto/fw.h"
+#include "nfdk.h"
+
+static int nfp_nfdk_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
+{
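+       /* Wake only once twice the stop threshold is free, providing
+        * some hysteresis between stopping and waking the queue.
+        */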
+       return !nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT * 2);
+}
+
+static int nfp_nfdk_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
+{
+       return nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT);
+}
+
+static void nfp_nfdk_tx_ring_stop(struct netdev_queue *nd_q,
+                                 struct nfp_net_tx_ring *tx_ring)
+{
+       netif_tx_stop_queue(nd_q);
+
+       /* We can race with the TX completion out of NAPI so recheck */
+       smp_mb();
+       if (unlikely(nfp_nfdk_tx_ring_should_wake(tx_ring)))
+               netif_tx_start_queue(nd_q);
+}
+
+static __le64
+nfp_nfdk_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfdk_tx_buf *txbuf,
+               struct sk_buff *skb)
+{
+       u32 segs, hdrlen, l3_offset, l4_offset;
+       struct nfp_nfdk_tx_desc txd;
+       u16 mss;
+
+       if (!skb->encapsulation) {
+               l3_offset = skb_network_offset(skb);
+               l4_offset = skb_transport_offset(skb);
+               hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       } else {
+               l3_offset = skb_inner_network_offset(skb);
+               l4_offset = skb_inner_transport_offset(skb);
+               hdrlen = skb_inner_transport_header(skb) - skb->data +
+                       inner_tcp_hdrlen(skb);
+       }
+
+       segs = skb_shinfo(skb)->gso_segs;
+       mss = skb_shinfo(skb)->gso_size & NFDK_DESC_TX_MSS_MASK;
+
+       /* Note: TSO of a packet with metadata prepended to the skb is not
+        * supported yet; in that case l3/l4_offset and lso_hdrlen would
+        * need to be handled correctly here.
+        * Concern:
+        * The driver doesn't have md_bytes easily available at this point.
+        * The PCI.IN PD ME won't have md_bytes bytes to add to lso_hdrlen,
+        * so it needs the full length there.  The app MEs might prefer
+        * l3_offset and l4_offset relative to the start of packet data,
+        * but could probably cope with it being relative to the CTM buf
+        * data offset.
+        */
+       txd.l3_offset = l3_offset;
+       txd.l4_offset = l4_offset;
+       txd.lso_meta_res = 0;
+       txd.mss = cpu_to_le16(mss);
+       txd.lso_hdrlen = hdrlen;
+       txd.lso_totsegs = segs;
+
+       txbuf->pkt_cnt = segs;
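+       /* Each segment carries its own copy of the headers on the wire,
+        * so account for hdrlen once per additional segment.
+        */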
+       txbuf->real_len = skb->len + hdrlen * (txbuf->pkt_cnt - 1);
+
+       u64_stats_update_begin(&r_vec->tx_sync);
+       r_vec->tx_lso++;
+       u64_stats_update_end(&r_vec->tx_sync);
+
+       return txd.raw;
+}
+
+static u8
+nfp_nfdk_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+                unsigned int pkt_cnt, struct sk_buff *skb, u64 flags)
+{
+       struct ipv6hdr *ipv6h;
+       struct iphdr *iph;
+
+       if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
+               return flags;
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return flags;
+
+       flags |= NFDK_DESC_TX_L4_CSUM;
+
+       iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+       ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+       /* L3 checksum offloading flag is not required for ipv6 */
+       if (iph->version == 4) {
+               flags |= NFDK_DESC_TX_L3_CSUM;
+       } else if (ipv6h->version != 6) {
+               nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
+               return flags;
+       }
+
+       u64_stats_update_begin(&r_vec->tx_sync);
+       if (!skb->encapsulation) {
+               r_vec->hw_csum_tx += pkt_cnt;
+       } else {
+               flags |= NFDK_DESC_TX_ENCAP;
+               r_vec->hw_csum_tx_inner += pkt_cnt;
+       }
+       u64_stats_update_end(&r_vec->tx_sync);
+
+       return flags;
+}
+
+static int
+nfp_nfdk_tx_maybe_close_block(struct nfp_net_tx_ring *tx_ring,
+                             unsigned int nr_frags, struct sk_buff *skb)
+{
+       unsigned int n_descs, wr_p, nop_slots;
+       const skb_frag_t *frag, *fend;
+       struct nfp_nfdk_tx_desc *txd;
+       unsigned int wr_idx;
+       int err;
+
+recount_descs:
+       n_descs = nfp_nfdk_headlen_to_segs(skb_headlen(skb));
+
+       frag = skb_shinfo(skb)->frags;
+       fend = frag + nr_frags;
+       for (; frag < fend; frag++)
+               n_descs += DIV_ROUND_UP(skb_frag_size(frag),
+                                       NFDK_TX_MAX_DATA_PER_DESC);
+
+       if (unlikely(n_descs > NFDK_TX_DESC_GATHER_MAX)) {
+               if (skb_is_nonlinear(skb)) {
+                       err = skb_linearize(skb);
+                       if (err)
+                               return err;
+                       goto recount_descs;
+               }
+               return -EINVAL;
+       }
+
+       /* Undercount by 1 (don't count meta) so the round_down below works out */
+       n_descs += !!skb_is_gso(skb);
+
+       if (round_down(tx_ring->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
+           round_down(tx_ring->wr_p + n_descs, NFDK_TX_DESC_BLOCK_CNT))
+               goto close_block;
+
+       if ((u32)tx_ring->data_pending + skb->len > NFDK_TX_MAX_DATA_PER_BLOCK)
+               goto close_block;
+
+       return 0;
+
+close_block:
+       wr_p = tx_ring->wr_p;
+       nop_slots = D_BLOCK_CPL(wr_p);
+
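+       /* Zero out the rest of the current block so the slots read as
+        * nop descriptors and the next packet starts on a block boundary.
+        */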
+       wr_idx = D_IDX(tx_ring, wr_p);
+       tx_ring->ktxbufs[wr_idx].skb = NULL;
+       txd = &tx_ring->ktxds[wr_idx];
+
+       memset(txd, 0, array_size(nop_slots, sizeof(struct nfp_nfdk_tx_desc)));
+
+       tx_ring->data_pending = 0;
+       tx_ring->wr_p += nop_slots;
+       tx_ring->wr_ptr_add += nop_slots;
+
+       return 0;
+}
+
+static int nfp_nfdk_prep_port_id(struct sk_buff *skb)
+{
+       struct metadata_dst *md_dst = skb_metadata_dst(skb);
+       unsigned char *data;
+
+       if (likely(!md_dst))
+               return 0;
+       if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
+               return 0;
+
+       /* Note: TSO of an skb with metadata prepended is not supported.
+        * See the comments in `nfp_nfdk_tx_tso` for details.
+        */
+       if (unlikely(md_dst && skb_is_gso(skb)))
+               return -EOPNOTSUPP;
+
+       if (unlikely(skb_cow_head(skb, sizeof(md_dst->u.port_info.port_id))))
+               return -ENOMEM;
+
+       data = skb_push(skb, sizeof(md_dst->u.port_info.port_id));
+       put_unaligned_be32(md_dst->u.port_info.port_id, data);
+
+       return sizeof(md_dst->u.port_info.port_id);
+}
+
+static int
+nfp_nfdk_prep_tx_meta(struct nfp_app *app, struct sk_buff *skb,
+                     struct nfp_net_r_vector *r_vec)
+{
+       unsigned char *data;
+       int res, md_bytes;
+       u32 meta_id = 0;
+
+       res = nfp_nfdk_prep_port_id(skb);
+       if (unlikely(res <= 0))
+               return res;
+
+       md_bytes = res;
+       meta_id = NFP_NET_META_PORTID;
+
+       if (unlikely(skb_cow_head(skb, sizeof(meta_id))))
+               return -ENOMEM;
+
+       md_bytes += sizeof(meta_id);
+
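+       /* First 32-bit word of the prepend: the total metadata length
+        * plus the bitmap of fields that follow.
+        */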
+       meta_id = FIELD_PREP(NFDK_META_LEN, md_bytes) |
+                 FIELD_PREP(NFDK_META_FIELDS, meta_id);
+
+       data = skb_push(skb, sizeof(meta_id));
+       put_unaligned_be32(meta_id, data);
+
+       return NFDK_DESC_TX_CHAIN_META;
+}
+
+/**
+ * nfp_nfdk_tx() - Main transmit entry point
+ * @skb:    SKB to transmit
+ * @netdev: netdev structure
+ *
+ * Return: NETDEV_TX_OK on success.
+ */
+netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+       struct nfp_net *nn = netdev_priv(netdev);
+       struct nfp_nfdk_tx_buf *txbuf, *etxbuf;
+       u32 cnt, tmp_dlen, dlen_type = 0;
+       struct nfp_net_tx_ring *tx_ring;
+       struct nfp_net_r_vector *r_vec;
+       const skb_frag_t *frag, *fend;
+       struct nfp_nfdk_tx_desc *txd;
+       unsigned int real_len, qidx;
+       unsigned int dma_len, type;
+       struct netdev_queue *nd_q;
+       struct nfp_net_dp *dp;
+       int nr_frags, wr_idx;
+       dma_addr_t dma_addr;
+       u64 metadata;
+
+       dp = &nn->dp;
+       qidx = skb_get_queue_mapping(skb);
+       tx_ring = &dp->tx_rings[qidx];
+       r_vec = tx_ring->r_vec;
+       nd_q = netdev_get_tx_queue(dp->netdev, qidx);
+
+       /* Don't bother counting frags, assume the worst */
+       if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
+               nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
+                          qidx, tx_ring->wr_p, tx_ring->rd_p);
+               netif_tx_stop_queue(nd_q);
+               nfp_net_tx_xmit_more_flush(tx_ring);
+               u64_stats_update_begin(&r_vec->tx_sync);
+               r_vec->tx_busy++;
+               u64_stats_update_end(&r_vec->tx_sync);
+               return NETDEV_TX_BUSY;
+       }
+
+       metadata = nfp_nfdk_prep_tx_meta(nn->app, skb, r_vec);
+       if (unlikely((int)metadata < 0))
+               goto err_flush;
+
+       nr_frags = skb_shinfo(skb)->nr_frags;
+       if (nfp_nfdk_tx_maybe_close_block(tx_ring, nr_frags, skb))
+               goto err_flush;
+
+       /* DMA map all */
+       wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+       txd = &tx_ring->ktxds[wr_idx];
+       txbuf = &tx_ring->ktxbufs[wr_idx];
+
+       dma_len = skb_headlen(skb);
+       if (skb_is_gso(skb))
+               type = NFDK_DESC_TX_TYPE_TSO;
+       else if (!nr_frags && dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+               type = NFDK_DESC_TX_TYPE_SIMPLE;
+       else
+               type = NFDK_DESC_TX_TYPE_GATHER;
+
+       dma_addr = dma_map_single(dp->dev, skb->data, dma_len, DMA_TO_DEVICE);
+       if (dma_mapping_error(dp->dev, dma_addr))
+               goto err_warn_dma;
+
+       txbuf->skb = skb;
+       txbuf++;
+
+       txbuf->dma_addr = dma_addr;
+       txbuf++;
+
+       /* FIELD_PREP() implicitly truncates to chunk */
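+       /* The descriptor length field stores len - 1; whatever the head
+        * field cannot express is carried by the gather descriptors
+        * built in the loop below.
+        */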
+       dma_len -= 1;
+       dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
+                   FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
+
+       txd->dma_len_type = cpu_to_le16(dlen_type);
+       nfp_desc_set_dma_addr(txd, dma_addr);
+
+       /* starts at bit 0 */
+       BUILD_BUG_ON(!(NFDK_DESC_TX_DMA_LEN_HEAD & 1));
+
+       /* Preserve the original dlen_type, this way below the EOP logic
+        * can use dlen_type.
+        */
+       tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
+       dma_len -= tmp_dlen;
+       dma_addr += tmp_dlen + 1;
+       txd++;
+
+       /* The rest of the data (if any) will be in larger DMA descriptors
+        * and is handled by the fragment loop below.
+        */
+       frag = skb_shinfo(skb)->frags;
+       fend = frag + nr_frags;
+
+       while (true) {
+               while (dma_len > 0) {
+                       dma_len -= 1;
+                       dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
+
+                       txd->dma_len_type = cpu_to_le16(dlen_type);
+                       nfp_desc_set_dma_addr(txd, dma_addr);
+
+                       dma_len -= dlen_type;
+                       dma_addr += dlen_type + 1;
+                       txd++;
+               }
+
+               if (frag >= fend)
+                       break;
+
+               dma_len = skb_frag_size(frag);
+               dma_addr = skb_frag_dma_map(dp->dev, frag, 0, dma_len,
+                                           DMA_TO_DEVICE);
+               if (dma_mapping_error(dp->dev, dma_addr))
+                       goto err_unmap;
+
+               txbuf->dma_addr = dma_addr;
+               txbuf++;
+
+               frag++;
+       }
+
+       (txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);
+
+       if (!skb_is_gso(skb)) {
+               real_len = skb->len;
+               /* Metadata desc */
+               metadata = nfp_nfdk_tx_csum(dp, r_vec, 1, skb, metadata);
+               txd->raw = cpu_to_le64(metadata);
+               txd++;
+       } else {
+               /* lso desc should be placed after metadata desc */
+               (txd + 1)->raw = nfp_nfdk_tx_tso(r_vec, txbuf, skb);
+               real_len = txbuf->real_len;
+               /* Metadata desc */
+               metadata = nfp_nfdk_tx_csum(dp, r_vec, txbuf->pkt_cnt, skb, metadata);
+               txd->raw = cpu_to_le64(metadata);
+               txd += 2;
+               txbuf++;
+       }
+
+       cnt = txd - tx_ring->ktxds - wr_idx;
+       if (unlikely(round_down(wr_idx, NFDK_TX_DESC_BLOCK_CNT) !=
+                    round_down(wr_idx + cnt - 1, NFDK_TX_DESC_BLOCK_CNT)))
+               goto err_warn_overflow;
+
+       skb_tx_timestamp(skb);
+
+       tx_ring->wr_p += cnt;
+       if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
+               tx_ring->data_pending += skb->len;
+       else
+               tx_ring->data_pending = 0;
+
+       if (nfp_nfdk_tx_ring_should_stop(tx_ring))
+               nfp_nfdk_tx_ring_stop(nd_q, tx_ring);
+
+       tx_ring->wr_ptr_add += cnt;
+       if (__netdev_tx_sent_queue(nd_q, real_len, netdev_xmit_more()))
+               nfp_net_tx_xmit_more_flush(tx_ring);
+
+       return NETDEV_TX_OK;
+
+err_warn_overflow:
+       WARN_ONCE(1, "unable to fit packet into a descriptor wr_idx:%d head:%d frags:%d cnt:%d",
+                 wr_idx, skb_headlen(skb), nr_frags, cnt);
+       if (skb_is_gso(skb))
+               txbuf--;
+err_unmap:
+       /* txbuf points to the next-to-use slot */
+       etxbuf = txbuf;
+       /* first txbuf holds the skb */
+       txbuf = &tx_ring->ktxbufs[wr_idx + 1];
+       if (txbuf < etxbuf) {
+               dma_unmap_single(dp->dev, txbuf->dma_addr,
+                                skb_headlen(skb), DMA_TO_DEVICE);
+               txbuf->raw = 0;
+               txbuf++;
+       }
+       frag = skb_shinfo(skb)->frags;
+       while (txbuf < etxbuf) {
+               dma_unmap_page(dp->dev, txbuf->dma_addr,
+                              skb_frag_size(frag), DMA_TO_DEVICE);
+               txbuf->raw = 0;
+               frag++;
+               txbuf++;
+       }
+err_warn_dma:
+       nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+err_flush:
+       nfp_net_tx_xmit_more_flush(tx_ring);
+       u64_stats_update_begin(&r_vec->tx_sync);
+       r_vec->tx_errors++;
+       u64_stats_update_end(&r_vec->tx_sync);
+       dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
+}
+
+/**
+ * nfp_nfdk_tx_complete() - Handle completed TX packets
+ * @tx_ring:   TX ring structure
+ * @budget:    NAPI budget (only used as bool to determine if in NAPI context)
+ */
+static void nfp_nfdk_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
+{
+       struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+       u32 done_pkts = 0, done_bytes = 0;
+       struct nfp_nfdk_tx_buf *ktxbufs;
+       struct device *dev = dp->dev;
+       struct netdev_queue *nd_q;
+       u32 rd_p, qcp_rd_p;
+       int todo;
+
+       rd_p = tx_ring->rd_p;
+       if (tx_ring->wr_p == rd_p)
+               return;
+
+       /* Work out how many descriptors have been transmitted */
+       qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
+
+       if (qcp_rd_p == tx_ring->qcp_rd_p)
+               return;
+
+       todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+       ktxbufs = tx_ring->ktxbufs;
+
+       while (todo > 0) {
+               const skb_frag_t *frag, *fend;
+               unsigned int size, n_descs = 1;
+               struct nfp_nfdk_tx_buf *txbuf;
+               struct sk_buff *skb;
+
+               txbuf = &ktxbufs[D_IDX(tx_ring, rd_p)];
+               skb = txbuf->skb;
+               txbuf++;
+
+               /* Closed block */
+               if (!skb) {
+                       n_descs = D_BLOCK_CPL(rd_p);
+                       goto next;
+               }
+
+               /* Unmap head */
+               size = skb_headlen(skb);
+               n_descs += nfp_nfdk_headlen_to_segs(size);
+               dma_unmap_single(dev, txbuf->dma_addr, size, DMA_TO_DEVICE);
+               txbuf++;
+
+               /* Unmap frags */
+               frag = skb_shinfo(skb)->frags;
+               fend = frag + skb_shinfo(skb)->nr_frags;
+               for (; frag < fend; frag++) {
+                       size = skb_frag_size(frag);
+                       n_descs += DIV_ROUND_UP(size,
+                                               NFDK_TX_MAX_DATA_PER_DESC);
+                       dma_unmap_page(dev, txbuf->dma_addr,
+                                      skb_frag_size(frag), DMA_TO_DEVICE);
+                       txbuf++;
+               }
+
+               if (!skb_is_gso(skb)) {
+                       done_bytes += skb->len;
+                       done_pkts++;
+               } else {
+                       done_bytes += txbuf->real_len;
+                       done_pkts += txbuf->pkt_cnt;
+                       n_descs++;
+               }
+
+               napi_consume_skb(skb, budget);
+next:
+               rd_p += n_descs;
+               todo -= n_descs;
+       }
+
+       tx_ring->rd_p = rd_p;
+       tx_ring->qcp_rd_p = qcp_rd_p;
+
+       u64_stats_update_begin(&r_vec->tx_sync);
+       r_vec->tx_bytes += done_bytes;
+       r_vec->tx_pkts += done_pkts;
+       u64_stats_update_end(&r_vec->tx_sync);
+
+       if (!dp->netdev)
+               return;
+
+       nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+       netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
+       if (nfp_nfdk_tx_ring_should_wake(tx_ring)) {
+               /* Make sure TX thread will see updated tx_ring->rd_p */
+               smp_mb();
+
+               if (unlikely(netif_tx_queue_stopped(nd_q)))
+                       netif_tx_wake_queue(nd_q);
+       }
+
+       WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+                 "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+                 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+}
+
+/* Receive processing */
+static void *
+nfp_nfdk_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
+{
+       void *frag;
+
+       if (!dp->xdp_prog) {
+               frag = napi_alloc_frag(dp->fl_bufsz);
+               if (unlikely(!frag))
+                       return NULL;
+       } else {
+               struct page *page;
+
+               page = dev_alloc_page();
+               if (unlikely(!page))
+                       return NULL;
+               frag = page_address(page);
+       }
+
+       *dma_addr = nfp_net_dma_map_rx(dp, frag);
+       if (dma_mapping_error(dp->dev, *dma_addr)) {
+               nfp_net_free_frag(frag, dp->xdp_prog);
+               nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
+               return NULL;
+       }
+
+       return frag;
+}
+
+/**
+ * nfp_nfdk_rx_give_one() - Put a mapped buffer on the software and hardware rings
+ * @dp:                NFP Net data path struct
+ * @rx_ring:   RX ring structure
+ * @frag:      page fragment buffer
+ * @dma_addr:  DMA address of the buffer mapping
+ */
+static void
+nfp_nfdk_rx_give_one(const struct nfp_net_dp *dp,
+                    struct nfp_net_rx_ring *rx_ring,
+                    void *frag, dma_addr_t dma_addr)
+{
+       unsigned int wr_idx;
+
+       wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
+
+       nfp_net_dma_sync_dev_rx(dp, dma_addr);
+
+       /* Stash SKB and DMA address away */
+       rx_ring->rxbufs[wr_idx].frag = frag;
+       rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
+
+       /* Fill freelist descriptor */
+       rx_ring->rxds[wr_idx].fld.reserved = 0;
+       rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
+       nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
+                             dma_addr + dp->rx_dma_off);
+
+       rx_ring->wr_p++;
+       if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
+               /* Update write pointer of the freelist queue. Make
+                * sure all writes are flushed before telling the hardware.
+                */
+               wmb();
+               nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
+       }
+}
+
+/**
+ * nfp_nfdk_rx_ring_fill_freelist() - Give buffers from the ring to FW
+ * @dp:             NFP Net data path struct
+ * @rx_ring: RX ring to fill
+ */
+void nfp_nfdk_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+                                   struct nfp_net_rx_ring *rx_ring)
+{
+       unsigned int i;
+
+       for (i = 0; i < rx_ring->cnt - 1; i++)
+               nfp_nfdk_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
+                                    rx_ring->rxbufs[i].dma_addr);
+}
+
+/**
+ * nfp_nfdk_rx_csum_has_errors() - check whether any of the rxd's csum flags indicate an error
+ * @flags: RX descriptor flags field in CPU byte order
+ */
+static int nfp_nfdk_rx_csum_has_errors(u16 flags)
+{
+       u16 csum_all_checked, csum_all_ok;
+
+       csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
+       csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
+
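+       /* The OK bits sit PCIE_DESC_RX_CSUM_OK_SHIFT below their
+        * "checked" counterparts; after the shift every checksum the HW
+        * checked must also be flagged OK, otherwise report an error.
+        */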
+       return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
+}
+
+/**
+ * nfp_nfdk_rx_csum() - set SKB checksum field based on RX descriptor flags
+ * @dp:  NFP Net data path struct
+ * @r_vec: per-ring structure
+ * @rxd: Pointer to RX descriptor
+ * @meta: Parsed metadata prepend
+ * @skb: Pointer to SKB
+ */
+static void
+nfp_nfdk_rx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+                struct nfp_net_rx_desc *rxd, struct nfp_meta_parsed *meta,
+                struct sk_buff *skb)
+{
+       skb_checksum_none_assert(skb);
+
+       if (!(dp->netdev->features & NETIF_F_RXCSUM))
+               return;
+
+       if (meta->csum_type) {
+               skb->ip_summed = meta->csum_type;
+               skb->csum = meta->csum;
+               u64_stats_update_begin(&r_vec->rx_sync);
+               r_vec->hw_csum_rx_complete++;
+               u64_stats_update_end(&r_vec->rx_sync);
+               return;
+       }
+
+       if (nfp_nfdk_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
+               u64_stats_update_begin(&r_vec->rx_sync);
+               r_vec->hw_csum_rx_error++;
+               u64_stats_update_end(&r_vec->rx_sync);
+               return;
+       }
+
+       /* Assume that the firmware will never report inner CSUM_OK unless outer
+        * L4 headers were successfully parsed. FW will always report zero UDP
+        * checksum as CSUM_OK.
+        */
+       if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
+           rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
+               __skb_incr_checksum_unnecessary(skb);
+               u64_stats_update_begin(&r_vec->rx_sync);
+               r_vec->hw_csum_rx_ok++;
+               u64_stats_update_end(&r_vec->rx_sync);
+       }
+
+       if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
+           rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
+               __skb_incr_checksum_unnecessary(skb);
+               u64_stats_update_begin(&r_vec->rx_sync);
+               r_vec->hw_csum_rx_inner_ok++;
+               u64_stats_update_end(&r_vec->rx_sync);
+       }
+}
+
+static void
+nfp_nfdk_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
+                 unsigned int type, __be32 *hash)
+{
+       if (!(netdev->features & NETIF_F_RXHASH))
+               return;
+
+       switch (type) {
+       case NFP_NET_RSS_IPV4:
+       case NFP_NET_RSS_IPV6:
+       case NFP_NET_RSS_IPV6_EX:
+               meta->hash_type = PKT_HASH_TYPE_L3;
+               break;
+       default:
+               meta->hash_type = PKT_HASH_TYPE_L4;
+               break;
+       }
+
+       meta->hash = get_unaligned_be32(hash);
+}
+
+static bool
+nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
+                   void *data, void *pkt, unsigned int pkt_len, int meta_len)
+{
+       u32 meta_info;
+
+       meta_info = get_unaligned_be32(data);
+       data += 4;
+
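+       /* meta_info packs one NFP_NET_META_FIELD_SIZE-bit type tag per
+        * prepended field, consumed low bits first; each field's payload
+        * follows in the same order.
+        */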
+       while (meta_info) {
+               switch (meta_info & NFP_NET_META_FIELD_MASK) {
+               case NFP_NET_META_HASH:
+                       meta_info >>= NFP_NET_META_FIELD_SIZE;
+                       nfp_nfdk_set_hash(netdev, meta,
+                                         meta_info & NFP_NET_META_FIELD_MASK,
+                                         (__be32 *)data);
+                       data += 4;
+                       break;
+               case NFP_NET_META_MARK:
+                       meta->mark = get_unaligned_be32(data);
+                       data += 4;
+                       break;
+               case NFP_NET_META_PORTID:
+                       meta->portid = get_unaligned_be32(data);
+                       data += 4;
+                       break;
+               case NFP_NET_META_CSUM:
+                       meta->csum_type = CHECKSUM_COMPLETE;
+                       meta->csum =
+                               (__force __wsum)__get_unaligned_cpu32(data);
+                       data += 4;
+                       break;
+               case NFP_NET_META_RESYNC_INFO:
+                       if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
+                                                     pkt_len))
+                               return false;
+                       data += sizeof(struct nfp_net_tls_resync_req);
+                       break;
+               default:
+                       return true;
+               }
+
+               meta_info >>= NFP_NET_META_FIELD_SIZE;
+       }
+
+       return data != pkt;
+}
+
+static void
+nfp_nfdk_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+                struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
+                struct sk_buff *skb)
+{
+       u64_stats_update_begin(&r_vec->rx_sync);
+       r_vec->rx_drops++;
+       /* If we have both skb and rxbuf the replacement buffer allocation
+        * must have failed, count this as an alloc failure.
+        */
+       if (skb && rxbuf)
+               r_vec->rx_replace_buf_alloc_fail++;
+       u64_stats_update_end(&r_vec->rx_sync);
+
+       /* The skb is built on top of the frag, so freeing the skb would
+        * free the frag; take an extra ref to be able to reuse it.
+        */
+       if (skb && rxbuf && skb->head == rxbuf->frag)
+               page_ref_inc(virt_to_head_page(rxbuf->frag));
+       if (rxbuf)
+               nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
+       if (skb)
+               dev_kfree_skb_any(skb);
+}
+
+static bool nfp_nfdk_xdp_complete(struct nfp_net_tx_ring *tx_ring)
+{
+       struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+       struct nfp_net_rx_ring *rx_ring;
+       u32 qcp_rd_p, done = 0;
+       bool done_all;
+       int todo;
+
+       /* Work out how many descriptors have been transmitted */
+       qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
+       if (qcp_rd_p == tx_ring->qcp_rd_p)
+               return true;
+
+       todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+
+       done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
+       todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
+
+       rx_ring = r_vec->rx_ring;
+       while (todo > 0) {
+               int idx = D_IDX(tx_ring, tx_ring->rd_p + done);
+               struct nfp_nfdk_tx_buf *txbuf;
+               unsigned int step = 1;
+
+               txbuf = &tx_ring->ktxbufs[idx];
+               if (!txbuf->raw)
+                       goto next;
+
+               if (NFDK_TX_BUF_INFO(txbuf->val) != NFDK_TX_BUF_INFO_SOP) {
+                       WARN_ONCE(1, "Unexpected TX buffer in XDP TX ring\n");
+                       goto next;
+               }
+
+               /* Two successive txbufs are used to stash virtual and dma
+                * address respectively, recycle and clean them here.
+                */
+               nfp_nfdk_rx_give_one(dp, rx_ring,
+                                    (void *)NFDK_TX_BUF_PTR(txbuf[0].val),
+                                    txbuf[1].dma_addr);
+               txbuf[0].raw = 0;
+               txbuf[1].raw = 0;
+               step = 2;
+
+               u64_stats_update_begin(&r_vec->tx_sync);
+               /* Note: tx_bytes not accumulated. */
+               r_vec->tx_pkts++;
+               u64_stats_update_end(&r_vec->tx_sync);
+next:
+               todo -= step;
+               done += step;
+       }
+
+       tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + done);
+       tx_ring->rd_p += done;
+
+       WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+                 "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+                 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+
+       return done_all;
+}
+
+static bool
+nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
+                   struct nfp_net_tx_ring *tx_ring,
+                   struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
+                   unsigned int pkt_len, bool *completed)
+{
+       unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
+       unsigned int dma_len, type, cnt, dlen_type, tmp_dlen;
+       struct nfp_nfdk_tx_buf *txbuf;
+       struct nfp_nfdk_tx_desc *txd;
+       unsigned int n_descs;
+       dma_addr_t dma_addr;
+       int wr_idx;
+
+       /* Reject if xdp_adjust_tail grew the packet beyond the DMA area */
+       if (pkt_len + dma_off > dma_map_sz)
+               return false;
+
+       /* Make sure there's still at least one block available after
+        * aligning to block boundary, so that the txds used below
+        * won't wrap around the tx_ring.
+        */
+       if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
+               if (!*completed) {
+                       nfp_nfdk_xdp_complete(tx_ring);
+                       *completed = true;
+               }
+
+               if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
+                       nfp_nfdk_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
+                                        NULL);
+                       return false;
+               }
+       }
+
+       /* Check if cross block boundary */
+       n_descs = nfp_nfdk_headlen_to_segs(pkt_len);
+       if ((round_down(tx_ring->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
+            round_down(tx_ring->wr_p + n_descs, NFDK_TX_DESC_BLOCK_CNT)) ||
+           ((u32)tx_ring->data_pending + pkt_len >
+            NFDK_TX_MAX_DATA_PER_BLOCK)) {
+               unsigned int nop_slots = D_BLOCK_CPL(tx_ring->wr_p);
+
+               wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+               txd = &tx_ring->ktxds[wr_idx];
+               memset(txd, 0,
+                      array_size(nop_slots, sizeof(struct nfp_nfdk_tx_desc)));
+
+               tx_ring->data_pending = 0;
+               tx_ring->wr_p += nop_slots;
+               tx_ring->wr_ptr_add += nop_slots;
+       }
+
+       wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+       txbuf = &tx_ring->ktxbufs[wr_idx];
+
+       txbuf[0].val = (unsigned long)rxbuf->frag | NFDK_TX_BUF_INFO_SOP;
+       txbuf[1].dma_addr = rxbuf->dma_addr;
+       /* Note: pkt len not stored */
+
+       dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
+                                  pkt_len, DMA_BIDIRECTIONAL);
+
+       /* Build TX descriptor */
+       txd = &tx_ring->ktxds[wr_idx];
+       dma_len = pkt_len;
+       dma_addr = rxbuf->dma_addr + dma_off;
+
+       if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+               type = NFDK_DESC_TX_TYPE_SIMPLE;
+       else
+               type = NFDK_DESC_TX_TYPE_GATHER;
+
+       /* FIELD_PREP() implicitly truncates to chunk */
+       dma_len -= 1;
+       dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
+                   FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
+
+       txd->dma_len_type = cpu_to_le16(dlen_type);
+       nfp_desc_set_dma_addr(txd, dma_addr);
+
+       tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
+       dma_len -= tmp_dlen;
+       dma_addr += tmp_dlen + 1;
+       txd++;
+
+       while (dma_len > 0) {
+               dma_len -= 1;
+               dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
+               txd->dma_len_type = cpu_to_le16(dlen_type);
+               nfp_desc_set_dma_addr(txd, dma_addr);
+
+               dlen_type &= NFDK_DESC_TX_DMA_LEN;
+               dma_len -= dlen_type;
+               dma_addr += dlen_type + 1;
+               txd++;
+       }
+
+       (txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);
+
+       /* Metadata desc */
+       txd->raw = 0;
+       txd++;
+
+       cnt = txd - tx_ring->ktxds - wr_idx;
+       tx_ring->wr_p += cnt;
+       if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
+               tx_ring->data_pending += pkt_len;
+       else
+               tx_ring->data_pending = 0;
+
+       tx_ring->wr_ptr_add += cnt;
+       return true;
+}
+
+/**
+ * nfp_nfdk_rx() - receive up to @budget packets on @rx_ring
+ * @rx_ring:   RX ring to receive from
+ * @budget:    NAPI budget
+ *
+ * Note, this function is separated out from the napi poll function to
+ * more cleanly separate packet receive code from other bookkeeping
+ * functions performed in the napi poll function.
+ *
+ * Return: Number of packets received.
+ */
+static int nfp_nfdk_rx(struct nfp_net_rx_ring *rx_ring, int budget)
+{
+       struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+       struct nfp_net_tx_ring *tx_ring;
+       struct bpf_prog *xdp_prog;
+       bool xdp_tx_cmpl = false;
+       unsigned int true_bufsz;
+       struct sk_buff *skb;
+       int pkts_polled = 0;
+       struct xdp_buff xdp;
+       int idx;
+
+       xdp_prog = READ_ONCE(dp->xdp_prog);
+       true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
+       xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
+                     &rx_ring->xdp_rxq);
+       tx_ring = r_vec->xdp_ring;
+
+       while (pkts_polled < budget) {
+               unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+               struct nfp_net_rx_buf *rxbuf;
+               struct nfp_net_rx_desc *rxd;
+               struct nfp_meta_parsed meta;
+               bool redir_egress = false;
+               struct net_device *netdev;
+               dma_addr_t new_dma_addr;
+               u32 meta_len_xdp = 0;
+               void *new_frag;
+
+               idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+               rxd = &rx_ring->rxds[idx];
+               if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+                       break;
+
+               /* Memory barrier to ensure that we won't do other reads
+                * before the DD bit.
+                */
+               dma_rmb();
+
+               memset(&meta, 0, sizeof(meta));
+
+               rx_ring->rd_p++;
+               pkts_polled++;
+
+               rxbuf = &rx_ring->rxbufs[idx];
+               /*         < meta_len >
+                *  <-- [rx_offset] -->
+                *  ---------------------------------------------------------
+                * | [XX] |  metadata  |             packet           | XXXX |
+                *  ---------------------------------------------------------
+                *         <---------------- data_len --------------->
+                *
+                * The rx_offset is fixed for all packets, the meta_len can vary
+                * on a packet by packet basis. If rx_offset is set to zero
+                * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
+                * buffer and is immediately followed by the packet (no [XX]).
+                */
+               meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+               data_len = le16_to_cpu(rxd->rxd.data_len);
+               pkt_len = data_len - meta_len;
+
+               pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+               if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+                       pkt_off += meta_len;
+               else
+                       pkt_off += dp->rx_offset;
+               meta_off = pkt_off - meta_len;
+
+               /* Stats update */
+               u64_stats_update_begin(&r_vec->rx_sync);
+               r_vec->rx_pkts++;
+               r_vec->rx_bytes += pkt_len;
+               u64_stats_update_end(&r_vec->rx_sync);
+
+               if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
+                            (dp->rx_offset && meta_len > dp->rx_offset))) {
+                       nn_dp_warn(dp, "oversized RX packet metadata %u\n",
+                                  meta_len);
+                       nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+                       continue;
+               }
+
+               nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
+                                       data_len);
+
+               if (meta_len) {
+                       if (unlikely(nfp_nfdk_parse_meta(dp->netdev, &meta,
+                                                        rxbuf->frag + meta_off,
+                                                        rxbuf->frag + pkt_off,
+                                                        pkt_len, meta_len))) {
+                               nn_dp_warn(dp, "invalid RX packet metadata\n");
+                               nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf,
+                                                NULL);
+                               continue;
+                       }
+               }
+
+               if (xdp_prog && !meta.portid) {
+                       void *orig_data = rxbuf->frag + pkt_off;
+                       unsigned int dma_off;
+                       int act;
+
+                       xdp_prepare_buff(&xdp,
+                                        rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
+                                        pkt_off - NFP_NET_RX_BUF_HEADROOM,
+                                        pkt_len, true);
+
+                       act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+                       pkt_len = xdp.data_end - xdp.data;
+                       pkt_off += xdp.data - orig_data;
+
+                       switch (act) {
+                       case XDP_PASS:
+                               meta_len_xdp = xdp.data - xdp.data_meta;
+                               break;
+                       case XDP_TX:
+                               dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
+                               if (unlikely(!nfp_nfdk_tx_xdp_buf(dp, rx_ring,
+                                                                 tx_ring,
+                                                                 rxbuf,
+                                                                 dma_off,
+                                                                 pkt_len,
+                                                                 &xdp_tx_cmpl)))
+                                       trace_xdp_exception(dp->netdev,
+                                                           xdp_prog, act);
+                               continue;
+                       default:
+                               bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
+                               fallthrough;
+                       case XDP_ABORTED:
+                               trace_xdp_exception(dp->netdev, xdp_prog, act);
+                               fallthrough;
+                       case XDP_DROP:
+                               nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag,
+                                                    rxbuf->dma_addr);
+                               continue;
+                       }
+               }
+
+               if (likely(!meta.portid)) {
+                       netdev = dp->netdev;
+               } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
+                       struct nfp_net *nn = netdev_priv(dp->netdev);
+
+                       nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
+                                           pkt_len);
+                       nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag,
+                                            rxbuf->dma_addr);
+                       continue;
+               } else {
+                       struct nfp_net *nn;
+
+                       nn = netdev_priv(dp->netdev);
+                       netdev = nfp_app_dev_get(nn->app, meta.portid,
+                                                &redir_egress);
+                       if (unlikely(!netdev)) {
+                               nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf,
+                                                NULL);
+                               continue;
+                       }
+
+                       if (nfp_netdev_is_nfp_repr(netdev))
+                               nfp_repr_inc_rx_stats(netdev, pkt_len);
+               }
+
+               skb = build_skb(rxbuf->frag, true_bufsz);
+               if (unlikely(!skb)) {
+                       nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+                       continue;
+               }
+               new_frag = nfp_nfdk_napi_alloc_one(dp, &new_dma_addr);
+               if (unlikely(!new_frag)) {
+                       nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+                       continue;
+               }
+
+               nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+               nfp_nfdk_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+               skb_reserve(skb, pkt_off);
+               skb_put(skb, pkt_len);
+
+               skb->mark = meta.mark;
+               skb_set_hash(skb, meta.hash, meta.hash_type);
+
+               skb_record_rx_queue(skb, rx_ring->idx);
+               skb->protocol = eth_type_trans(skb, netdev);
+
+               nfp_nfdk_rx_csum(dp, r_vec, rxd, &meta, skb);
+
+               if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                              le16_to_cpu(rxd->rxd.vlan));
+               if (meta_len_xdp)
+                       skb_metadata_set(skb, meta_len_xdp);
+
+               if (likely(!redir_egress)) {
+                       napi_gro_receive(&rx_ring->r_vec->napi, skb);
+               } else {
+                       skb->dev = netdev;
+                       skb_reset_network_header(skb);
+                       __skb_push(skb, ETH_HLEN);
+                       dev_queue_xmit(skb);
+               }
+       }
+
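+       /* If XDP_TX queued descriptors, kick the XDP TX ring below;
+        * otherwise, if completions are outstanding and none were observed
+        * during this poll, reclaim them here. Returning the full budget
+        * keeps NAPI polling so the completions get picked up later.
+        */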
+       if (xdp_prog) {
+               if (tx_ring->wr_ptr_add)
+                       nfp_net_tx_xmit_more_flush(tx_ring);
+               else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
+                        !xdp_tx_cmpl)
+                       if (!nfp_nfdk_xdp_complete(tx_ring))
+                               pkts_polled = budget;
+       }
+
+       return pkts_polled;
+}
+
+/**
+ * nfp_nfdk_poll() - napi poll function
+ * @napi:    NAPI structure
+ * @budget:  NAPI budget
+ *
+ * Return: Number of packets polled.
+ */
+int nfp_nfdk_poll(struct napi_struct *napi, int budget)
+{
+       struct nfp_net_r_vector *r_vec =
+               container_of(napi, struct nfp_net_r_vector, napi);
+       unsigned int pkts_polled = 0;
+
+       if (r_vec->tx_ring)
+               nfp_nfdk_tx_complete(r_vec->tx_ring, budget);
+       if (r_vec->rx_ring)
+               pkts_polled = nfp_nfdk_rx(r_vec->rx_ring, budget);
+
+       if (pkts_polled < budget)
+               if (napi_complete_done(napi, pkts_polled))
+                       nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+
+       if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
+               struct dim_sample dim_sample = {};
+               unsigned int start;
+               u64 pkts, bytes;
+
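+               /* Snapshot the counters consistently against concurrent
+                * writers using the u64_stats retry loop.
+                */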
+               do {
+                       start = u64_stats_fetch_begin(&r_vec->rx_sync);
+                       pkts = r_vec->rx_pkts;
+                       bytes = r_vec->rx_bytes;
+               } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+
+               dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+               net_dim(&r_vec->rx_dim, dim_sample);
+       }
+
+       if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
+               struct dim_sample dim_sample = {};
+               unsigned int start;
+               u64 pkts, bytes;
+
+               do {
+                       start = u64_stats_fetch_begin(&r_vec->tx_sync);
+                       pkts = r_vec->tx_pkts;
+                       bytes = r_vec->tx_bytes;
+               } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+
+               dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+               net_dim(&r_vec->tx_dim, dim_sample);
+       }
+
+       return pkts_polled;
+}
+
+/* Control device data path
+ */
+
+bool
+nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+                    struct sk_buff *skb, bool old)
+{
+       u32 cnt, tmp_dlen, dlen_type = 0;
+       struct nfp_net_tx_ring *tx_ring;
+       struct nfp_nfdk_tx_buf *txbuf;
+       struct nfp_nfdk_tx_desc *txd;
+       unsigned int dma_len, type;
+       struct nfp_net_dp *dp;
+       dma_addr_t dma_addr;
+       u64 metadata = 0;
+       int wr_idx;
+
+       dp = &r_vec->nfp_net->dp;
+       tx_ring = r_vec->tx_ring;
+
+       if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
+               nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
+               goto err_free;
+       }
+
+       /* Don't bother counting frags, assume the worst */
+       if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
+               u64_stats_update_begin(&r_vec->tx_sync);
+               r_vec->tx_busy++;
+               u64_stats_update_end(&r_vec->tx_sync);
+               if (!old)
+                       __skb_queue_tail(&r_vec->queue, skb);
+               else
+                       __skb_queue_head(&r_vec->queue, skb);
+               return NETDEV_TX_BUSY;
+       }
+
+       if (nfp_app_ctrl_has_meta(nn->app)) {
+               if (unlikely(skb_headroom(skb) < 8)) {
+                       nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
+                       goto err_free;
+               }
+               metadata = NFDK_DESC_TX_CHAIN_META;
+               put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
+               put_unaligned_be32(FIELD_PREP(NFDK_META_LEN, 8) |
+                                  FIELD_PREP(NFDK_META_FIELDS,
+                                             NFP_NET_META_PORTID),
+                                  skb_push(skb, 4));
+       }
+
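+       /* When ctrl metadata is in use, the two skb_push() calls above
+        * build an 8 byte prepend; since each push prepends, the final
+        * layout is (illustrative):
+        *   bytes 0-3: NFDK meta header (len 8, field NFP_NET_META_PORTID)
+        *   bytes 4-7: NFP_META_PORT_ID_CTRL
+        */
+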
+       if (nfp_nfdk_tx_maybe_close_block(tx_ring, 0, skb))
+               goto err_free;
+
+       /* DMA map all */
+       wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+       txd = &tx_ring->ktxds[wr_idx];
+       txbuf = &tx_ring->ktxbufs[wr_idx];
+
+       dma_len = skb_headlen(skb);
+       if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+               type = NFDK_DESC_TX_TYPE_SIMPLE;
+       else
+               type = NFDK_DESC_TX_TYPE_GATHER;
+
+       dma_addr = dma_map_single(dp->dev, skb->data, dma_len, DMA_TO_DEVICE);
+       if (dma_mapping_error(dp->dev, dma_addr))
+               goto err_warn_dma;
+
+       txbuf->skb = skb;
+       txbuf++;
+
+       txbuf->dma_addr = dma_addr;
+       txbuf++;
+
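+       /* The descriptor length fields carry the DMA length minus one,
+        * hence the -1 / +1 adjustments below.
+        */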
+       dma_len -= 1;
+       dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
+                   FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
+
+       txd->dma_len_type = cpu_to_le16(dlen_type);
+       nfp_desc_set_dma_addr(txd, dma_addr);
+
+       tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
+       dma_len -= tmp_dlen;
+       dma_addr += tmp_dlen + 1;
+       txd++;
+
+       while (dma_len > 0) {
+               dma_len -= 1;
+               dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
+               txd->dma_len_type = cpu_to_le16(dlen_type);
+               nfp_desc_set_dma_addr(txd, dma_addr);
+
+               dlen_type &= NFDK_DESC_TX_DMA_LEN;
+               dma_len -= dlen_type;
+               dma_addr += dlen_type + 1;
+               txd++;
+       }
+
+       (txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);
+
+       /* Metadata desc */
+       txd->raw = cpu_to_le64(metadata);
+       txd++;
+
+       cnt = txd - tx_ring->ktxds - wr_idx;
+       if (unlikely(round_down(wr_idx, NFDK_TX_DESC_BLOCK_CNT) !=
+                    round_down(wr_idx + cnt - 1, NFDK_TX_DESC_BLOCK_CNT)))
+               goto err_warn_overflow;
+
+       tx_ring->wr_p += cnt;
+       if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
+               tx_ring->data_pending += skb->len;
+       else
+               tx_ring->data_pending = 0;
+
+       tx_ring->wr_ptr_add += cnt;
+       nfp_net_tx_xmit_more_flush(tx_ring);
+
+       return NETDEV_TX_OK;
+
+err_warn_overflow:
+       WARN_ONCE(1, "unable to fit packet into a descriptor wr_idx:%d head:%d frags:%d cnt:%d",
+                 wr_idx, skb_headlen(skb), 0, cnt);
+       txbuf--;
+       dma_unmap_single(dp->dev, txbuf->dma_addr,
+                        skb_headlen(skb), DMA_TO_DEVICE);
+       txbuf->raw = 0;
+err_warn_dma:
+       nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+err_free:
+       u64_stats_update_begin(&r_vec->tx_sync);
+       r_vec->tx_errors++;
+       u64_stats_update_end(&r_vec->tx_sync);
+       dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
+}
+
+static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
+{
+       struct sk_buff *skb;
+
+       while ((skb = __skb_dequeue(&r_vec->queue)))
+               if (nfp_nfdk_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
+                       return;
+}
+
+static bool
+nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
+{
+       u32 meta_type, meta_tag;
+
+       if (!nfp_app_ctrl_has_meta(nn->app))
+               return !meta_len;
+
+       if (meta_len != 8)
+               return false;
+
+       meta_type = get_unaligned_be32(data);
+       meta_tag = get_unaligned_be32(data + 4);
+
+       return (meta_type == NFP_NET_META_PORTID &&
+               meta_tag == NFP_META_PORT_ID_CTRL);
+}
+
+static bool
+nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
+               struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
+{
+       unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+       struct nfp_net_rx_buf *rxbuf;
+       struct nfp_net_rx_desc *rxd;
+       dma_addr_t new_dma_addr;
+       struct sk_buff *skb;
+       void *new_frag;
+       int idx;
+
+       idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+       rxd = &rx_ring->rxds[idx];
+       if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+               return false;
+
+       /* Memory barrier to ensure that we won't do other reads
+        * before checking the DD bit.
+        */
+       dma_rmb();
+
+       rx_ring->rd_p++;
+
+       rxbuf = &rx_ring->rxbufs[idx];
+       meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+       data_len = le16_to_cpu(rxd->rxd.data_len);
+       pkt_len = data_len - meta_len;
+
+       pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+       if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+               pkt_off += meta_len;
+       else
+               pkt_off += dp->rx_offset;
+       meta_off = pkt_off - meta_len;
+
+       /* Stats update */
+       u64_stats_update_begin(&r_vec->rx_sync);
+       r_vec->rx_pkts++;
+       r_vec->rx_bytes += pkt_len;
+       u64_stats_update_end(&r_vec->rx_sync);
+
+       nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
+
+       if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
+               nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
+                          meta_len);
+               nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+               return true;
+       }
+
+       skb = build_skb(rxbuf->frag, dp->fl_bufsz);
+       if (unlikely(!skb)) {
+               nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+               return true;
+       }
+       new_frag = nfp_nfdk_napi_alloc_one(dp, &new_dma_addr);
+       if (unlikely(!new_frag)) {
+               nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+               return true;
+       }
+
+       nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+       nfp_nfdk_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+       skb_reserve(skb, pkt_off);
+       skb_put(skb, pkt_len);
+
+       nfp_app_ctrl_rx(nn->app, skb);
+
+       return true;
+}
+
+static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+{
+       struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
+       struct nfp_net *nn = r_vec->nfp_net;
+       struct nfp_net_dp *dp = &nn->dp;
+       unsigned int budget = 512;
+
+       while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
+               continue;
+
+       return budget;
+}
+
+void nfp_nfdk_ctrl_poll(struct tasklet_struct *t)
+{
+       struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
+
+       spin_lock(&r_vec->lock);
+       nfp_nfdk_tx_complete(r_vec->tx_ring, 0);
+       __nfp_ctrl_tx_queued(r_vec);
+       spin_unlock(&r_vec->lock);
+
+       if (nfp_ctrl_rx(r_vec)) {
+               nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+       } else {
+               tasklet_schedule(&r_vec->tasklet);
+               nn_dp_warn(&r_vec->nfp_net->dp,
+                          "control message budget exceeded!\n");
+       }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h b/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
new file mode 100644 (file)
index 0000000..c41e097
--- /dev/null
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef _NFP_DP_NFDK_H_
+#define _NFP_DP_NFDK_H_
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+#define NFDK_TX_DESC_PER_SIMPLE_PKT    2
+
+#define NFDK_TX_MAX_DATA_PER_HEAD      SZ_4K
+#define NFDK_TX_MAX_DATA_PER_DESC      SZ_16K
+#define NFDK_TX_DESC_BLOCK_SZ          256
+#define NFDK_TX_DESC_BLOCK_CNT         (NFDK_TX_DESC_BLOCK_SZ /        \
+                                        sizeof(struct nfp_nfdk_tx_desc))
+#define NFDK_TX_DESC_STOP_CNT          (NFDK_TX_DESC_BLOCK_CNT *       \
+                                        NFDK_TX_DESC_PER_SIMPLE_PKT)
+#define NFDK_TX_MAX_DATA_PER_BLOCK     SZ_64K
+#define NFDK_TX_DESC_GATHER_MAX                17
+
+/* TX descriptor format */
+
+#define NFDK_DESC_TX_MSS_MASK          GENMASK(13, 0)
+
+#define NFDK_DESC_TX_CHAIN_META                BIT(3)
+#define NFDK_DESC_TX_ENCAP             BIT(2)
+#define NFDK_DESC_TX_L4_CSUM           BIT(1)
+#define NFDK_DESC_TX_L3_CSUM           BIT(0)
+
+#define NFDK_DESC_TX_DMA_LEN_HEAD      GENMASK(11, 0)
+#define NFDK_DESC_TX_TYPE_HEAD         GENMASK(15, 12)
+#define NFDK_DESC_TX_DMA_LEN           GENMASK(13, 0)
+#define NFDK_DESC_TX_TYPE_NOP          0
+#define NFDK_DESC_TX_TYPE_GATHER       1
+#define NFDK_DESC_TX_TYPE_TSO          2
+#define NFDK_DESC_TX_TYPE_SIMPLE       8
+#define NFDK_DESC_TX_EOP               BIT(14)
+
+#define NFDK_META_LEN                  GENMASK(7, 0)
+#define NFDK_META_FIELDS               GENMASK(31, 8)
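+
+/* Example (illustrative only): the head descriptor of a 300 byte simple
+ * packet would carry
+ *   FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, 300 - 1) |
+ *   FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, NFDK_DESC_TX_TYPE_SIMPLE)
+ * in dma_len_type, i.e. the length field holds len - 1.
+ */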
+
+#define D_BLOCK_CPL(idx)               (NFDK_TX_DESC_BLOCK_CNT -       \
+                                        (idx) % NFDK_TX_DESC_BLOCK_CNT)
+
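+/* Example (illustrative): with an 8 byte descriptor,
+ * NFDK_TX_DESC_BLOCK_CNT is 256 / 8 == 32, so D_BLOCK_CPL(60) ==
+ * 32 - (60 % 32) == 4 descriptors to the end of the current block.
+ */
+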
+struct nfp_nfdk_tx_desc {
+       union {
+               struct {
+                       u8 dma_addr_hi;  /* High bits of host buf address */
+                       u8 padding;  /* Must be zero */
+                       __le16 dma_len_type; /* Length to DMA for this desc */
+                       __le32 dma_addr_lo;  /* Low 32bit of host buf addr */
+               };
+
+               struct {
+                       __le16 mss;     /* MSS to be used for LSO */
+                       u8 lso_hdrlen;  /* LSO, TCP payload offset */
+                       u8 lso_totsegs; /* LSO, total segments */
+                       u8 l3_offset;   /* L3 header offset */
+                       u8 l4_offset;   /* L4 header offset */
+                       __le16 lso_meta_res; /* Rsvd bits in TSO metadata */
+               };
+
+               struct {
+                       u8 flags;       /* TX Flags, see @NFDK_DESC_TX_* */
+                       u8 reserved[7]; /* meta byte placeholder */
+               };
+
+               __le32 vals[2];
+               __le64 raw;
+       };
+};
+
+/* The device doesn't make use of the 2 or 3 least significant bits of the address
+ * due to alignment constraints. The driver can make use of those bits to carry
+ * information about the buffer before giving it to the device.
+ *
+ * NOTE: The driver must clear the lower bits before handing the buffer to the
+ * device.
+ *
+ * - NFDK_TX_BUF_INFO_SOP - Start of a packet
+ *   Mark the buffer as a start of a packet. This is used in the XDP TX process
+ *   to stash virtual and DMA address so that they can be recycled when the TX
+ *   operation is completed.
+ */
+#define NFDK_TX_BUF_PTR(val) ((val) & ~(sizeof(void *) - 1))
+#define NFDK_TX_BUF_INFO(val) ((val) & (sizeof(void *) - 1))
+#define NFDK_TX_BUF_INFO_SOP BIT(0)
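+
+/* Example (illustrative): on a 64-bit kernel the low three bits of the
+ * pointer are free, so an XDP TX head buffer can be stashed as
+ *   txbuf->val = (unsigned long)frag | NFDK_TX_BUF_INFO_SOP;
+ * and recovered later with NFDK_TX_BUF_PTR(txbuf->val) and
+ * NFDK_TX_BUF_INFO(txbuf->val).
+ */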
+
+struct nfp_nfdk_tx_buf {
+       union {
+               /* First slot */
+               union {
+                       struct sk_buff *skb;
+                       void *frag;
+                       unsigned long val;
+               };
+
+               /* 1 + nr_frags next slots */
+               dma_addr_t dma_addr;
+
+               /* TSO (optional) */
+               struct {
+                       u32 pkt_cnt;
+                       u32 real_len;
+               };
+
+               u64 raw;
+       };
+};
+
+static inline int nfp_nfdk_headlen_to_segs(unsigned int headlen)
+{
+       /* First descriptor fits less data, so adjust for that */
+       return DIV_ROUND_UP(headlen +
+                           NFDK_TX_MAX_DATA_PER_DESC -
+                           NFDK_TX_MAX_DATA_PER_HEAD,
+                           NFDK_TX_MAX_DATA_PER_DESC);
+}
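+
+/* Example (illustrative): a 5000 byte head needs
+ * DIV_ROUND_UP(5000 + SZ_16K - SZ_4K, SZ_16K) == 2 descriptors:
+ * the first carries at most SZ_4K, the remainder fits in one more.
+ */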
+
+int nfp_nfdk_poll(struct napi_struct *napi, int budget);
+netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev);
+bool
+nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+                    struct sk_buff *skb, bool old);
+void nfp_nfdk_ctrl_poll(struct tasklet_struct *t);
+void nfp_nfdk_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+                                   struct nfp_net_rx_ring *rx_ring);
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/rings.c b/drivers/net/ethernet/netronome/nfp/nfdk/rings.c
new file mode 100644 (file)
index 0000000..301f111
--- /dev/null
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/seq_file.h>
+
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "nfdk.h"
+
+static void
+nfp_nfdk_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+       struct device *dev = dp->dev;
+       struct netdev_queue *nd_q;
+
+       while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
+               const skb_frag_t *frag, *fend;
+               unsigned int size, n_descs = 1;
+               struct nfp_nfdk_tx_buf *txbuf;
+               int nr_frags, rd_idx;
+               struct sk_buff *skb;
+
+               rd_idx = D_IDX(tx_ring, tx_ring->rd_p);
+               txbuf = &tx_ring->ktxbufs[rd_idx];
+
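+               /* A buffer slot without an skb is assumed to be NOP padding
+                * at the end of a descriptor block; the !skb branch below
+                * skips to the next block boundary.
+                */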
+               skb = txbuf->skb;
+               if (!skb) {
+                       n_descs = D_BLOCK_CPL(tx_ring->rd_p);
+                       goto next;
+               }
+
+               nr_frags = skb_shinfo(skb)->nr_frags;
+               txbuf++;
+
+               /* Unmap head */
+               size = skb_headlen(skb);
+               dma_unmap_single(dev, txbuf->dma_addr, size, DMA_TO_DEVICE);
+               n_descs += nfp_nfdk_headlen_to_segs(size);
+               txbuf++;
+
+               frag = skb_shinfo(skb)->frags;
+               fend = frag + nr_frags;
+               for (; frag < fend; frag++) {
+                       size = skb_frag_size(frag);
+                       dma_unmap_page(dev, txbuf->dma_addr,
+                                      skb_frag_size(frag), DMA_TO_DEVICE);
+                       n_descs += DIV_ROUND_UP(size,
+                                               NFDK_TX_MAX_DATA_PER_DESC);
+                       txbuf++;
+               }
+
+               if (skb_is_gso(skb))
+                       n_descs++;
+
+               dev_kfree_skb_any(skb);
+next:
+               tx_ring->rd_p += n_descs;
+       }
+
+       memset(tx_ring->txds, 0, tx_ring->size);
+       tx_ring->data_pending = 0;
+       tx_ring->wr_p = 0;
+       tx_ring->rd_p = 0;
+       tx_ring->qcp_rd_p = 0;
+       tx_ring->wr_ptr_add = 0;
+
+       if (tx_ring->is_xdp || !dp->netdev)
+               return;
+
+       nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+       netdev_tx_reset_queue(nd_q);
+}
+
+static void nfp_nfdk_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
+{
+       struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+
+       kvfree(tx_ring->ktxbufs);
+
+       if (tx_ring->ktxds)
+               dma_free_coherent(dp->dev, tx_ring->size,
+                                 tx_ring->ktxds, tx_ring->dma);
+
+       tx_ring->cnt = 0;
+       tx_ring->txbufs = NULL;
+       tx_ring->txds = NULL;
+       tx_ring->dma = 0;
+       tx_ring->size = 0;
+}
+
+static int
+nfp_nfdk_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+       struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+
+       tx_ring->cnt = dp->txd_cnt * NFDK_TX_DESC_PER_SIMPLE_PKT;
+       tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->ktxds));
+       tx_ring->ktxds = dma_alloc_coherent(dp->dev, tx_ring->size,
+                                           &tx_ring->dma,
+                                           GFP_KERNEL | __GFP_NOWARN);
+       if (!tx_ring->ktxds) {
+               netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
+                           tx_ring->cnt);
+               goto err_alloc;
+       }
+
+       tx_ring->ktxbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->ktxbufs),
+                                   GFP_KERNEL);
+       if (!tx_ring->ktxbufs)
+               goto err_alloc;
+
+       if (!tx_ring->is_xdp && dp->netdev)
+               netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
+                                   tx_ring->idx);
+
+       return 0;
+
+err_alloc:
+       nfp_nfdk_tx_ring_free(tx_ring);
+       return -ENOMEM;
+}
+
+static void
+nfp_nfdk_tx_ring_bufs_free(struct nfp_net_dp *dp,
+                          struct nfp_net_tx_ring *tx_ring)
+{
+}
+
+static int
+nfp_nfdk_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
+                           struct nfp_net_tx_ring *tx_ring)
+{
+       return 0;
+}
+
+static void
+nfp_nfdk_print_tx_descs(struct seq_file *file,
+                       struct nfp_net_r_vector *r_vec,
+                       struct nfp_net_tx_ring *tx_ring,
+                       u32 d_rd_p, u32 d_wr_p)
+{
+       struct nfp_nfdk_tx_desc *txd;
+       u32 txd_cnt = tx_ring->cnt;
+       int i;
+
+       for (i = 0; i < txd_cnt; i++) {
+               txd = &tx_ring->ktxds[i];
+
+               seq_printf(file, "%04d: 0x%08x 0x%08x 0x%016llx", i,
+                          txd->vals[0], txd->vals[1], tx_ring->ktxbufs[i].raw);
+
+               if (i == tx_ring->rd_p % txd_cnt)
+                       seq_puts(file, " H_RD");
+               if (i == tx_ring->wr_p % txd_cnt)
+                       seq_puts(file, " H_WR");
+               if (i == d_rd_p % txd_cnt)
+                       seq_puts(file, " D_RD");
+               if (i == d_wr_p % txd_cnt)
+                       seq_puts(file, " D_WR");
+
+               seq_putc(file, '\n');
+       }
+}
+
+#define NFP_NFDK_CFG_CTRL_SUPPORTED                                    \
+       (NFP_NET_CFG_CTRL_ENABLE | NFP_NET_CFG_CTRL_PROMISC |           \
+        NFP_NET_CFG_CTRL_L2BC | NFP_NET_CFG_CTRL_L2MC |                \
+        NFP_NET_CFG_CTRL_RXCSUM | NFP_NET_CFG_CTRL_TXCSUM |            \
+        NFP_NET_CFG_CTRL_RXVLAN |                                      \
+        NFP_NET_CFG_CTRL_GATHER | NFP_NET_CFG_CTRL_LSO |               \
+        NFP_NET_CFG_CTRL_CTAG_FILTER | NFP_NET_CFG_CTRL_CMSG_DATA |    \
+        NFP_NET_CFG_CTRL_RINGCFG | NFP_NET_CFG_CTRL_IRQMOD |           \
+        NFP_NET_CFG_CTRL_TXRWB |                                       \
+        NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE |              \
+        NFP_NET_CFG_CTRL_BPF | NFP_NET_CFG_CTRL_LSO2 |                 \
+        NFP_NET_CFG_CTRL_RSS2 | NFP_NET_CFG_CTRL_CSUM_COMPLETE |       \
+        NFP_NET_CFG_CTRL_LIVE_ADDR)
+
+const struct nfp_dp_ops nfp_nfdk_ops = {
+       .version                = NFP_NFD_VER_NFDK,
+       .tx_min_desc_per_pkt    = NFDK_TX_DESC_PER_SIMPLE_PKT,
+       .cap_mask               = NFP_NFDK_CFG_CTRL_SUPPORTED,
+       .poll                   = nfp_nfdk_poll,
+       .ctrl_poll              = nfp_nfdk_ctrl_poll,
+       .xmit                   = nfp_nfdk_tx,
+       .ctrl_tx_one            = nfp_nfdk_ctrl_tx_one,
+       .rx_ring_fill_freelist  = nfp_nfdk_rx_ring_fill_freelist,
+       .tx_ring_alloc          = nfp_nfdk_tx_ring_alloc,
+       .tx_ring_reset          = nfp_nfdk_tx_ring_reset,
+       .tx_ring_free           = nfp_nfdk_tx_ring_free,
+       .tx_ring_bufs_alloc     = nfp_nfdk_tx_ring_bufs_alloc,
+       .tx_ring_bufs_free      = nfp_nfdk_tx_ring_bufs_free,
+       .print_tx_descs         = nfp_nfdk_print_tx_descs
+};
index 3a97328..09f250e 100644 (file)
@@ -121,7 +121,7 @@ struct nfp_reprs *
 nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type)
 {
        return rcu_dereference_protected(app->reprs[type],
-                                        lockdep_is_held(&app->pf->lock));
+                                        nfp_app_is_locked(app));
 }
 
 struct nfp_reprs *
index 3e9baff..dd56207 100644 (file)
@@ -75,7 +75,7 @@ extern const struct nfp_app_type app_abm;
  * @bpf:       BPF ndo offload-related calls
  * @xdp_offload:    offload an XDP program
  * @eswitch_mode_get:    get SR-IOV eswitch mode
- * @eswitch_mode_set:    set SR-IOV eswitch mode (under pf->lock)
+ * @eswitch_mode_set:    set SR-IOV eswitch mode
  * @sriov_enable: app-specific sriov initialisation
  * @sriov_disable: app-specific sriov clean-up
  * @dev_get:   get representor or internal port representing netdev
@@ -174,6 +174,16 @@ struct nfp_app {
        void *priv;
 };
 
+static inline void assert_nfp_app_locked(struct nfp_app *app)
+{
+       devl_assert_locked(priv_to_devlink(app->pf));
+}
+
+static inline bool nfp_app_is_locked(struct nfp_app *app)
+{
+       return devl_lock_is_held(priv_to_devlink(app->pf));
+}
+
 void nfp_check_rhashtable_empty(void *ptr, void *arg);
 bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
 bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
index bea978d..405786c 100644 (file)
@@ -26,12 +26,11 @@ nfp_devlink_fill_eth_port(struct nfp_port *port,
 }
 
 static int
-nfp_devlink_fill_eth_port_from_id(struct nfp_pf *pf, unsigned int port_index,
+nfp_devlink_fill_eth_port_from_id(struct nfp_pf *pf,
+                                 struct devlink_port *dl_port,
                                  struct nfp_eth_table_port *copy)
 {
-       struct nfp_port *port;
-
-       port = nfp_port_from_id(pf, NFP_PORT_PHYS_PORT, port_index);
+       struct nfp_port *port = container_of(dl_port, struct nfp_port, dl_port);
 
        return nfp_devlink_fill_eth_port(port, copy);
 }
@@ -62,7 +61,7 @@ nfp_devlink_set_lanes(struct nfp_pf *pf, unsigned int idx, unsigned int lanes)
 }
 
 static int
-nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
+nfp_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
                       unsigned int count, struct netlink_ext_ack *extack)
 {
        struct nfp_pf *pf = devlink_priv(devlink);
@@ -70,33 +69,25 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
        unsigned int lanes;
        int ret;
 
-       mutex_lock(&pf->lock);
-
        rtnl_lock();
-       ret = nfp_devlink_fill_eth_port_from_id(pf, port_index, &eth_port);
+       ret = nfp_devlink_fill_eth_port_from_id(pf, port, &eth_port);
        rtnl_unlock();
        if (ret)
-               goto out;
+               return ret;
 
-       if (eth_port.port_lanes % count) {
-               ret = -EINVAL;
-               goto out;
-       }
+       if (eth_port.port_lanes % count)
+               return -EINVAL;
 
        /* Special case the 100G CXP -> 2x40G split */
        lanes = eth_port.port_lanes / count;
        if (eth_port.lanes == 10 && count == 2)
                lanes = 8 / count;
 
-       ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
-out:
-       mutex_unlock(&pf->lock);
-
-       return ret;
+       return nfp_devlink_set_lanes(pf, eth_port.index, lanes);
 }
 
 static int
-nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
+nfp_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
                         struct netlink_ext_ack *extack)
 {
        struct nfp_pf *pf = devlink_priv(devlink);
@@ -104,29 +95,21 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
        unsigned int lanes;
        int ret;
 
-       mutex_lock(&pf->lock);
-
        rtnl_lock();
-       ret = nfp_devlink_fill_eth_port_from_id(pf, port_index, &eth_port);
+       ret = nfp_devlink_fill_eth_port_from_id(pf, port, &eth_port);
        rtnl_unlock();
        if (ret)
-               goto out;
+               return ret;
 
-       if (!eth_port.is_split) {
-               ret = -EINVAL;
-               goto out;
-       }
+       if (!eth_port.is_split)
+               return -EINVAL;
 
        /* Special case the 100G CXP -> 2x40G unsplit */
        lanes = eth_port.port_lanes;
        if (eth_port.port_lanes == 8)
                lanes = 10;
 
-       ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
-out:
-       mutex_unlock(&pf->lock);
-
-       return ret;
+       return nfp_devlink_set_lanes(pf, eth_port.index, lanes);
 }
 
 static int
@@ -161,13 +144,8 @@ static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
                                        struct netlink_ext_ack *extack)
 {
        struct nfp_pf *pf = devlink_priv(devlink);
-       int ret;
-
-       mutex_lock(&pf->lock);
-       ret = nfp_app_eswitch_mode_set(pf->app, mode);
-       mutex_unlock(&pf->lock);
 
-       return ret;
+       return nfp_app_eswitch_mode_set(pf->app, mode);
 }
 
 static const struct nfp_devlink_versions_simple {
@@ -375,12 +353,12 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
 
        devlink = priv_to_devlink(app->pf);
 
-       return devlink_port_register(devlink, &port->dl_port, port->eth_id);
+       return devl_port_register(devlink, &port->dl_port, port->eth_id);
 }
 
 void nfp_devlink_port_unregister(struct nfp_port *port)
 {
-       devlink_port_unregister(&port->dl_port);
+       devl_port_unregister(&port->dl_port);
 }
 
 void nfp_devlink_port_type_eth_set(struct nfp_port *port)
index bb3b8a7..eeda39e 100644 (file)
@@ -19,6 +19,7 @@
 
 #include "nfpcore/nfp.h"
 #include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_dev.h"
 #include "nfpcore/nfp_nffw.h"
 #include "nfpcore/nfp_nsp.h"
 
 static const char nfp_driver_name[] = "nfp";
 
 static const struct pci_device_id nfp_pci_device_ids[] = {
-       { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000,
+       { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP3800,
          PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
-         PCI_ANY_ID, 0,
+         PCI_ANY_ID, 0, NFP_DEV_NFP3800,
+       },
+       { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000,
+         PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
+         PCI_ANY_ID, 0, NFP_DEV_NFP6000,
        },
        { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000,
          PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
-         PCI_ANY_ID, 0,
+         PCI_ANY_ID, 0, NFP_DEV_NFP6000,
        },
-       { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000,
+       { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000,
          PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
-         PCI_ANY_ID, 0,
+         PCI_ANY_ID, 0, NFP_DEV_NFP6000,
        },
        { 0, } /* Required last entry. */
 };
@@ -222,6 +227,7 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
 {
 #ifdef CONFIG_PCI_IOV
        struct nfp_pf *pf = pci_get_drvdata(pdev);
+       struct devlink *devlink;
        int err;
 
        if (num_vfs > pf->limit_vfs) {
@@ -236,7 +242,8 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
                return err;
        }
 
-       mutex_lock(&pf->lock);
+       devlink = priv_to_devlink(pf);
+       devl_lock(devlink);
 
        err = nfp_app_sriov_enable(pf->app, num_vfs);
        if (err) {
@@ -250,11 +257,11 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
 
        dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs);
 
-       mutex_unlock(&pf->lock);
+       devl_unlock(devlink);
        return num_vfs;
 
 err_sriov_disable:
-       mutex_unlock(&pf->lock);
+       devl_unlock(devlink);
        pci_disable_sriov(pdev);
        return err;
 #endif
@@ -265,8 +272,10 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
 {
 #ifdef CONFIG_PCI_IOV
        struct nfp_pf *pf = pci_get_drvdata(pdev);
+       struct devlink *devlink;
 
-       mutex_lock(&pf->lock);
+       devlink = priv_to_devlink(pf);
+       devl_lock(devlink);
 
        /* If the VFs are assigned we cannot shut down SR-IOV without
         * causing issues, so just leave the hardware available but
@@ -274,7 +283,7 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
         */
        if (pci_vfs_assigned(pdev)) {
                dev_warn(&pdev->dev, "Disabling while VFs assigned - VFs will not be deallocated\n");
-               mutex_unlock(&pf->lock);
+               devl_unlock(devlink);
                return -EPERM;
        }
 
@@ -282,7 +291,7 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
 
        pf->num_vfs = 0;
 
-       mutex_unlock(&pf->lock);
+       devl_unlock(devlink);
 
        pci_disable_sriov(pdev);
        dev_dbg(&pdev->dev, "Removed VFs.\n");
@@ -667,6 +676,7 @@ static int nfp_pf_find_rtsyms(struct nfp_pf *pf)
 static int nfp_pci_probe(struct pci_dev *pdev,
                         const struct pci_device_id *pci_id)
 {
+       const struct nfp_dev_info *dev_info;
        struct devlink *devlink;
        struct nfp_pf *pf;
        int err;
@@ -675,14 +685,15 @@ static int nfp_pci_probe(struct pci_dev *pdev,
            pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000_VF)
                dev_warn(&pdev->dev, "Binding NFP VF device to the NFP PF driver, the VF driver is called 'nfp_netvf'\n");
 
+       dev_info = &nfp_dev_info[pci_id->driver_data];
+
        err = pci_enable_device(pdev);
        if (err < 0)
                return err;
 
        pci_set_master(pdev);
 
-       err = dma_set_mask_and_coherent(&pdev->dev,
-                                       DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS));
+       err = dma_set_mask_and_coherent(&pdev->dev, dev_info->dma_mask);
        if (err)
                goto err_pci_disable;
 
@@ -700,9 +711,9 @@ static int nfp_pci_probe(struct pci_dev *pdev,
        pf = devlink_priv(devlink);
        INIT_LIST_HEAD(&pf->vnics);
        INIT_LIST_HEAD(&pf->ports);
-       mutex_init(&pf->lock);
        pci_set_drvdata(pdev, pf);
        pf->pdev = pdev;
+       pf->dev_info = dev_info;
 
        pf->wq = alloc_workqueue("nfp-%s", 0, 2, pci_name(pdev));
        if (!pf->wq) {
@@ -710,7 +721,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
                goto err_pci_priv_unset;
        }
 
-       pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev);
+       pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev, dev_info);
        if (IS_ERR(pf->cpp)) {
                err = PTR_ERR(pf->cpp);
                goto err_disable_msix;
@@ -790,7 +801,6 @@ err_disable_msix:
        destroy_workqueue(pf->wq);
 err_pci_priv_unset:
        pci_set_drvdata(pdev, NULL);
-       mutex_destroy(&pf->lock);
        devlink_free(devlink);
 err_rel_regions:
        pci_release_regions(pdev);
@@ -827,7 +837,6 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw)
 
        kfree(pf->eth_tbl);
        kfree(pf->nspi);
-       mutex_destroy(&pf->lock);
        devlink_free(priv_to_devlink(pf));
        pci_release_regions(pdev);
        pci_disable_device(pdev);
index a7dede9..f56ca11 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/list.h>
 #include <linux/types.h>
 #include <linux/msi.h>
-#include <linux/mutex.h>
 #include <linux/pci.h>
 #include <linux/workqueue.h>
 #include <net/devlink.h>
@@ -48,6 +47,7 @@ struct nfp_dumpspec {
 /**
  * struct nfp_pf - NFP PF-specific device structure
  * @pdev:              Backpointer to PCI device
+ * @dev_info:          NFP ASIC params
  * @cpp:               Pointer to the CPP handle
  * @app:               Pointer to the APP handle
  * @data_vnic_bar:     Pointer to the CPP area for the data vNICs' BARs
@@ -84,10 +84,12 @@ struct nfp_dumpspec {
  * @port_refresh_work: Work entry for taking netdevs out
  * @shared_bufs:       Array of shared buffer structures if FW has any SBs
  * @num_shared_bufs:   Number of elements in @shared_bufs
- * @lock:              Protects all fields which may change after probe
+ *
+ * Fields which may change after probe are protected by the devlink instance lock.
  */
 struct nfp_pf {
        struct pci_dev *pdev;
+       const struct nfp_dev_info *dev_info;
 
        struct nfp_cpp *cpp;
 
@@ -139,8 +141,6 @@ struct nfp_pf {
 
        struct nfp_shared_buf *shared_bufs;
        unsigned int num_shared_bufs;
-
-       struct mutex lock;
 };
 
 extern struct pci_driver nfp_netvf_pci_driver;
index 437a197..428783b 100644 (file)
@@ -63,9 +63,6 @@
 #define NFP_NET_Q0_BAR         2
 #define NFP_NET_Q1_BAR         4       /* OBSOLETE */
 
-/* Max bits in DMA address */
-#define NFP_NET_MAX_DMA_BITS   40
-
 /* Default size for MTU and freelist buffer sizes */
 #define NFP_NET_DEFAULT_MTU            1500U
 
                                 NFP_NET_MAX_TX_RINGS : NFP_NET_MAX_RX_RINGS)
 #define NFP_NET_MAX_IRQS       (NFP_NET_NON_Q_VECTORS + NFP_NET_MAX_R_VECS)
 
-#define NFP_NET_MIN_TX_DESCS   256     /* Min. # of Tx descs per ring */
-#define NFP_NET_MIN_RX_DESCS   256     /* Min. # of Rx descs per ring */
-#define NFP_NET_MAX_TX_DESCS   (256 * 1024) /* Max. # of Tx descs per ring */
-#define NFP_NET_MAX_RX_DESCS   (256 * 1024) /* Max. # of Rx descs per ring */
-
 #define NFP_NET_TX_DESCS_DEFAULT 4096  /* Default # of Tx descs per ring */
 #define NFP_NET_RX_DESCS_DEFAULT 4096  /* Default # of Rx descs per ring */
 
 
 /* Forward declarations */
 struct nfp_cpp;
+struct nfp_dev_info;
+struct nfp_dp_ops;
 struct nfp_eth_table_port;
 struct nfp_net;
 struct nfp_net_r_vector;
 struct nfp_port;
 struct xsk_buff_pool;
 
+struct nfp_nfd3_tx_desc;
+struct nfp_nfd3_tx_buf;
+
+struct nfp_nfdk_tx_desc;
+struct nfp_nfdk_tx_buf;
+
 /* Convenience macro for wrapping descriptor index on ring size */
 #define D_IDX(ring, idx)       ((idx) & ((ring)->cnt - 1))
 
@@ -124,97 +124,25 @@ struct xsk_buff_pool;
                __d->dma_addr_hi = upper_32_bits(__addr) & 0xff;        \
        } while (0)
 
-/* TX descriptor format */
-
-#define PCIE_DESC_TX_EOP               BIT(7)
-#define PCIE_DESC_TX_OFFSET_MASK       GENMASK(6, 0)
-#define PCIE_DESC_TX_MSS_MASK          GENMASK(13, 0)
-
-/* Flags in the host TX descriptor */
-#define PCIE_DESC_TX_CSUM              BIT(7)
-#define PCIE_DESC_TX_IP4_CSUM          BIT(6)
-#define PCIE_DESC_TX_TCP_CSUM          BIT(5)
-#define PCIE_DESC_TX_UDP_CSUM          BIT(4)
-#define PCIE_DESC_TX_VLAN              BIT(3)
-#define PCIE_DESC_TX_LSO               BIT(2)
-#define PCIE_DESC_TX_ENCAP             BIT(1)
-#define PCIE_DESC_TX_O_IP4_CSUM        BIT(0)
-
-struct nfp_net_tx_desc {
-       union {
-               struct {
-                       u8 dma_addr_hi; /* High bits of host buf address */
-                       __le16 dma_len; /* Length to DMA for this desc */
-                       u8 offset_eop;  /* Offset in buf where pkt starts +
-                                        * highest bit is eop flag.
-                                        */
-                       __le32 dma_addr_lo; /* Low 32bit of host buf addr */
-
-                       __le16 mss;     /* MSS to be used for LSO */
-                       u8 lso_hdrlen;  /* LSO, TCP payload offset */
-                       u8 flags;       /* TX Flags, see @PCIE_DESC_TX_* */
-                       union {
-                               struct {
-                                       u8 l3_offset; /* L3 header offset */
-                                       u8 l4_offset; /* L4 header offset */
-                               };
-                               __le16 vlan; /* VLAN tag to add if indicated */
-                       };
-                       __le16 data_len; /* Length of frame + meta data */
-               } __packed;
-               __le32 vals[4];
-               __le64 vals8[2];
-       };
-};
-
-/**
- * struct nfp_net_tx_buf - software TX buffer descriptor
- * @skb:       normal ring, sk_buff associated with this buffer
- * @frag:      XDP ring, page frag associated with this buffer
- * @xdp:       XSK buffer pool handle (for AF_XDP)
- * @dma_addr:  DMA mapping address of the buffer
- * @fidx:      Fragment index (-1 for the head and [0..nr_frags-1] for frags)
- * @pkt_cnt:   Number of packets to be produced out of the skb associated
- *             with this buffer (valid only on the head's buffer).
- *             Will be 1 for all non-TSO packets.
- * @is_xsk_tx: Flag if buffer is a RX buffer after a XDP_TX action and not a
- *             buffer from the TX queue (for AF_XDP).
- * @real_len:  Number of bytes which to be produced out of the skb (valid only
- *             on the head's buffer). Equal to skb->len for non-TSO packets.
- */
-struct nfp_net_tx_buf {
-       union {
-               struct sk_buff *skb;
-               void *frag;
-               struct xdp_buff *xdp;
-       };
-       dma_addr_t dma_addr;
-       union {
-               struct {
-                       short int fidx;
-                       u16 pkt_cnt;
-               };
-               struct {
-                       bool is_xsk_tx;
-               };
-       };
-       u32 real_len;
-};
-
 /**
  * struct nfp_net_tx_ring - TX ring structure
  * @r_vec:      Back pointer to ring vector structure
  * @idx:        Ring index from Linux's perspective
- * @qcidx:      Queue Controller Peripheral (QCP) queue index for the TX queue
+ * @data_pending: number of bytes added to current block (NFDK only)
  * @qcp_q:      Pointer to base of the QCP TX queue
+ * @txrwb:     TX pointer write back area
  * @cnt:        Size of the queue in number of descriptors
  * @wr_p:       TX ring write pointer (free running)
  * @rd_p:       TX ring read pointer (free running)
  * @qcp_rd_p:   Local copy of QCP TX queue read pointer
  * @wr_ptr_add:        Accumulated number of buffers to add to QCP write pointer
  *             (used for .xmit_more delayed kick)
- * @txbufs:     Array of transmitted TX buffers, to free on transmit
- * @txds:       Virtual address of TX ring in host memory
+ * @txbufs:    Array of transmitted TX buffers, to free on transmit (NFD3)
+ * @ktxbufs:   Array of transmitted TX buffers, to free on transmit (NFDK)
+ * @txds:      Virtual address of TX ring in host memory (NFD3)
+ * @ktxds:     Virtual address of TX ring in host memory (NFDK)
+ *
+ * @qcidx:      Queue Controller Peripheral (QCP) queue index for the TX queue
  * @dma:        DMA address of the TX ring
  * @size:       Size, in bytes, of the TX ring (needed to free)
  * @is_xdp:    Is this a XDP TX ring?
@@ -222,9 +150,10 @@ struct nfp_net_tx_buf {
 struct nfp_net_tx_ring {
        struct nfp_net_r_vector *r_vec;
 
-       u32 idx;
-       int qcidx;
+       u16 idx;
+       u16 data_pending;
        u8 __iomem *qcp_q;
+       u64 *txrwb;
 
        u32 cnt;
        u32 wr_p;
@@ -233,8 +162,17 @@ struct nfp_net_tx_ring {
 
        u32 wr_ptr_add;
 
-       struct nfp_net_tx_buf *txbufs;
-       struct nfp_net_tx_desc *txds;
+       union {
+               struct nfp_nfd3_tx_buf *txbufs;
+               struct nfp_nfdk_tx_buf *ktxbufs;
+       };
+       union {
+               struct nfp_nfd3_tx_desc *txds;
+               struct nfp_nfdk_tx_desc *ktxds;
+       };
+
+       /* Cold data follows */
+       int qcidx;
 
        dma_addr_t dma;
        size_t size;
@@ -486,13 +424,17 @@ struct nfp_net_fw_version {
        u8 minor;
        u8 major;
        u8 class;
-       u8 resv;
+
+       /* This byte is available for further use; currently,
+        * BIT0: dp type, BIT[7:1]: reserved
+        */
+       u8 extend;
 } __packed;
 
 static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
-                                    u8 resv, u8 class, u8 major, u8 minor)
+                                    u8 extend, u8 class, u8 major, u8 minor)
 {
-       return fw_ver->resv == resv &&
+       return fw_ver->extend == extend &&
               fw_ver->class == class &&
               fw_ver->major == major &&
               fw_ver->minor == minor;
@@ -520,8 +462,11 @@ struct nfp_stat_pair {
  * @rx_rings:          Array of pre-allocated RX ring structures
  * @ctrl_bar:          Pointer to mapped control BAR
  *
- * @txd_cnt:           Size of the TX ring in number of descriptors
- * @rxd_cnt:           Size of the RX ring in number of descriptors
+ * @ops:               Callbacks and parameters for this vNIC's NFD version
+ * @txrwb:             TX pointer write back area (indexed by queue id)
+ * @txrwb_dma:         TX pointer write back area DMA address
+ * @txd_cnt:           Size of the TX ring in number of min size packets
+ * @rxd_cnt:           Size of the RX ring in number of min size packets
  * @num_r_vecs:                Number of used ring vectors
  * @num_tx_rings:      Currently configured number of TX rings
  * @num_stack_tx_rings:        Number of TX rings used by the stack (not XDP)
@@ -554,6 +499,11 @@ struct nfp_net_dp {
 
        /* Cold data follows */
 
+       const struct nfp_dp_ops *ops;
+
+       u64 *txrwb;
+       dma_addr_t txrwb_dma;
+
        unsigned int txd_cnt;
        unsigned int rxd_cnt;
 
@@ -571,6 +521,7 @@ struct nfp_net_dp {
 /**
  * struct nfp_net - NFP network device structure
  * @dp:                        Datapath structure
+ * @dev_info:          NFP ASIC params
  * @id:                        vNIC id within the PF (0 for VFs)
  * @fw_ver:            Firmware version
  * @cap:                Capabilities advertised by the Firmware
@@ -644,6 +595,7 @@ struct nfp_net_dp {
 struct nfp_net {
        struct nfp_net_dp dp;
 
+       const struct nfp_dev_info *dev_info;
        struct nfp_net_fw_version fw_ver;
 
        u32 id;
@@ -796,7 +748,6 @@ static inline void nn_pci_flush(struct nfp_net *nn)
  * either add to a pointer or to read the pointer value.
  */
 #define NFP_QCP_QUEUE_ADDR_SZ                  0x800
-#define NFP_QCP_QUEUE_AREA_SZ                  0x80000
 #define NFP_QCP_QUEUE_OFF(_x)                  ((_x) * NFP_QCP_QUEUE_ADDR_SZ)
 #define NFP_QCP_QUEUE_ADD_RPTR                 0x0000
 #define NFP_QCP_QUEUE_ADD_WPTR                 0x0004
@@ -805,50 +756,21 @@ static inline void nn_pci_flush(struct nfp_net *nn)
 #define NFP_QCP_QUEUE_STS_HI                   0x000c
 #define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask     0x3ffff
 
-/* The offset of a QCP queues in the PCIe Target */
-#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
-
 /* nfp_qcp_ptr - Read or Write Pointer of a queue */
 enum nfp_qcp_ptr {
        NFP_QCP_READ_PTR = 0,
        NFP_QCP_WRITE_PTR
 };
 
-/* There appear to be an *undocumented* upper limit on the value which
- * one can add to a queue and that value is either 0x3f or 0x7f.  We
- * go with 0x3f as a conservative measure.
- */
-#define NFP_QCP_MAX_ADD                                0x3f
-
-static inline void _nfp_qcp_ptr_add(u8 __iomem *q,
-                                   enum nfp_qcp_ptr ptr, u32 val)
-{
-       u32 off;
-
-       if (ptr == NFP_QCP_READ_PTR)
-               off = NFP_QCP_QUEUE_ADD_RPTR;
-       else
-               off = NFP_QCP_QUEUE_ADD_WPTR;
-
-       while (val > NFP_QCP_MAX_ADD) {
-               writel(NFP_QCP_MAX_ADD, q + off);
-               val -= NFP_QCP_MAX_ADD;
-       }
-
-       writel(val, q + off);
-}
-
 /**
  * nfp_qcp_rd_ptr_add() - Add the value to the read pointer of a queue
  *
  * @q:   Base address for queue structure
  * @val: Value to add to the queue pointer
- *
- * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
  */
 static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
 {
-       _nfp_qcp_ptr_add(q, NFP_QCP_READ_PTR, val);
+       writel(val, q + NFP_QCP_QUEUE_ADD_RPTR);
 }
 
 /**
@@ -856,12 +778,10 @@ static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
  *
  * @q:   Base address for queue structure
  * @val: Value to add to the queue pointer
- *
- * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
  */
 static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
 {
-       _nfp_qcp_ptr_add(q, NFP_QCP_WRITE_PTR, val);
+       writel(val, q + NFP_QCP_QUEUE_ADD_WPTR);
 }
 
 static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
@@ -904,6 +824,8 @@ static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
        return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
 }
 
+u32 nfp_qcp_queue_offset(const struct nfp_dev_info *dev_info, u16 queue);
+
 static inline bool nfp_net_is_data_vnic(struct nfp_net *nn)
 {
        WARN_ON_ONCE(!nn->dp.netdev && nn->port);
@@ -950,11 +872,13 @@ static inline void nn_ctrl_bar_unlock(struct nfp_net *nn)
 /* Globals */
 extern const char nfp_driver_version[];
 
-extern const struct net_device_ops nfp_net_netdev_ops;
+extern const struct net_device_ops nfp_nfd3_netdev_ops;
+extern const struct net_device_ops nfp_nfdk_netdev_ops;
 
 static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev)
 {
-       return netdev->netdev_ops == &nfp_net_netdev_ops;
+       return netdev->netdev_ops == &nfp_nfd3_netdev_ops ||
+              netdev->netdev_ops == &nfp_nfdk_netdev_ops;
 }
 
 static inline int nfp_net_coalesce_para_check(u32 usecs, u32 pkts)
@@ -970,7 +894,8 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
                            void __iomem *ctrl_bar);
 
 struct nfp_net *
-nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
+nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
+             void __iomem *ctrl_bar, bool needs_netdev,
              unsigned int max_tx_rings, unsigned int max_rx_rings);
 void nfp_net_free(struct nfp_net *nn);
 
@@ -994,7 +919,6 @@ int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd);
 void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 update);
 int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn);
 
-void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr);
 unsigned int
 nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
                   unsigned int min_irqs, unsigned int want_irqs);
@@ -1002,19 +926,10 @@ void nfp_net_irqs_disable(struct pci_dev *pdev);
 void
 nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
                    unsigned int n);
-
-void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring);
-void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget);
-
-bool
-nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
-                  void *data, void *pkt, unsigned int pkt_len, int meta_len);
-
-void nfp_net_rx_csum(const struct nfp_net_dp *dp,
-                    struct nfp_net_r_vector *r_vec,
-                    const struct nfp_net_rx_desc *rxd,
-                    const struct nfp_meta_parsed *meta,
-                    struct sk_buff *skb);
+struct sk_buff *
+nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+              struct sk_buff *skb, u64 *tls_handle, int *nr_frags);
+void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle);
 
 struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
 int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
index 00a09b9..b412670 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
 
 /*
  * nfp_net_common.c
@@ -13,7 +13,6 @@
 
 #include <linux/bitfield.h>
 #include <linux/bpf.h>
-#include <linux/bpf_trace.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <net/vxlan.h>
 #include <net/xdp_sock_drv.h>
 
+#include "nfpcore/nfp_dev.h"
 #include "nfpcore/nfp_nsp.h"
 #include "ccm.h"
 #include "nfp_app.h"
 #include "nfp_net_ctrl.h"
 #include "nfp_net.h"
+#include "nfp_net_dp.h"
 #include "nfp_net_sriov.h"
 #include "nfp_net_xsk.h"
 #include "nfp_port.h"
@@ -65,33 +66,10 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
        put_unaligned_le32(reg, fw_ver);
 }
 
-static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
+u32 nfp_qcp_queue_offset(const struct nfp_dev_info *dev_info, u16 queue)
 {
-       return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
-                                   dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
-                                   dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-}
-
-static void
-nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
-{
-       dma_sync_single_for_device(dp->dev, dma_addr,
-                                  dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
-                                  dp->rx_dma_dir);
-}
-
-static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
-{
-       dma_unmap_single_attrs(dp->dev, dma_addr,
-                              dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
-                              dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-}
-
-static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr,
-                                   unsigned int len)
-{
-       dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
-                               len, dp->rx_dma_dir);
+       queue &= dev_info->qc_idx_mask;
+       return dev_info->qc_addr_offset + NFP_QCP_QUEUE_ADDR_SZ * queue;
 }
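
Editor's note: the new nfp_qcp_queue_offset() helper replaces the old fixed NFP_QCP_QUEUE_OFF() macro with a per-chip computation. A minimal sketch of the arithmetic, with placeholder values standing in for the real struct nfp_dev_info fields:

#include <stdint.h>

/* Placeholder values for illustration only; the real qc_idx_mask,
 * qc_addr_offset and NFP_QCP_QUEUE_ADDR_SZ come from the driver's
 * per-chip tables.
 */
#define DEMO_QC_IDX_MASK     0x3ffu
#define DEMO_QC_ADDR_OFFSET  0x80000u
#define DEMO_QUEUE_ADDR_SZ   0x800u

static uint32_t demo_qcp_queue_offset(uint16_t queue)
{
        queue &= DEMO_QC_IDX_MASK;   /* wrap the index into the valid range */
        return DEMO_QC_ADDR_OFFSET + DEMO_QUEUE_ADDR_SZ * queue;
}
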
 
 /* Firmware reconfig
@@ -376,19 +354,6 @@ int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
 /* Interrupt configuration and handling
  */
 
-/**
- * nfp_net_irq_unmask() - Unmask automasked interrupt
- * @nn:       NFP Network structure
- * @entry_nr: MSI-X table entry
- *
- * Clear the ICR for the IRQ entry.
- */
-void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
-{
-       nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
-       nn_pci_flush(nn);
-}
-
 /**
  * nfp_net_irqs_alloc() - allocates MSI-X irqs
  * @pdev:        PCI device structure
@@ -570,49 +535,6 @@ static irqreturn_t nfp_net_irq_exn(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-/**
- * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
- * @tx_ring:  TX ring structure
- * @r_vec:    IRQ vector servicing this ring
- * @idx:      Ring index
- * @is_xdp:   Is this an XDP TX ring?
- */
-static void
-nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
-                    struct nfp_net_r_vector *r_vec, unsigned int idx,
-                    bool is_xdp)
-{
-       struct nfp_net *nn = r_vec->nfp_net;
-
-       tx_ring->idx = idx;
-       tx_ring->r_vec = r_vec;
-       tx_ring->is_xdp = is_xdp;
-       u64_stats_init(&tx_ring->r_vec->tx_sync);
-
-       tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
-       tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
-}
-
-/**
- * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
- * @rx_ring:  RX ring structure
- * @r_vec:    IRQ vector servicing this ring
- * @idx:      Ring index
- */
-static void
-nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
-                    struct nfp_net_r_vector *r_vec, unsigned int idx)
-{
-       struct nfp_net *nn = r_vec->nfp_net;
-
-       rx_ring->idx = idx;
-       rx_ring->r_vec = r_vec;
-       u64_stats_init(&rx_ring->r_vec->rx_sync);
-
-       rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
-       rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
-}
-
 /**
  * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
  * @nn:                NFP Network structure
@@ -660,178 +582,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
        free_irq(nn->irq_entries[vector_idx].vector, nn);
 }
 
-/* Transmit
- *
- * One queue controller peripheral queue is used for transmit.  The
- * driver en-queues packets for transmit by advancing the write
- * pointer.  The device indicates that packets have been transmitted by
- * advancing the read pointer.  The driver maintains a local copy of
- * the read and write pointer in @struct nfp_net_tx_ring.  The driver
- * keeps @wr_p in sync with the queue controller write pointer and can
- * determine how many packets have been transmitted by comparing its
- * copy of the read pointer @rd_p with the read pointer maintained by
- * the queue controller peripheral.
- */
-
-/**
- * nfp_net_tx_full() - Check if the TX ring is full
- * @tx_ring: TX ring to check
- * @dcnt:    Number of descriptors that need to be enqueued (must be >= 1)
- *
- * This function checks, based on the *host copy* of the read/write
- * pointers, whether a given TX ring is full.  The real TX queue may have
- * some newly made available slots.
- *
- * Return: True if the ring is full.
- */
-static int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
-{
-       return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
-}
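
Editor's note: the fullness check deleted here relies on free-running unsigned counters: wr_p and rd_p only ever increase, so their difference is the number of in-flight descriptors even across 32-bit wrap-around. A standalone sketch of the idea (names hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* wr_p/rd_p are free-running; unsigned subtraction stays correct when
 * either counter wraps, as long as fewer than 2^32 entries are in flight.
 */
static bool demo_tx_full(uint32_t wr_p, uint32_t rd_p, uint32_t cnt,
                         uint32_t dcnt)
{
        return (wr_p - rd_p) >= (cnt - dcnt);
}
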
-
-/* Wrappers for deciding when to stop and restart TX queues */
-static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
-{
-       return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
-}
-
-static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
-{
-       return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
-}
-
-/**
- * nfp_net_tx_ring_stop() - stop tx ring
- * @nd_q:    netdev queue
- * @tx_ring: driver tx queue structure
- *
- * Safely stop TX ring.  Remember that while we are running .start_xmit()
- * someone else may be processing TX ring completions, so we need to be
- * extra careful here.
- */
-static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
-                                struct nfp_net_tx_ring *tx_ring)
-{
-       netif_tx_stop_queue(nd_q);
-
-       /* We can race with the TX completion out of NAPI so recheck */
-       smp_mb();
-       if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
-               netif_tx_start_queue(nd_q);
-}
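
Editor's note: the stop path re-checks after a full barrier because a concurrent completion may have freed descriptors between the fullness test and the stop. A rough model of the handshake using C11 atomics in place of netif_tx_stop_queue()/smp_mb() (illustrative only, not the driver's API):

#include <stdatomic.h>
#include <stdbool.h>

struct demo_ring {
        atomic_uint wr_p, rd_p;
        unsigned int cnt;
        atomic_bool stopped;
};

static bool demo_should_wake(struct demo_ring *r, unsigned int thresh)
{
        return (atomic_load(&r->wr_p) - atomic_load(&r->rd_p)) <
               r->cnt - thresh;
}

static void demo_ring_stop(struct demo_ring *r, unsigned int thresh)
{
        atomic_store(&r->stopped, true);
        /* Full fence, playing the role of smp_mb(): order the stop
         * against the re-read of the pointers below.
         */
        atomic_thread_fence(memory_order_seq_cst);
        if (demo_should_wake(r, thresh))        /* completion raced us */
                atomic_store(&r->stopped, false);
}
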
-
-/**
- * nfp_net_tx_tso() - Set up Tx descriptor for LSO
- * @r_vec: per-ring structure
- * @txbuf: Pointer to driver soft TX descriptor
- * @txd: Pointer to HW TX descriptor
- * @skb: Pointer to SKB
- * @md_bytes: Prepend length
- *
- * Set up the TX descriptor for LSO; do nothing for non-LSO skbs.
- */
-static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
-                          struct nfp_net_tx_buf *txbuf,
-                          struct nfp_net_tx_desc *txd, struct sk_buff *skb,
-                          u32 md_bytes)
-{
-       u32 l3_offset, l4_offset, hdrlen;
-       u16 mss;
-
-       if (!skb_is_gso(skb))
-               return;
-
-       if (!skb->encapsulation) {
-               l3_offset = skb_network_offset(skb);
-               l4_offset = skb_transport_offset(skb);
-               hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
-       } else {
-               l3_offset = skb_inner_network_offset(skb);
-               l4_offset = skb_inner_transport_offset(skb);
-               hdrlen = skb_inner_transport_header(skb) - skb->data +
-                       inner_tcp_hdrlen(skb);
-       }
-
-       txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
-       txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
-
-       mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
-       txd->l3_offset = l3_offset - md_bytes;
-       txd->l4_offset = l4_offset - md_bytes;
-       txd->lso_hdrlen = hdrlen - md_bytes;
-       txd->mss = cpu_to_le16(mss);
-       txd->flags |= PCIE_DESC_TX_LSO;
-
-       u64_stats_update_begin(&r_vec->tx_sync);
-       r_vec->tx_lso++;
-       u64_stats_update_end(&r_vec->tx_sync);
-}
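
Editor's note: one subtlety in the deleted LSO setup is the real_len adjustment: every segment the hardware generates carries its own copy of the headers, so the bytes-on-wire count grows by hdrlen per extra segment. A sketch of that accounting:

/* skb_len already includes one copy of the headers; each of the
 * remaining gso_segs - 1 segments adds another copy.
 */
static unsigned int demo_lso_real_len(unsigned int skb_len,
                                      unsigned int hdrlen,
                                      unsigned int gso_segs)
{
        return skb_len + hdrlen * (gso_segs - 1);
}
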
-
-/**
- * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
- * @dp:  NFP Net data path struct
- * @r_vec: per-ring structure
- * @txbuf: Pointer to driver soft TX descriptor
- * @txd: Pointer to TX descriptor
- * @skb: Pointer to SKB
- *
- * This function sets the TX checksum flags in the TX descriptor based
- * on the configuration and the protocol of the packet to be transmitted.
- */
-static void nfp_net_tx_csum(struct nfp_net_dp *dp,
-                           struct nfp_net_r_vector *r_vec,
-                           struct nfp_net_tx_buf *txbuf,
-                           struct nfp_net_tx_desc *txd, struct sk_buff *skb)
-{
-       struct ipv6hdr *ipv6h;
-       struct iphdr *iph;
-       u8 l4_hdr;
-
-       if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
-               return;
-
-       if (skb->ip_summed != CHECKSUM_PARTIAL)
-               return;
-
-       txd->flags |= PCIE_DESC_TX_CSUM;
-       if (skb->encapsulation)
-               txd->flags |= PCIE_DESC_TX_ENCAP;
-
-       iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
-       ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
-
-       if (iph->version == 4) {
-               txd->flags |= PCIE_DESC_TX_IP4_CSUM;
-               l4_hdr = iph->protocol;
-       } else if (ipv6h->version == 6) {
-               l4_hdr = ipv6h->nexthdr;
-       } else {
-               nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
-               return;
-       }
-
-       switch (l4_hdr) {
-       case IPPROTO_TCP:
-               txd->flags |= PCIE_DESC_TX_TCP_CSUM;
-               break;
-       case IPPROTO_UDP:
-               txd->flags |= PCIE_DESC_TX_UDP_CSUM;
-               break;
-       default:
-               nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
-               return;
-       }
-
-       u64_stats_update_begin(&r_vec->tx_sync);
-       if (skb->encapsulation)
-               r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
-       else
-               r_vec->hw_csum_tx += txbuf->pkt_cnt;
-       u64_stats_update_end(&r_vec->tx_sync);
-}
-
-static struct sk_buff *
+struct sk_buff *
 nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
               struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
 {
@@ -882,1498 +633,93 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
                /* jump forward, a TX may have gotten lost, need to sync TX */
                if (!resync_pending && seq - ntls->next_seq < U32_MAX / 4)
                        tls_offload_tx_resync_request(nskb->sk, seq,
-                                                     ntls->next_seq);
-
-               *nr_frags = 0;
-               return nskb;
-       }
-
-       if (datalen) {
-               u64_stats_update_begin(&r_vec->tx_sync);
-               if (!skb_is_gso(skb))
-                       r_vec->hw_tls_tx++;
-               else
-                       r_vec->hw_tls_tx += skb_shinfo(skb)->gso_segs;
-               u64_stats_update_end(&r_vec->tx_sync);
-       }
-
-       memcpy(tls_handle, ntls->fw_handle, sizeof(ntls->fw_handle));
-       ntls->next_seq += datalen;
-#endif
-       return skb;
-}
-
-static void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
-{
-#ifdef CONFIG_TLS_DEVICE
-       struct nfp_net_tls_offload_ctx *ntls;
-       u32 datalen, seq;
-
-       if (!tls_handle)
-               return;
-       if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
-               return;
-
-       datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
-       seq = ntohl(tcp_hdr(skb)->seq);
-
-       ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
-       if (ntls->next_seq == seq + datalen)
-               ntls->next_seq = seq;
-       else
-               WARN_ON_ONCE(1);
-#endif
-}
-
-void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
-{
-       wmb();
-       nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
-       tx_ring->wr_ptr_add = 0;
-}
-
-static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
-{
-       struct metadata_dst *md_dst = skb_metadata_dst(skb);
-       unsigned char *data;
-       u32 meta_id = 0;
-       int md_bytes;
-
-       if (likely(!md_dst && !tls_handle))
-               return 0;
-       if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) {
-               if (!tls_handle)
-                       return 0;
-               md_dst = NULL;
-       }
-
-       md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8;
-
-       if (unlikely(skb_cow_head(skb, md_bytes)))
-               return -ENOMEM;
-
-       meta_id = 0;
-       data = skb_push(skb, md_bytes) + md_bytes;
-       if (md_dst) {
-               data -= 4;
-               put_unaligned_be32(md_dst->u.port_info.port_id, data);
-               meta_id = NFP_NET_META_PORTID;
-       }
-       if (tls_handle) {
-               /* conn handle is opaque, we just use u64 to be able to quickly
-                * compare it to zero
-                */
-               data -= 8;
-               memcpy(data, &tls_handle, sizeof(tls_handle));
-               meta_id <<= NFP_NET_META_FIELD_SIZE;
-               meta_id |= NFP_NET_META_CONN_HANDLE;
-       }
-
-       data -= 4;
-       put_unaligned_be32(meta_id, data);
-
-       return md_bytes;
-}
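
Editor's note: the TX metadata prepend removed here is built back-to-front: fields are written from the end of the prepend toward the packet start, and each field shifts its 4-bit type tag into the meta_id word. A sketch of the tag packing (tag values hypothetical):

#include <stdint.h>

#define DEMO_META_FIELD_SIZE   4        /* bits per type tag */
#define DEMO_META_PORTID       5        /* hypothetical tag values */
#define DEMO_META_CONN_HANDLE  6

/* Tags are packed in the order the fields are prepended: the tag
 * written last lands in the low bits and therefore describes the
 * field sitting right after the meta_id word.
 */
static uint32_t demo_pack_meta_id(void)
{
        uint32_t meta_id = DEMO_META_PORTID;

        meta_id <<= DEMO_META_FIELD_SIZE;
        meta_id |= DEMO_META_CONN_HANDLE;
        return meta_id;                  /* 0x56 with the demo values */
}
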
-
-/**
- * nfp_net_tx() - Main transmit entry point
- * @skb:    SKB to transmit
- * @netdev: netdev structure
- *
- * Return: NETDEV_TX_OK on success.
- */
-static netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
-{
-       struct nfp_net *nn = netdev_priv(netdev);
-       const skb_frag_t *frag;
-       int f, nr_frags, wr_idx, md_bytes;
-       struct nfp_net_tx_ring *tx_ring;
-       struct nfp_net_r_vector *r_vec;
-       struct nfp_net_tx_buf *txbuf;
-       struct nfp_net_tx_desc *txd;
-       struct netdev_queue *nd_q;
-       struct nfp_net_dp *dp;
-       dma_addr_t dma_addr;
-       unsigned int fsize;
-       u64 tls_handle = 0;
-       u16 qidx;
-
-       dp = &nn->dp;
-       qidx = skb_get_queue_mapping(skb);
-       tx_ring = &dp->tx_rings[qidx];
-       r_vec = tx_ring->r_vec;
-
-       nr_frags = skb_shinfo(skb)->nr_frags;
-
-       if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
-               nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
-                          qidx, tx_ring->wr_p, tx_ring->rd_p);
-               nd_q = netdev_get_tx_queue(dp->netdev, qidx);
-               netif_tx_stop_queue(nd_q);
-               nfp_net_tx_xmit_more_flush(tx_ring);
-               u64_stats_update_begin(&r_vec->tx_sync);
-               r_vec->tx_busy++;
-               u64_stats_update_end(&r_vec->tx_sync);
-               return NETDEV_TX_BUSY;
-       }
-
-       skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
-       if (unlikely(!skb)) {
-               nfp_net_tx_xmit_more_flush(tx_ring);
-               return NETDEV_TX_OK;
-       }
-
-       md_bytes = nfp_net_prep_tx_meta(skb, tls_handle);
-       if (unlikely(md_bytes < 0))
-               goto err_flush;
-
-       /* Start with the head skbuf */
-       dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
-                                 DMA_TO_DEVICE);
-       if (dma_mapping_error(dp->dev, dma_addr))
-               goto err_dma_err;
-
-       wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
-
-       /* Stash the soft descriptor of the head then initialize it */
-       txbuf = &tx_ring->txbufs[wr_idx];
-       txbuf->skb = skb;
-       txbuf->dma_addr = dma_addr;
-       txbuf->fidx = -1;
-       txbuf->pkt_cnt = 1;
-       txbuf->real_len = skb->len;
-
-       /* Build TX descriptor */
-       txd = &tx_ring->txds[wr_idx];
-       txd->offset_eop = (nr_frags ? 0 : PCIE_DESC_TX_EOP) | md_bytes;
-       txd->dma_len = cpu_to_le16(skb_headlen(skb));
-       nfp_desc_set_dma_addr(txd, dma_addr);
-       txd->data_len = cpu_to_le16(skb->len);
-
-       txd->flags = 0;
-       txd->mss = 0;
-       txd->lso_hdrlen = 0;
-
-       /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
-       nfp_net_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
-       nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
-       if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
-               txd->flags |= PCIE_DESC_TX_VLAN;
-               txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
-       }
-
-       /* Gather DMA */
-       if (nr_frags > 0) {
-               __le64 second_half;
-
-               /* all descs must match except for in addr, length and eop */
-               second_half = txd->vals8[1];
-
-               for (f = 0; f < nr_frags; f++) {
-                       frag = &skb_shinfo(skb)->frags[f];
-                       fsize = skb_frag_size(frag);
-
-                       dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
-                                                   fsize, DMA_TO_DEVICE);
-                       if (dma_mapping_error(dp->dev, dma_addr))
-                               goto err_unmap;
-
-                       wr_idx = D_IDX(tx_ring, wr_idx + 1);
-                       tx_ring->txbufs[wr_idx].skb = skb;
-                       tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
-                       tx_ring->txbufs[wr_idx].fidx = f;
-
-                       txd = &tx_ring->txds[wr_idx];
-                       txd->dma_len = cpu_to_le16(fsize);
-                       nfp_desc_set_dma_addr(txd, dma_addr);
-                       txd->offset_eop = md_bytes |
-                               ((f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0);
-                       txd->vals8[1] = second_half;
-               }
-
-               u64_stats_update_begin(&r_vec->tx_sync);
-               r_vec->tx_gather++;
-               u64_stats_update_end(&r_vec->tx_sync);
-       }
-
-       skb_tx_timestamp(skb);
-
-       nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
-
-       tx_ring->wr_p += nr_frags + 1;
-       if (nfp_net_tx_ring_should_stop(tx_ring))
-               nfp_net_tx_ring_stop(nd_q, tx_ring);
-
-       tx_ring->wr_ptr_add += nr_frags + 1;
-       if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
-               nfp_net_tx_xmit_more_flush(tx_ring);
-
-       return NETDEV_TX_OK;
-
-err_unmap:
-       while (--f >= 0) {
-               frag = &skb_shinfo(skb)->frags[f];
-               dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
-                              skb_frag_size(frag), DMA_TO_DEVICE);
-               tx_ring->txbufs[wr_idx].skb = NULL;
-               tx_ring->txbufs[wr_idx].dma_addr = 0;
-               tx_ring->txbufs[wr_idx].fidx = -2;
-               wr_idx = wr_idx - 1;
-               if (wr_idx < 0)
-                       wr_idx += tx_ring->cnt;
-       }
-       dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
-                        skb_headlen(skb), DMA_TO_DEVICE);
-       tx_ring->txbufs[wr_idx].skb = NULL;
-       tx_ring->txbufs[wr_idx].dma_addr = 0;
-       tx_ring->txbufs[wr_idx].fidx = -2;
-err_dma_err:
-       nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
-err_flush:
-       nfp_net_tx_xmit_more_flush(tx_ring);
-       u64_stats_update_begin(&r_vec->tx_sync);
-       r_vec->tx_errors++;
-       u64_stats_update_end(&r_vec->tx_sync);
-       nfp_net_tls_tx_undo(skb, tls_handle);
-       dev_kfree_skb_any(skb);
-       return NETDEV_TX_OK;
-}
-
-/**
- * nfp_net_tx_complete() - Handle completed TX packets
- * @tx_ring:   TX ring structure
- * @budget:    NAPI budget (only used as bool to determine if in NAPI context)
- */
-void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
-{
-       struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
-       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
-       struct netdev_queue *nd_q;
-       u32 done_pkts = 0, done_bytes = 0;
-       u32 qcp_rd_p;
-       int todo;
-
-       if (tx_ring->wr_p == tx_ring->rd_p)
-               return;
-
-       /* Work out how many descriptors have been transmitted */
-       qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
-
-       if (qcp_rd_p == tx_ring->qcp_rd_p)
-               return;
-
-       todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
-
-       while (todo--) {
-               const skb_frag_t *frag;
-               struct nfp_net_tx_buf *tx_buf;
-               struct sk_buff *skb;
-               int fidx, nr_frags;
-               int idx;
-
-               idx = D_IDX(tx_ring, tx_ring->rd_p++);
-               tx_buf = &tx_ring->txbufs[idx];
-
-               skb = tx_buf->skb;
-               if (!skb)
-                       continue;
-
-               nr_frags = skb_shinfo(skb)->nr_frags;
-               fidx = tx_buf->fidx;
-
-               if (fidx == -1) {
-                       /* unmap head */
-                       dma_unmap_single(dp->dev, tx_buf->dma_addr,
-                                        skb_headlen(skb), DMA_TO_DEVICE);
-
-                       done_pkts += tx_buf->pkt_cnt;
-                       done_bytes += tx_buf->real_len;
-               } else {
-                       /* unmap fragment */
-                       frag = &skb_shinfo(skb)->frags[fidx];
-                       dma_unmap_page(dp->dev, tx_buf->dma_addr,
-                                      skb_frag_size(frag), DMA_TO_DEVICE);
-               }
-
-               /* check for last gather fragment */
-               if (fidx == nr_frags - 1)
-                       napi_consume_skb(skb, budget);
-
-               tx_buf->dma_addr = 0;
-               tx_buf->skb = NULL;
-               tx_buf->fidx = -2;
-       }
-
-       tx_ring->qcp_rd_p = qcp_rd_p;
-
-       u64_stats_update_begin(&r_vec->tx_sync);
-       r_vec->tx_bytes += done_bytes;
-       r_vec->tx_pkts += done_pkts;
-       u64_stats_update_end(&r_vec->tx_sync);
-
-       if (!dp->netdev)
-               return;
-
-       nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
-       netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
-       if (nfp_net_tx_ring_should_wake(tx_ring)) {
-               /* Make sure TX thread will see updated tx_ring->rd_p */
-               smp_mb();
-
-               if (unlikely(netif_tx_queue_stopped(nd_q)))
-                       netif_tx_wake_queue(nd_q);
-       }
-
-       WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
-                 "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
-                 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
-}
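
Editor's note: throughout the completion path, D_IDX() (defined elsewhere in the driver) reduces a free-running pointer to a ring slot. Assuming power-of-two ring sizes, it is a simple mask; a minimal sketch under that assumption:

#include <stdint.h>

/* Valid only for power-of-two ring sizes. */
static inline uint32_t demo_d_idx(uint32_t ptr, uint32_t cnt)
{
        return ptr & (cnt - 1);
}

/* e.g. the number of newly completed descriptors above would be
 * demo_d_idx(qcp_rd_p - old_qcp_rd_p, cnt).
 */
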
-
-static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
-{
-       struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
-       u32 done_pkts = 0, done_bytes = 0;
-       bool done_all;
-       int idx, todo;
-       u32 qcp_rd_p;
-
-       /* Work out how many descriptors have been transmitted */
-       qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
-
-       if (qcp_rd_p == tx_ring->qcp_rd_p)
-               return true;
-
-       todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
-
-       done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
-       todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
-
-       tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
-
-       done_pkts = todo;
-       while (todo--) {
-               idx = D_IDX(tx_ring, tx_ring->rd_p);
-               tx_ring->rd_p++;
-
-               done_bytes += tx_ring->txbufs[idx].real_len;
-       }
-
-       u64_stats_update_begin(&r_vec->tx_sync);
-       r_vec->tx_bytes += done_bytes;
-       r_vec->tx_pkts += done_pkts;
-       u64_stats_update_end(&r_vec->tx_sync);
-
-       WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
-                 "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
-                 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
-
-       return done_all;
-}
-
-/**
- * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
- * @dp:                NFP Net data path struct
- * @tx_ring:   TX ring structure
- *
- * Assumes that the device is stopped, must be idempotent.
- */
-static void
-nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
-{
-       const skb_frag_t *frag;
-       struct netdev_queue *nd_q;
-
-       while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
-               struct nfp_net_tx_buf *tx_buf;
-               struct sk_buff *skb;
-               int idx, nr_frags;
-
-               idx = D_IDX(tx_ring, tx_ring->rd_p);
-               tx_buf = &tx_ring->txbufs[idx];
-
-               skb = tx_ring->txbufs[idx].skb;
-               nr_frags = skb_shinfo(skb)->nr_frags;
-
-               if (tx_buf->fidx == -1) {
-                       /* unmap head */
-                       dma_unmap_single(dp->dev, tx_buf->dma_addr,
-                                        skb_headlen(skb), DMA_TO_DEVICE);
-               } else {
-                       /* unmap fragment */
-                       frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
-                       dma_unmap_page(dp->dev, tx_buf->dma_addr,
-                                      skb_frag_size(frag), DMA_TO_DEVICE);
-               }
-
-               /* check for last gather fragment */
-               if (tx_buf->fidx == nr_frags - 1)
-                       dev_kfree_skb_any(skb);
-
-               tx_buf->dma_addr = 0;
-               tx_buf->skb = NULL;
-               tx_buf->fidx = -2;
-
-               tx_ring->qcp_rd_p++;
-               tx_ring->rd_p++;
-       }
-
-       if (tx_ring->is_xdp)
-               nfp_net_xsk_tx_bufs_free(tx_ring);
-
-       memset(tx_ring->txds, 0, tx_ring->size);
-       tx_ring->wr_p = 0;
-       tx_ring->rd_p = 0;
-       tx_ring->qcp_rd_p = 0;
-       tx_ring->wr_ptr_add = 0;
-
-       if (tx_ring->is_xdp || !dp->netdev)
-               return;
-
-       nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
-       netdev_tx_reset_queue(nd_q);
-}
-
-static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
-{
-       struct nfp_net *nn = netdev_priv(netdev);
-
-       nn_warn(nn, "TX watchdog timeout on ring: %u\n", txqueue);
-}
-
-/* Receive processing
- */
-static unsigned int
-nfp_net_calc_fl_bufsz_data(struct nfp_net_dp *dp)
-{
-       unsigned int fl_bufsz = 0;
-
-       if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
-               fl_bufsz += NFP_NET_MAX_PREPEND;
-       else
-               fl_bufsz += dp->rx_offset;
-       fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;
-
-       return fl_bufsz;
-}
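
Editor's note: the data-size half of the freelist buffer calculation adds the worst-case prepend (in the dynamic-offset mode), link headers, and the MTU. A rough standalone model with placeholder constants:

/* Placeholder constants; the real NFP_NET_MAX_PREPEND etc. live in
 * the driver headers.
 */
#define DEMO_MAX_PREPEND  64
#define DEMO_ETH_HLEN     14
#define DEMO_VLAN_HLEN     4

static unsigned int demo_fl_bufsz_data(unsigned int rx_offset, int dynamic,
                                       unsigned int mtu)
{
        unsigned int sz = dynamic ? DEMO_MAX_PREPEND : rx_offset;

        return sz + DEMO_ETH_HLEN + 2 * DEMO_VLAN_HLEN + mtu;
}
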
-
-static unsigned int nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
-{
-       unsigned int fl_bufsz;
-
-       fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
-       fl_bufsz += dp->rx_dma_off;
-       fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);
-
-       fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
-       fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
-       return fl_bufsz;
-}
-
-static unsigned int nfp_net_calc_fl_bufsz_xsk(struct nfp_net_dp *dp)
-{
-       unsigned int fl_bufsz;
-
-       fl_bufsz = XDP_PACKET_HEADROOM;
-       fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);
-
-       return fl_bufsz;
-}
-
-static void
-nfp_net_free_frag(void *frag, bool xdp)
-{
-       if (!xdp)
-               skb_free_frag(frag);
-       else
-               __free_page(virt_to_page(frag));
-}
-
-/**
- * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
- * @dp:                NFP Net data path struct
- * @dma_addr:  Pointer to storage for DMA address (output param)
- *
- * This function will allocate a new page frag and map it for DMA.
- *
- * Return: allocated page frag or NULL on failure.
- */
-static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
-{
-       void *frag;
-
-       if (!dp->xdp_prog) {
-               frag = netdev_alloc_frag(dp->fl_bufsz);
-       } else {
-               struct page *page;
-
-               page = alloc_page(GFP_KERNEL);
-               frag = page ? page_address(page) : NULL;
-       }
-       if (!frag) {
-               nn_dp_warn(dp, "Failed to alloc receive page frag\n");
-               return NULL;
-       }
-
-       *dma_addr = nfp_net_dma_map_rx(dp, frag);
-       if (dma_mapping_error(dp->dev, *dma_addr)) {
-               nfp_net_free_frag(frag, dp->xdp_prog);
-               nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
-               return NULL;
-       }
-
-       return frag;
-}
-
-static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
-{
-       void *frag;
-
-       if (!dp->xdp_prog) {
-               frag = napi_alloc_frag(dp->fl_bufsz);
-               if (unlikely(!frag))
-                       return NULL;
-       } else {
-               struct page *page;
-
-               page = dev_alloc_page();
-               if (unlikely(!page))
-                       return NULL;
-               frag = page_address(page);
-       }
-
-       *dma_addr = nfp_net_dma_map_rx(dp, frag);
-       if (dma_mapping_error(dp->dev, *dma_addr)) {
-               nfp_net_free_frag(frag, dp->xdp_prog);
-               nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
-               return NULL;
-       }
-
-       return frag;
-}
-
-/**
- * nfp_net_rx_give_one() - Put a mapped frag on the software and hardware rings
- * @dp:                NFP Net data path struct
- * @rx_ring:   RX ring structure
- * @frag:      page fragment buffer
- * @dma_addr:  DMA address of the frag mapping
- */
-static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
-                               struct nfp_net_rx_ring *rx_ring,
-                               void *frag, dma_addr_t dma_addr)
-{
-       unsigned int wr_idx;
-
-       wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
-
-       nfp_net_dma_sync_dev_rx(dp, dma_addr);
-
-       /* Stash SKB and DMA address away */
-       rx_ring->rxbufs[wr_idx].frag = frag;
-       rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
-
-       /* Fill freelist descriptor */
-       rx_ring->rxds[wr_idx].fld.reserved = 0;
-       rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
-       nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
-                             dma_addr + dp->rx_dma_off);
-
-       rx_ring->wr_p++;
-       if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
-               /* Update write pointer of the freelist queue. Make
-                * sure all writes are flushed before telling the hardware.
-                */
-               wmb();
-               nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
-       }
-}
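
Editor's note: freelist refills are batched: the device is only told about new buffers once NFP_NET_FL_BATCH of them have been queued, and only after a write barrier so the descriptors are visible first. A sketch of the batching (batch size assumed, helper is a stand-in):

#define DEMO_FL_BATCH 8                 /* assumed batch size */

static void demo_qcp_wr_ptr_add(unsigned int n)
{
        (void)n;                        /* the driver does a writel() here */
}

static void demo_give_one(unsigned int *wr_p)
{
        (*wr_p)++;
        if (!(*wr_p % DEMO_FL_BATCH)) {
                /* wmb() in the driver, then bump the QCP write pointer */
                demo_qcp_wr_ptr_add(DEMO_FL_BATCH);
        }
}
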
-
-/**
- * nfp_net_rx_ring_reset() - Reflect the freelist state in SW after disable
- * @rx_ring:   RX ring structure
- *
- * Assumes that the device is stopped, must be idempotent.
- */
-static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
-{
-       unsigned int wr_idx, last_idx;
-
-       /* wr_p == rd_p means ring was never fed FL bufs.  RX rings are always
-        * kept at cnt - 1 FL bufs.
-        */
-       if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
-               return;
-
-       /* Move the empty entry to the end of the list */
-       wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
-       last_idx = rx_ring->cnt - 1;
-       if (rx_ring->r_vec->xsk_pool) {
-               rx_ring->xsk_rxbufs[wr_idx] = rx_ring->xsk_rxbufs[last_idx];
-               memset(&rx_ring->xsk_rxbufs[last_idx], 0,
-                      sizeof(*rx_ring->xsk_rxbufs));
-       } else {
-               rx_ring->rxbufs[wr_idx] = rx_ring->rxbufs[last_idx];
-               memset(&rx_ring->rxbufs[last_idx], 0, sizeof(*rx_ring->rxbufs));
-       }
-
-       memset(rx_ring->rxds, 0, rx_ring->size);
-       rx_ring->wr_p = 0;
-       rx_ring->rd_p = 0;
-}
-
-/**
- * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
- * @dp:                NFP Net data path struct
- * @rx_ring:   RX ring to remove buffers from
- *
- * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
- * entries.  After device is disabled nfp_net_rx_ring_reset() must be called
- * to restore required ring geometry.
- */
-static void
-nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
-                         struct nfp_net_rx_ring *rx_ring)
-{
-       unsigned int i;
-
-       if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
-               return;
-
-       for (i = 0; i < rx_ring->cnt - 1; i++) {
-               /* A NULL frag can only happen when the initial filling of
-                * the ring fails to allocate enough buffers and calls here
-                * to free the ones already allocated.
-                */
-               if (!rx_ring->rxbufs[i].frag)
-                       continue;
-
-               nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
-               nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
-               rx_ring->rxbufs[i].dma_addr = 0;
-               rx_ring->rxbufs[i].frag = NULL;
-       }
-}
-
-/**
- * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
- * @dp:                NFP Net data path struct
- * @rx_ring:   RX ring to allocate buffers for
- */
-static int
-nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
-                          struct nfp_net_rx_ring *rx_ring)
-{
-       struct nfp_net_rx_buf *rxbufs;
-       unsigned int i;
-
-       if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
-               return 0;
-
-       rxbufs = rx_ring->rxbufs;
-
-       for (i = 0; i < rx_ring->cnt - 1; i++) {
-               rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
-               if (!rxbufs[i].frag) {
-                       nfp_net_rx_ring_bufs_free(dp, rx_ring);
-                       return -ENOMEM;
-               }
-       }
-
-       return 0;
-}
-
-/**
- * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
- * @dp:             NFP Net data path struct
- * @rx_ring: RX ring to fill
- */
-static void
-nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
-                             struct nfp_net_rx_ring *rx_ring)
-{
-       unsigned int i;
-
-       if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
-               return nfp_net_xsk_rx_ring_fill_freelist(rx_ring);
-
-       for (i = 0; i < rx_ring->cnt - 1; i++)
-               nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
-                                   rx_ring->rxbufs[i].dma_addr);
-}
-
-/**
- * nfp_net_rx_csum_has_errors() - check if the RX descriptor has any csum errors
- * @flags: RX descriptor flags field in CPU byte order
- */
-static int nfp_net_rx_csum_has_errors(u16 flags)
-{
-       u16 csum_all_checked, csum_all_ok;
-
-       csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
-       csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
-
-       return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
-}
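
Editor's note: the deleted checksum-error test exploits the descriptor's bit layout: each CHECKED flag sits one bit position above its OK flag (per PCIE_DESC_RX_CSUM_OK_SHIFT), so shifting the OK bits up and comparing catches any field that was checked but not OK. A sketch with a made-up layout:

#include <stdint.h>

#define DEMO_CSUM_ALL      0x00aau      /* hypothetical CHECKED bits */
#define DEMO_CSUM_ALL_OK   0x0055u      /* matching OK bits, one below */
#define DEMO_CSUM_OK_SHIFT 1

static int demo_csum_has_errors(uint16_t flags)
{
        uint16_t checked = flags & DEMO_CSUM_ALL;
        uint16_t ok = flags & DEMO_CSUM_ALL_OK;

        return checked != (uint16_t)(ok << DEMO_CSUM_OK_SHIFT);
}
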
-
-/**
- * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
- * @dp:  NFP Net data path struct
- * @r_vec: per-ring structure
- * @rxd: Pointer to RX descriptor
- * @meta: Parsed metadata prepend
- * @skb: Pointer to SKB
- */
-void nfp_net_rx_csum(const struct nfp_net_dp *dp,
-                    struct nfp_net_r_vector *r_vec,
-                    const struct nfp_net_rx_desc *rxd,
-                    const struct nfp_meta_parsed *meta, struct sk_buff *skb)
-{
-       skb_checksum_none_assert(skb);
-
-       if (!(dp->netdev->features & NETIF_F_RXCSUM))
-               return;
-
-       if (meta->csum_type) {
-               skb->ip_summed = meta->csum_type;
-               skb->csum = meta->csum;
-               u64_stats_update_begin(&r_vec->rx_sync);
-               r_vec->hw_csum_rx_complete++;
-               u64_stats_update_end(&r_vec->rx_sync);
-               return;
-       }
-
-       if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
-               u64_stats_update_begin(&r_vec->rx_sync);
-               r_vec->hw_csum_rx_error++;
-               u64_stats_update_end(&r_vec->rx_sync);
-               return;
-       }
-
-       /* Assume that the firmware will never report inner CSUM_OK unless outer
-        * L4 headers were successfully parsed. FW will always report zero UDP
-        * checksum as CSUM_OK.
-        */
-       if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
-           rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
-               __skb_incr_checksum_unnecessary(skb);
-               u64_stats_update_begin(&r_vec->rx_sync);
-               r_vec->hw_csum_rx_ok++;
-               u64_stats_update_end(&r_vec->rx_sync);
-       }
-
-       if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
-           rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
-               __skb_incr_checksum_unnecessary(skb);
-               u64_stats_update_begin(&r_vec->rx_sync);
-               r_vec->hw_csum_rx_inner_ok++;
-               u64_stats_update_end(&r_vec->rx_sync);
-       }
-}
-
-static void
-nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
-                unsigned int type, __be32 *hash)
-{
-       if (!(netdev->features & NETIF_F_RXHASH))
-               return;
-
-       switch (type) {
-       case NFP_NET_RSS_IPV4:
-       case NFP_NET_RSS_IPV6:
-       case NFP_NET_RSS_IPV6_EX:
-               meta->hash_type = PKT_HASH_TYPE_L3;
-               break;
-       default:
-               meta->hash_type = PKT_HASH_TYPE_L4;
-               break;
-       }
-
-       meta->hash = get_unaligned_be32(hash);
-}
-
-static void
-nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
-                     void *data, struct nfp_net_rx_desc *rxd)
-{
-       struct nfp_net_rx_hash *rx_hash = data;
-
-       if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
-               return;
-
-       nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
-                        &rx_hash->hash);
-}
-
-bool
-nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
-                  void *data, void *pkt, unsigned int pkt_len, int meta_len)
-{
-       u32 meta_info;
-
-       meta_info = get_unaligned_be32(data);
-       data += 4;
-
-       while (meta_info) {
-               switch (meta_info & NFP_NET_META_FIELD_MASK) {
-               case NFP_NET_META_HASH:
-                       meta_info >>= NFP_NET_META_FIELD_SIZE;
-                       nfp_net_set_hash(netdev, meta,
-                                        meta_info & NFP_NET_META_FIELD_MASK,
-                                        (__be32 *)data);
-                       data += 4;
-                       break;
-               case NFP_NET_META_MARK:
-                       meta->mark = get_unaligned_be32(data);
-                       data += 4;
-                       break;
-               case NFP_NET_META_PORTID:
-                       meta->portid = get_unaligned_be32(data);
-                       data += 4;
-                       break;
-               case NFP_NET_META_CSUM:
-                       meta->csum_type = CHECKSUM_COMPLETE;
-                       meta->csum =
-                               (__force __wsum)__get_unaligned_cpu32(data);
-                       data += 4;
-                       break;
-               case NFP_NET_META_RESYNC_INFO:
-                       if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
-                                                     pkt_len))
-                               return false;
-                       data += sizeof(struct nfp_net_tls_resync_req);
-                       break;
-               default:
-                       return true;
-               }
-
-               meta_info >>= NFP_NET_META_FIELD_SIZE;
-       }
-
-       return data != pkt;
-}
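
Editor's note: the chained-metadata parser walks one 32-bit word of 4-bit type tags, consuming one prepended field per tag until the word is empty. A reduced sketch of the loop shape (tag value hypothetical):

#include <stdint.h>

#define DEMO_META_FIELD_SIZE 4
#define DEMO_META_FIELD_MASK 0xfu
#define DEMO_META_MARK       2          /* hypothetical tag */

static int demo_parse_meta(uint32_t meta_info, const uint8_t **data)
{
        while (meta_info) {
                switch (meta_info & DEMO_META_FIELD_MASK) {
                case DEMO_META_MARK:
                        *data += 4;      /* each field here is 32 bits */
                        break;
                default:
                        return -1;       /* unknown tag: reject the packet */
                }
                meta_info >>= DEMO_META_FIELD_SIZE;
        }
        return 0;
}
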
-
-static void
-nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
-               struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
-               struct sk_buff *skb)
-{
-       u64_stats_update_begin(&r_vec->rx_sync);
-       r_vec->rx_drops++;
-       /* If we have both skb and rxbuf the replacement buffer allocation
-        * must have failed, count this as an alloc failure.
-        */
-       if (skb && rxbuf)
-               r_vec->rx_replace_buf_alloc_fail++;
-       u64_stats_update_end(&r_vec->rx_sync);
-
-       /* The skb is built on top of the frag; freeing the skb would also
-        * free the frag, so take an extra page ref to be able to reuse it.
-        */
-       if (skb && rxbuf && skb->head == rxbuf->frag)
-               page_ref_inc(virt_to_head_page(rxbuf->frag));
-       if (rxbuf)
-               nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
-       if (skb)
-               dev_kfree_skb_any(skb);
-}
-
-static bool
-nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
-                  struct nfp_net_tx_ring *tx_ring,
-                  struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
-                  unsigned int pkt_len, bool *completed)
-{
-       unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
-       struct nfp_net_tx_buf *txbuf;
-       struct nfp_net_tx_desc *txd;
-       int wr_idx;
-
-       /* Reject if xdp_adjust_tail grew the packet beyond the DMA area */
-       if (pkt_len + dma_off > dma_map_sz)
-               return false;
-
-       if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
-               if (!*completed) {
-                       nfp_net_xdp_complete(tx_ring);
-                       *completed = true;
-               }
-
-               if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
-                       nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
-                                       NULL);
-                       return false;
-               }
-       }
-
-       wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
-
-       /* Stash the soft descriptor of the head then initialize it */
-       txbuf = &tx_ring->txbufs[wr_idx];
-
-       nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);
-
-       txbuf->frag = rxbuf->frag;
-       txbuf->dma_addr = rxbuf->dma_addr;
-       txbuf->fidx = -1;
-       txbuf->pkt_cnt = 1;
-       txbuf->real_len = pkt_len;
-
-       dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
-                                  pkt_len, DMA_BIDIRECTIONAL);
-
-       /* Build TX descriptor */
-       txd = &tx_ring->txds[wr_idx];
-       txd->offset_eop = PCIE_DESC_TX_EOP;
-       txd->dma_len = cpu_to_le16(pkt_len);
-       nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
-       txd->data_len = cpu_to_le16(pkt_len);
-
-       txd->flags = 0;
-       txd->mss = 0;
-       txd->lso_hdrlen = 0;
-
-       tx_ring->wr_p++;
-       tx_ring->wr_ptr_add++;
-       return true;
-}
-
-/**
- * nfp_net_rx() - receive up to @budget packets on @rx_ring
- * @rx_ring:   RX ring to receive from
- * @budget:    NAPI budget
- *
- * Note, this function is separated out from the napi poll function to
- * more cleanly separate packet receive code from other bookkeeping
- * functions performed in the napi poll function.
- *
- * Return: Number of packets received.
- */
-static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
-{
-       struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
-       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
-       struct nfp_net_tx_ring *tx_ring;
-       struct bpf_prog *xdp_prog;
-       bool xdp_tx_cmpl = false;
-       unsigned int true_bufsz;
-       struct sk_buff *skb;
-       int pkts_polled = 0;
-       struct xdp_buff xdp;
-       int idx;
-
-       xdp_prog = READ_ONCE(dp->xdp_prog);
-       true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
-       xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
-                     &rx_ring->xdp_rxq);
-       tx_ring = r_vec->xdp_ring;
-
-       while (pkts_polled < budget) {
-               unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
-               struct nfp_net_rx_buf *rxbuf;
-               struct nfp_net_rx_desc *rxd;
-               struct nfp_meta_parsed meta;
-               bool redir_egress = false;
-               struct net_device *netdev;
-               dma_addr_t new_dma_addr;
-               u32 meta_len_xdp = 0;
-               void *new_frag;
-
-               idx = D_IDX(rx_ring, rx_ring->rd_p);
-
-               rxd = &rx_ring->rxds[idx];
-               if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
-                       break;
-
-               /* Memory barrier to ensure that we won't do other reads
-                * before the DD bit.
-                */
-               dma_rmb();
-
-               memset(&meta, 0, sizeof(meta));
-
-               rx_ring->rd_p++;
-               pkts_polled++;
-
-               rxbuf = &rx_ring->rxbufs[idx];
-               /*         < meta_len >
-                *  <-- [rx_offset] -->
-                *  ---------------------------------------------------------
-                * | [XX] |  metadata  |             packet           | XXXX |
-                *  ---------------------------------------------------------
-                *         <---------------- data_len --------------->
-                *
-                * The rx_offset is fixed for all packets, the meta_len can vary
-                * on a packet by packet basis. If rx_offset is set to zero
-                * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
-                * buffer and is immediately followed by the packet (no [XX]).
-                */
-               meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
-               data_len = le16_to_cpu(rxd->rxd.data_len);
-               pkt_len = data_len - meta_len;
-
-               pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
-               if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
-                       pkt_off += meta_len;
-               else
-                       pkt_off += dp->rx_offset;
-               meta_off = pkt_off - meta_len;
-
-               /* Stats update */
-               u64_stats_update_begin(&r_vec->rx_sync);
-               r_vec->rx_pkts++;
-               r_vec->rx_bytes += pkt_len;
-               u64_stats_update_end(&r_vec->rx_sync);
-
-               if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
-                            (dp->rx_offset && meta_len > dp->rx_offset))) {
-                       nn_dp_warn(dp, "oversized RX packet metadata %u\n",
-                                  meta_len);
-                       nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
-                       continue;
-               }
-
-               nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
-                                       data_len);
-
-               if (!dp->chained_metadata_format) {
-                       nfp_net_set_hash_desc(dp->netdev, &meta,
-                                             rxbuf->frag + meta_off, rxd);
-               } else if (meta_len) {
-                       if (unlikely(nfp_net_parse_meta(dp->netdev, &meta,
-                                                       rxbuf->frag + meta_off,
-                                                       rxbuf->frag + pkt_off,
-                                                       pkt_len, meta_len))) {
-                               nn_dp_warn(dp, "invalid RX packet metadata\n");
-                               nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
-                                               NULL);
-                               continue;
-                       }
-               }
-
-               if (xdp_prog && !meta.portid) {
-                       void *orig_data = rxbuf->frag + pkt_off;
-                       unsigned int dma_off;
-                       int act;
-
-                       xdp_prepare_buff(&xdp,
-                                        rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
-                                        pkt_off - NFP_NET_RX_BUF_HEADROOM,
-                                        pkt_len, true);
-
-                       act = bpf_prog_run_xdp(xdp_prog, &xdp);
-
-                       pkt_len = xdp.data_end - xdp.data;
-                       pkt_off += xdp.data - orig_data;
-
-                       switch (act) {
-                       case XDP_PASS:
-                               meta_len_xdp = xdp.data - xdp.data_meta;
-                               break;
-                       case XDP_TX:
-                               dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
-                               if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
-                                                                tx_ring, rxbuf,
-                                                                dma_off,
-                                                                pkt_len,
-                                                                &xdp_tx_cmpl)))
-                                       trace_xdp_exception(dp->netdev,
-                                                           xdp_prog, act);
-                               continue;
-                       default:
-                               bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
-                               fallthrough;
-                       case XDP_ABORTED:
-                               trace_xdp_exception(dp->netdev, xdp_prog, act);
-                               fallthrough;
-                       case XDP_DROP:
-                               nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
-                                                   rxbuf->dma_addr);
-                               continue;
-                       }
-               }
-
-               if (likely(!meta.portid)) {
-                       netdev = dp->netdev;
-               } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
-                       struct nfp_net *nn = netdev_priv(dp->netdev);
-
-                       nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
-                                           pkt_len);
-                       nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
-                                           rxbuf->dma_addr);
-                       continue;
-               } else {
-                       struct nfp_net *nn;
-
-                       nn = netdev_priv(dp->netdev);
-                       netdev = nfp_app_dev_get(nn->app, meta.portid,
-                                                &redir_egress);
-                       if (unlikely(!netdev)) {
-                               nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
-                                               NULL);
-                               continue;
-                       }
-
-                       if (nfp_netdev_is_nfp_repr(netdev))
-                               nfp_repr_inc_rx_stats(netdev, pkt_len);
-               }
-
-               skb = build_skb(rxbuf->frag, true_bufsz);
-               if (unlikely(!skb)) {
-                       nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
-                       continue;
-               }
-               new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
-               if (unlikely(!new_frag)) {
-                       nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
-                       continue;
-               }
-
-               nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
-
-               nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
-
-               skb_reserve(skb, pkt_off);
-               skb_put(skb, pkt_len);
-
-               skb->mark = meta.mark;
-               skb_set_hash(skb, meta.hash, meta.hash_type);
-
-               skb_record_rx_queue(skb, rx_ring->idx);
-               skb->protocol = eth_type_trans(skb, netdev);
-
-               nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
-
-#ifdef CONFIG_TLS_DEVICE
-               if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
-                       skb->decrypted = true;
-                       u64_stats_update_begin(&r_vec->rx_sync);
-                       r_vec->hw_tls_rx++;
-                       u64_stats_update_end(&r_vec->rx_sync);
-               }
-#endif
-
-               if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
-                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
-                                              le16_to_cpu(rxd->rxd.vlan));
-               if (meta_len_xdp)
-                       skb_metadata_set(skb, meta_len_xdp);
-
-               if (likely(!redir_egress)) {
-                       napi_gro_receive(&rx_ring->r_vec->napi, skb);
-               } else {
-                       skb->dev = netdev;
-                       skb_reset_network_header(skb);
-                       __skb_push(skb, ETH_HLEN);
-                       dev_queue_xmit(skb);
-               }
-       }
-
-       if (xdp_prog) {
-               if (tx_ring->wr_ptr_add)
-                       nfp_net_tx_xmit_more_flush(tx_ring);
-               else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
-                        !xdp_tx_cmpl)
-                       if (!nfp_net_xdp_complete(tx_ring))
-                               pkts_polled = budget;
-       }
-
-       return pkts_polled;
-}
-
-/**
- * nfp_net_poll() - napi poll function
- * @napi:    NAPI structure
- * @budget:  NAPI budget
- *
- * Return: number of packets polled.
- */
-static int nfp_net_poll(struct napi_struct *napi, int budget)
-{
-       struct nfp_net_r_vector *r_vec =
-               container_of(napi, struct nfp_net_r_vector, napi);
-       unsigned int pkts_polled = 0;
-
-       if (r_vec->tx_ring)
-               nfp_net_tx_complete(r_vec->tx_ring, budget);
-       if (r_vec->rx_ring)
-               pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
-
-       if (pkts_polled < budget)
-               if (napi_complete_done(napi, pkts_polled))
-                       nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
-
-       if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
-               struct dim_sample dim_sample = {};
-               unsigned int start;
-               u64 pkts, bytes;
-
-               do {
-                       start = u64_stats_fetch_begin(&r_vec->rx_sync);
-                       pkts = r_vec->rx_pkts;
-                       bytes = r_vec->rx_bytes;
-               } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
-
-               dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
-               net_dim(&r_vec->rx_dim, dim_sample);
-       }
-
-       if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
-               struct dim_sample dim_sample = {};
-               unsigned int start;
-               u64 pkts, bytes;
-
-               do {
-                       start = u64_stats_fetch_begin(&r_vec->tx_sync);
-                       pkts = r_vec->tx_pkts;
-                       bytes = r_vec->tx_bytes;
-               } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
-
-               dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
-               net_dim(&r_vec->tx_dim, dim_sample);
-       }
-
-       return pkts_polled;
-}
-
-/* Control device data path
- */
-
-static bool
-nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
-               struct sk_buff *skb, bool old)
-{
-       unsigned int real_len = skb->len, meta_len = 0;
-       struct nfp_net_tx_ring *tx_ring;
-       struct nfp_net_tx_buf *txbuf;
-       struct nfp_net_tx_desc *txd;
-       struct nfp_net_dp *dp;
-       dma_addr_t dma_addr;
-       int wr_idx;
-
-       dp = &r_vec->nfp_net->dp;
-       tx_ring = r_vec->tx_ring;
-
-       if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
-               nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
-               goto err_free;
-       }
-
-       if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
-               u64_stats_update_begin(&r_vec->tx_sync);
-               r_vec->tx_busy++;
-               u64_stats_update_end(&r_vec->tx_sync);
-               if (!old)
-                       __skb_queue_tail(&r_vec->queue, skb);
-               else
-                       __skb_queue_head(&r_vec->queue, skb);
-               return true;
-       }
-
-       if (nfp_app_ctrl_has_meta(nn->app)) {
-               if (unlikely(skb_headroom(skb) < 8)) {
-                       nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
-                       goto err_free;
-               }
-               meta_len = 8;
-               put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
-               put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
-       }
-
-       /* Start with the head skbuf */
-       dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
-                                 DMA_TO_DEVICE);
-       if (dma_mapping_error(dp->dev, dma_addr))
-               goto err_dma_warn;
-
-       wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
-
-       /* Stash the soft descriptor of the head then initialize it */
-       txbuf = &tx_ring->txbufs[wr_idx];
-       txbuf->skb = skb;
-       txbuf->dma_addr = dma_addr;
-       txbuf->fidx = -1;
-       txbuf->pkt_cnt = 1;
-       txbuf->real_len = real_len;
-
-       /* Build TX descriptor */
-       txd = &tx_ring->txds[wr_idx];
-       txd->offset_eop = meta_len | PCIE_DESC_TX_EOP;
-       txd->dma_len = cpu_to_le16(skb_headlen(skb));
-       nfp_desc_set_dma_addr(txd, dma_addr);
-       txd->data_len = cpu_to_le16(skb->len);
-
-       txd->flags = 0;
-       txd->mss = 0;
-       txd->lso_hdrlen = 0;
-
-       tx_ring->wr_p++;
-       tx_ring->wr_ptr_add++;
-       nfp_net_tx_xmit_more_flush(tx_ring);
-
-       return false;
+                                                     ntls->next_seq);
 
-err_dma_warn:
-       nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
-err_free:
-       u64_stats_update_begin(&r_vec->tx_sync);
-       r_vec->tx_errors++;
-       u64_stats_update_end(&r_vec->tx_sync);
-       dev_kfree_skb_any(skb);
-       return false;
-}
+               *nr_frags = 0;
+               return nskb;
+       }
 
-bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
-{
-       struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+       if (datalen) {
+               u64_stats_update_begin(&r_vec->tx_sync);
+               if (!skb_is_gso(skb))
+                       r_vec->hw_tls_tx++;
+               else
+                       r_vec->hw_tls_tx += skb_shinfo(skb)->gso_segs;
+               u64_stats_update_end(&r_vec->tx_sync);
+       }
 
-       return nfp_ctrl_tx_one(nn, r_vec, skb, false);
+       memcpy(tls_handle, ntls->fw_handle, sizeof(ntls->fw_handle));
+       ntls->next_seq += datalen;
+#endif
+       return skb;
 }
 
-bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
 {
-       struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
-       bool ret;
-
-       spin_lock_bh(&r_vec->lock);
-       ret = nfp_ctrl_tx_one(nn, r_vec, skb, false);
-       spin_unlock_bh(&r_vec->lock);
+#ifdef CONFIG_TLS_DEVICE
+       struct nfp_net_tls_offload_ctx *ntls;
+       u32 datalen, seq;
 
-       return ret;
-}
+       if (!tls_handle)
+               return;
+       if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
+               return;
 
-static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
-{
-       struct sk_buff *skb;
+       datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+       seq = ntohl(tcp_hdr(skb)->seq);
 
-       while ((skb = __skb_dequeue(&r_vec->queue)))
-               if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
-                       return;
+       ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
+       if (ntls->next_seq == seq + datalen)
+               ntls->next_seq = seq;
+       else
+               WARN_ON_ONCE(1);
+#endif
 }
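
A quick numeric check of the rewind above, with illustrative values: the undo may only roll next_seq back if this packet was the last one to advance it.

    /* Illustrative values only:
     *   seq      = 5000   (TCP sequence of the aborted packet)
     *   datalen  = 1000   (TLS payload bytes it covered)
     *   next_seq = 6000   (== seq + datalen, so rewinding to 5000 is safe)
     * If next_seq != seq + datalen, another packet has advanced the state
     * in the meantime; rewinding would corrupt it, hence WARN_ON_ONCE().
     */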
 
-static bool
-nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
+static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 {
-       u32 meta_type, meta_tag;
-
-       if (!nfp_app_ctrl_has_meta(nn->app))
-               return !meta_len;
-
-       if (meta_len != 8)
-               return false;
-
-       meta_type = get_unaligned_be32(data);
-       meta_tag = get_unaligned_be32(data + 4);
+       struct nfp_net *nn = netdev_priv(netdev);
 
-       return (meta_type == NFP_NET_META_PORTID &&
-               meta_tag == NFP_META_PORT_ID_CTRL);
+       nn_warn(nn, "TX watchdog timeout on ring: %u\n", txqueue);
 }
 
-static bool
-nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
-               struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
+/* Receive processing */
+static unsigned int
+nfp_net_calc_fl_bufsz_data(struct nfp_net_dp *dp)
 {
-       unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
-       struct nfp_net_rx_buf *rxbuf;
-       struct nfp_net_rx_desc *rxd;
-       dma_addr_t new_dma_addr;
-       struct sk_buff *skb;
-       void *new_frag;
-       int idx;
-
-       idx = D_IDX(rx_ring, rx_ring->rd_p);
-
-       rxd = &rx_ring->rxds[idx];
-       if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
-               return false;
-
-       /* Memory barrier to ensure that we won't do other reads
-        * before the DD bit.
-        */
-       dma_rmb();
-
-       rx_ring->rd_p++;
-
-       rxbuf = &rx_ring->rxbufs[idx];
-       meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
-       data_len = le16_to_cpu(rxd->rxd.data_len);
-       pkt_len = data_len - meta_len;
+       unsigned int fl_bufsz = 0;
 
-       pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
        if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
-               pkt_off += meta_len;
+               fl_bufsz += NFP_NET_MAX_PREPEND;
        else
-               pkt_off += dp->rx_offset;
-       meta_off = pkt_off - meta_len;
-
-       /* Stats update */
-       u64_stats_update_begin(&r_vec->rx_sync);
-       r_vec->rx_pkts++;
-       r_vec->rx_bytes += pkt_len;
-       u64_stats_update_end(&r_vec->rx_sync);
-
-       nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
-
-       if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
-               nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
-                          meta_len);
-               nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
-               return true;
-       }
-
-       skb = build_skb(rxbuf->frag, dp->fl_bufsz);
-       if (unlikely(!skb)) {
-               nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
-               return true;
-       }
-       new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
-       if (unlikely(!new_frag)) {
-               nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
-               return true;
-       }
-
-       nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
-
-       nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
-
-       skb_reserve(skb, pkt_off);
-       skb_put(skb, pkt_len);
-
-       nfp_app_ctrl_rx(nn->app, skb);
+               fl_bufsz += dp->rx_offset;
+       fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;
 
-       return true;
+       return fl_bufsz;
 }
 
-static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+static unsigned int nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
 {
-       struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
-       struct nfp_net *nn = r_vec->nfp_net;
-       struct nfp_net_dp *dp = &nn->dp;
-       unsigned int budget = 512;
+       unsigned int fl_bufsz;
+
+       fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
+       fl_bufsz += dp->rx_dma_off;
+       fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);
 
-       while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
-               continue;
+       fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
+       fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-       return budget;
+       return fl_bufsz;
 }
 
-static void nfp_ctrl_poll(struct tasklet_struct *t)
+static unsigned int nfp_net_calc_fl_bufsz_xsk(struct nfp_net_dp *dp)
 {
-       struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
+       unsigned int fl_bufsz;
 
-       spin_lock(&r_vec->lock);
-       nfp_net_tx_complete(r_vec->tx_ring, 0);
-       __nfp_ctrl_tx_queued(r_vec);
-       spin_unlock(&r_vec->lock);
+       fl_bufsz = XDP_PACKET_HEADROOM;
+       fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);
 
-       if (nfp_ctrl_rx(r_vec)) {
-               nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
-       } else {
-               tasklet_schedule(&r_vec->tasklet);
-               nn_dp_warn(&r_vec->nfp_net->dp,
-                          "control message budget exceeded!\n");
-       }
+       return fl_bufsz;
 }
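
A rough worked example of the freelist buffer sizing above; the macro values below are typical assumptions for illustration and are not taken from this patch:

    /* Assumed values: NFP_NET_RX_BUF_HEADROOM = 64, rx_dma_off = 0,
     * rx_offset = 32 (static offset case), MTU = 1500:
     *
     *   data part = 32 + 14 (ETH_HLEN) + 8 (VLAN_HLEN * 2) + 1500 = 1554
     *   fl_bufsz  = SKB_DATA_ALIGN(64 + 0 + 1554)
     *             + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
     *
     * The XSK variant swaps the headroom and DMA offset terms for
     * XDP_PACKET_HEADROOM and omits the skb_shared_info tail.
     */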
 
 /* Setup and Configuration
@@ -2408,7 +754,7 @@ static void nfp_net_vecs_init(struct nfp_net *nn)
 
                        __skb_queue_head_init(&r_vec->queue);
                        spin_lock_init(&r_vec->lock);
-                       tasklet_setup(&r_vec->tasklet, nfp_ctrl_poll);
+                       tasklet_setup(&r_vec->tasklet, nn->dp.ops->ctrl_poll);
                        tasklet_disable(&r_vec->tasklet);
                }
 
@@ -2416,298 +762,13 @@ static void nfp_net_vecs_init(struct nfp_net *nn)
        }
 }
 
-/**
- * nfp_net_tx_ring_free() - Free resources allocated to a TX ring
- * @tx_ring:   TX ring to free
- */
-static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
-{
-       struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
-       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
-
-       kvfree(tx_ring->txbufs);
-
-       if (tx_ring->txds)
-               dma_free_coherent(dp->dev, tx_ring->size,
-                                 tx_ring->txds, tx_ring->dma);
-
-       tx_ring->cnt = 0;
-       tx_ring->txbufs = NULL;
-       tx_ring->txds = NULL;
-       tx_ring->dma = 0;
-       tx_ring->size = 0;
-}
-
-/**
- * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
- * @dp:        NFP Net data path struct
- * @tx_ring:   TX Ring structure to allocate
- *
- * Return: 0 on success, negative errno otherwise.
- */
-static int
-nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
-{
-       struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
-
-       tx_ring->cnt = dp->txd_cnt;
-
-       tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
-       tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
-                                          &tx_ring->dma,
-                                          GFP_KERNEL | __GFP_NOWARN);
-       if (!tx_ring->txds) {
-               netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
-                           tx_ring->cnt);
-               goto err_alloc;
-       }
-
-       tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
-                                  GFP_KERNEL);
-       if (!tx_ring->txbufs)
-               goto err_alloc;
-
-       if (!tx_ring->is_xdp && dp->netdev)
-               netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
-                                   tx_ring->idx);
-
-       return 0;
-
-err_alloc:
-       nfp_net_tx_ring_free(tx_ring);
-       return -ENOMEM;
-}
-
-static void
-nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
-                         struct nfp_net_tx_ring *tx_ring)
-{
-       unsigned int i;
-
-       if (!tx_ring->is_xdp)
-               return;
-
-       for (i = 0; i < tx_ring->cnt; i++) {
-               if (!tx_ring->txbufs[i].frag)
-                       return;
-
-               nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
-               __free_page(virt_to_page(tx_ring->txbufs[i].frag));
-       }
-}
-
-static int
-nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
-                          struct nfp_net_tx_ring *tx_ring)
-{
-       struct nfp_net_tx_buf *txbufs = tx_ring->txbufs;
-       unsigned int i;
-
-       if (!tx_ring->is_xdp)
-               return 0;
-
-       for (i = 0; i < tx_ring->cnt; i++) {
-               txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
-               if (!txbufs[i].frag) {
-                       nfp_net_tx_ring_bufs_free(dp, tx_ring);
-                       return -ENOMEM;
-               }
-       }
-
-       return 0;
-}
-
-static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
-{
-       unsigned int r;
-
-       dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
-                              GFP_KERNEL);
-       if (!dp->tx_rings)
-               return -ENOMEM;
-
-       for (r = 0; r < dp->num_tx_rings; r++) {
-               int bias = 0;
-
-               if (r >= dp->num_stack_tx_rings)
-                       bias = dp->num_stack_tx_rings;
-
-               nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias],
-                                    r, bias);
-
-               if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
-                       goto err_free_prev;
-
-               if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
-                       goto err_free_ring;
-       }
-
-       return 0;
-
-err_free_prev:
-       while (r--) {
-               nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
-err_free_ring:
-               nfp_net_tx_ring_free(&dp->tx_rings[r]);
-       }
-       kfree(dp->tx_rings);
-       return -ENOMEM;
-}
-
-static void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
-{
-       unsigned int r;
-
-       for (r = 0; r < dp->num_tx_rings; r++) {
-               nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
-               nfp_net_tx_ring_free(&dp->tx_rings[r]);
-       }
-
-       kfree(dp->tx_rings);
-}
-
-/**
- * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
- * @rx_ring:  RX ring to free
- */
-static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
-{
-       struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
-       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
-
-       if (dp->netdev)
-               xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
-
-       if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
-               kvfree(rx_ring->xsk_rxbufs);
-       else
-               kvfree(rx_ring->rxbufs);
-
-       if (rx_ring->rxds)
-               dma_free_coherent(dp->dev, rx_ring->size,
-                                 rx_ring->rxds, rx_ring->dma);
-
-       rx_ring->cnt = 0;
-       rx_ring->rxbufs = NULL;
-       rx_ring->xsk_rxbufs = NULL;
-       rx_ring->rxds = NULL;
-       rx_ring->dma = 0;
-       rx_ring->size = 0;
-}
-
-/**
- * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
- * @dp:              NFP Net data path struct
- * @rx_ring:  RX ring to allocate
- *
- * Return: 0 on success, negative errno otherwise.
- */
-static int
-nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
-{
-       enum xdp_mem_type mem_type;
-       size_t rxbuf_sw_desc_sz;
-       int err;
-
-       if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
-               mem_type = MEM_TYPE_XSK_BUFF_POOL;
-               rxbuf_sw_desc_sz = sizeof(*rx_ring->xsk_rxbufs);
-       } else {
-               mem_type = MEM_TYPE_PAGE_ORDER0;
-               rxbuf_sw_desc_sz = sizeof(*rx_ring->rxbufs);
-       }
-
-       if (dp->netdev) {
-               err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
-                                      rx_ring->idx, rx_ring->r_vec->napi.napi_id);
-               if (err < 0)
-                       return err;
-       }
-
-       err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, mem_type, NULL);
-       if (err)
-               goto err_alloc;
-
-       rx_ring->cnt = dp->rxd_cnt;
-       rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
-       rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
-                                          &rx_ring->dma,
-                                          GFP_KERNEL | __GFP_NOWARN);
-       if (!rx_ring->rxds) {
-               netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
-                           rx_ring->cnt);
-               goto err_alloc;
-       }
-
-       if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
-               rx_ring->xsk_rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
-                                              GFP_KERNEL);
-               if (!rx_ring->xsk_rxbufs)
-                       goto err_alloc;
-       } else {
-               rx_ring->rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
-                                          GFP_KERNEL);
-               if (!rx_ring->rxbufs)
-                       goto err_alloc;
-       }
-
-       return 0;
-
-err_alloc:
-       nfp_net_rx_ring_free(rx_ring);
-       return -ENOMEM;
-}
-
-static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
-{
-       unsigned int r;
-
-       dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
-                              GFP_KERNEL);
-       if (!dp->rx_rings)
-               return -ENOMEM;
-
-       for (r = 0; r < dp->num_rx_rings; r++) {
-               nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);
-
-               if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
-                       goto err_free_prev;
-
-               if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
-                       goto err_free_ring;
-       }
-
-       return 0;
-
-err_free_prev:
-       while (r--) {
-               nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
-err_free_ring:
-               nfp_net_rx_ring_free(&dp->rx_rings[r]);
-       }
-       kfree(dp->rx_rings);
-       return -ENOMEM;
-}
-
-static void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
-{
-       unsigned int r;
-
-       for (r = 0; r < dp->num_rx_rings; r++) {
-               nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
-               nfp_net_rx_ring_free(&dp->rx_rings[r]);
-       }
-
-       kfree(dp->rx_rings);
-}
-
 static void
 nfp_net_napi_add(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, int idx)
 {
        if (dp->netdev)
                netif_napi_add(dp->netdev, &r_vec->napi,
                               nfp_net_has_xsk_pool_slow(dp, idx) ?
-                              nfp_net_xsk_poll : nfp_net_poll,
+                              dp->ops->xsk_poll : dp->ops->poll,
                               NAPI_POLL_WEIGHT);
        else
                tasklet_enable(&r_vec->tasklet);
@@ -2850,17 +911,6 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
        nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4));
 }
 
-static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
-{
-       nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
-       nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
-       nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
-
-       nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
-       nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
-       nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
-}
-
 /**
  * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
  * @nn:      NFP Net device to reconfigure
@@ -2903,25 +953,6 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
        nn->dp.ctrl = new_ctrl;
 }
 
-static void
-nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
-                            struct nfp_net_rx_ring *rx_ring, unsigned int idx)
-{
-       /* Write the DMA address, size and MSI-X info to the device */
-       nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
-       nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
-       nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
-}
-
-static void
-nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
-                            struct nfp_net_tx_ring *tx_ring, unsigned int idx)
-{
-       nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
-       nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
-       nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
-}
-
 /**
  * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
  * @nn:      NFP Net device to reconfigure
@@ -2951,11 +982,11 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
        for (r = 0; r < nn->dp.num_rx_rings; r++)
                nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);
 
-       nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ?
-                 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1);
+       nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE,
+                 U64_MAX >> (64 - nn->dp.num_tx_rings));
 
-       nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
-                 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);
+       nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE,
+                 U64_MAX >> (64 - nn->dp.num_rx_rings));
 
        if (nn->dp.netdev)
                nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
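
The rewritten ring-enable masks above fold the old 64-ring special case into a single expression; a sketch of the arithmetic, valid for ring counts from 1 to 64:

    /* U64_MAX >> (64 - n) sets the low n bits:
     *   n = 4  -> 0xf
     *   n = 64 -> shift by 0 -> 0xffffffffffffffff  (the old special case)
     * n = 0 would be an undefined 64-bit shift, but this path is expected
     * to configure at least one TX and one RX ring.
     */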
@@ -3396,6 +1427,8 @@ struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
        new->rx_rings = NULL;
        new->num_r_vecs = 0;
        new->num_stack_tx_rings = 0;
+       new->txrwb = NULL;
+       new->txrwb_dma = 0;
 
        return new;
 }
@@ -3431,7 +1464,7 @@ nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
 
                if (xsk_pool_get_rx_frame_size(dp->xsk_pools[r]) < xsk_min_fl_bufsz) {
                        NL_SET_ERR_MSG_MOD(extack,
-                                          "XSK buffer pool chunk size too small\n");
+                                          "XSK buffer pool chunk size too small");
                        return -EINVAL;
                }
        }
@@ -3859,7 +1892,7 @@ static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
        return 0;
 }
 
-const struct net_device_ops nfp_net_netdev_ops = {
+const struct net_device_ops nfp_nfd3_netdev_ops = {
        .ndo_init               = nfp_app_ndo_init,
        .ndo_uninit             = nfp_app_ndo_uninit,
        .ndo_open               = nfp_net_netdev_open,
@@ -3887,6 +1920,33 @@ const struct net_device_ops nfp_net_netdev_ops = {
        .ndo_get_devlink_port   = nfp_devlink_get_devlink_port,
 };
 
+const struct net_device_ops nfp_nfdk_netdev_ops = {
+       .ndo_init               = nfp_app_ndo_init,
+       .ndo_uninit             = nfp_app_ndo_uninit,
+       .ndo_open               = nfp_net_netdev_open,
+       .ndo_stop               = nfp_net_netdev_close,
+       .ndo_start_xmit         = nfp_net_tx,
+       .ndo_get_stats64        = nfp_net_stat64,
+       .ndo_vlan_rx_add_vid    = nfp_net_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = nfp_net_vlan_rx_kill_vid,
+       .ndo_set_vf_mac         = nfp_app_set_vf_mac,
+       .ndo_set_vf_vlan        = nfp_app_set_vf_vlan,
+       .ndo_set_vf_spoofchk    = nfp_app_set_vf_spoofchk,
+       .ndo_set_vf_trust       = nfp_app_set_vf_trust,
+       .ndo_get_vf_config      = nfp_app_get_vf_config,
+       .ndo_set_vf_link_state  = nfp_app_set_vf_link_state,
+       .ndo_setup_tc           = nfp_port_setup_tc,
+       .ndo_tx_timeout         = nfp_net_tx_timeout,
+       .ndo_set_rx_mode        = nfp_net_set_rx_mode,
+       .ndo_change_mtu         = nfp_net_change_mtu,
+       .ndo_set_mac_address    = nfp_net_set_mac_address,
+       .ndo_set_features       = nfp_net_set_features,
+       .ndo_features_check     = nfp_net_features_check,
+       .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
+       .ndo_bpf                = nfp_net_xdp,
+       .ndo_get_devlink_port   = nfp_devlink_get_devlink_port,
+};
+
 static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
 {
        struct nfp_net *nn = netdev_priv(netdev);
@@ -3929,10 +1989,10 @@ void nfp_net_info(struct nfp_net *nn)
                nn->dp.num_tx_rings, nn->max_tx_rings,
                nn->dp.num_rx_rings, nn->max_rx_rings);
        nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
-               nn->fw_ver.resv, nn->fw_ver.class,
+               nn->fw_ver.extend, nn->fw_ver.class,
                nn->fw_ver.major, nn->fw_ver.minor,
                nn->max_mtu);
-       nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+       nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
                nn->cap,
                nn->cap & NFP_NET_CFG_CTRL_PROMISC  ? "PROMISC "  : "",
                nn->cap & NFP_NET_CFG_CTRL_L2BC     ? "L2BCFILT " : "",
@@ -3950,6 +2010,7 @@ void nfp_net_info(struct nfp_net *nn)
                nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
                nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
                nn->cap & NFP_NET_CFG_CTRL_IRQMOD   ? "IRQMOD "   : "",
+               nn->cap & NFP_NET_CFG_CTRL_TXRWB    ? "TXRWB "    : "",
                nn->cap & NFP_NET_CFG_CTRL_VXLAN    ? "VXLAN "    : "",
                nn->cap & NFP_NET_CFG_CTRL_NVGRE    ? "NVGRE "    : "",
                nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
@@ -3961,6 +2022,7 @@ void nfp_net_info(struct nfp_net *nn)
 /**
  * nfp_net_alloc() - Allocate netdev and related structure
  * @pdev:         PCI device
+ * @dev_info:     NFP ASIC params
  * @ctrl_bar:     PCI IOMEM with vNIC config memory
  * @needs_netdev: Whether to allocate a netdev for this vNIC
  * @max_tx_rings: Maximum number of TX rings supported by device
@@ -3973,7 +2035,8 @@ void nfp_net_info(struct nfp_net *nn)
  * Return: NFP Net device structure, or ERR_PTR on error.
  */
 struct nfp_net *
-nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
+nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
+             void __iomem *ctrl_bar, bool needs_netdev,
              unsigned int max_tx_rings, unsigned int max_rx_rings)
 {
        struct nfp_net *nn;
@@ -3998,7 +2061,28 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
 
        nn->dp.dev = &pdev->dev;
        nn->dp.ctrl_bar = ctrl_bar;
+       nn->dev_info = dev_info;
        nn->pdev = pdev;
+       nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
+
+       switch (FIELD_GET(NFP_NET_CFG_VERSION_DP_MASK, nn->fw_ver.extend)) {
+       case NFP_NET_CFG_VERSION_DP_NFD3:
+               nn->dp.ops = &nfp_nfd3_ops;
+               break;
+       case NFP_NET_CFG_VERSION_DP_NFDK:
+               if (nn->fw_ver.major < 5) {
+                       dev_err(&pdev->dev,
+                               "NFDK must use ABI 5 or newer, found: %d\n",
+                               nn->fw_ver.major);
+                       err = -EINVAL;
+                       goto err_free_nn;
+               }
+               nn->dp.ops = &nfp_nfdk_ops;
+               break;
+       default:
+               err = -EINVAL;
+               goto err_free_nn;
+       }
 
        nn->max_tx_rings = max_tx_rings;
        nn->max_rx_rings = max_rx_rings;
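
The data path selection above keys off a single bit of the extended version byte; with NFP_NET_CFG_VERSION_DP_MASK defined as 1 in the nfp_net_ctrl.h hunk further down, the decode reduces to:

    /* fw_ver.extend bit 0 selects the data path personality:
     *   FIELD_GET(NFP_NET_CFG_VERSION_DP_MASK, extend) == 0 -> NFD3
     *   FIELD_GET(NFP_NET_CFG_VERSION_DP_MASK, extend) == 1 -> NFDK
     * A 1-bit field cannot hold other values, and NFDK firmware must
     * additionally report ABI major version 5 or newer.
     */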
@@ -4217,7 +2301,15 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
        nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
 
        /* Finalise the netdev setup */
-       netdev->netdev_ops = &nfp_net_netdev_ops;
+       switch (nn->dp.ops->version) {
+       case NFP_NFD_VER_NFD3:
+               netdev->netdev_ops = &nfp_nfd3_netdev_ops;
+               break;
+       case NFP_NFD_VER_NFDK:
+               netdev->netdev_ops = &nfp_nfdk_netdev_ops;
+               break;
+       }
+
        netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
 
        /* MTU range: 68 - hw-specific max */
@@ -4265,6 +2357,9 @@ static int nfp_net_read_caps(struct nfp_net *nn)
                nn->dp.rx_offset = NFP_NET_RX_OFFSET;
        }
 
+       /* Mask out NFD-version-specific features */
+       nn->cap &= nn->dp.ops->cap_mask;
+
        /* For control vNICs mask out the capabilities app doesn't want. */
        if (!nn->dp.netdev)
                nn->cap &= nn->app->type->ctrl_cap_mask;
@@ -4317,6 +2412,10 @@ int nfp_net_init(struct nfp_net *nn)
                nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
        }
 
+       /* Enable TX pointer writeback, if supported */
+       if (nn->cap & NFP_NET_CFG_CTRL_TXRWB)
+               nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXRWB;
+
        /* Stash the re-configuration queue away.  First odd queue in TX Bar */
        nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
 
index 50007cc..8892a94 100644 (file)
@@ -92,7 +92,6 @@
 #define   NFP_NET_CFG_CTRL_RINGCFG       (0x1 << 16) /* Ring runtime changes */
 #define   NFP_NET_CFG_CTRL_RSS           (0x1 << 17) /* RSS (version 1) */
 #define   NFP_NET_CFG_CTRL_IRQMOD        (0x1 << 18) /* Interrupt moderation */
-#define   NFP_NET_CFG_CTRL_RINGPRIO      (0x1 << 19) /* Ring priorities */
 #define   NFP_NET_CFG_CTRL_MSIXAUTO      (0x1 << 20) /* MSI-X auto-masking */
 #define   NFP_NET_CFG_CTRL_TXRWB         (0x1 << 21) /* Write-back of TX ring*/
 #define   NFP_NET_CFG_CTRL_VXLAN         (0x1 << 24) /* VXLAN tunnel support */
  * - define more STS bits
  */
 #define NFP_NET_CFG_VERSION            0x0030
-#define   NFP_NET_CFG_VERSION_RESERVED_MASK    (0xff << 24)
+#define   NFP_NET_CFG_VERSION_RESERVED_MASK    (0xfe << 24)
+#define   NFP_NET_CFG_VERSION_DP_NFD3          0
+#define   NFP_NET_CFG_VERSION_DP_NFDK          1
+#define   NFP_NET_CFG_VERSION_DP_MASK          1
 #define   NFP_NET_CFG_VERSION_CLASS_MASK  (0xff << 16)
 #define   NFP_NET_CFG_VERSION_CLASS(x)   (((x) & 0xff) << 16)
 #define   NFP_NET_CFG_VERSION_CLASS_GENERIC    0
index 2c74b3c..d8b735c 100644 (file)
@@ -1,10 +1,11 @@
 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
 #include <linux/debugfs.h>
 #include <linux/module.h>
 #include <linux/rtnetlink.h>
 
 #include "nfp_net.h"
+#include "nfp_net_dp.h"
 
 static struct dentry *nfp_dir;
 
@@ -80,10 +81,8 @@ static int nfp_tx_q_show(struct seq_file *file, void *data)
 {
        struct nfp_net_r_vector *r_vec = file->private;
        struct nfp_net_tx_ring *tx_ring;
-       struct nfp_net_tx_desc *txd;
-       int d_rd_p, d_wr_p, txd_cnt;
        struct nfp_net *nn;
-       int i;
+       int d_rd_p, d_wr_p;
 
        rtnl_lock();
 
@@ -97,52 +96,20 @@ static int nfp_tx_q_show(struct seq_file *file, void *data)
        if (!nfp_net_running(nn))
                goto out;
 
-       txd_cnt = tx_ring->cnt;
-
        d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
        d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q);
 
-       seq_printf(file, "TX[%02d,%02d%s]: cnt=%u dma=%pad host=%p   H_RD=%u H_WR=%u D_RD=%u D_WR=%u\n",
+       seq_printf(file, "TX[%02d,%02d%s]: cnt=%u dma=%pad host=%p   H_RD=%u H_WR=%u D_RD=%u D_WR=%u",
                   tx_ring->idx, tx_ring->qcidx,
                   tx_ring == r_vec->tx_ring ? "" : "xdp",
                   tx_ring->cnt, &tx_ring->dma, tx_ring->txds,
                   tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);
+       if (tx_ring->txrwb)
+               seq_printf(file, " TXRWB=%llu", *tx_ring->txrwb);
+       seq_putc(file, '\n');
 
-       for (i = 0; i < txd_cnt; i++) {
-               struct xdp_buff *xdp;
-               struct sk_buff *skb;
-
-               txd = &tx_ring->txds[i];
-               seq_printf(file, "%04d: 0x%08x 0x%08x 0x%08x 0x%08x", i,
-                          txd->vals[0], txd->vals[1],
-                          txd->vals[2], txd->vals[3]);
-
-               if (!tx_ring->is_xdp) {
-                       skb = READ_ONCE(tx_ring->txbufs[i].skb);
-                       if (skb)
-                               seq_printf(file, " skb->head=%p skb->data=%p",
-                                          skb->head, skb->data);
-               } else {
-                       xdp = READ_ONCE(tx_ring->txbufs[i].xdp);
-                       if (xdp)
-                               seq_printf(file, " xdp->data=%p", xdp->data);
-               }
-
-               if (tx_ring->txbufs[i].dma_addr)
-                       seq_printf(file, " dma_addr=%pad",
-                                  &tx_ring->txbufs[i].dma_addr);
-
-               if (i == tx_ring->rd_p % txd_cnt)
-                       seq_puts(file, " H_RD");
-               if (i == tx_ring->wr_p % txd_cnt)
-                       seq_puts(file, " H_WR");
-               if (i == d_rd_p % txd_cnt)
-                       seq_puts(file, " D_RD");
-               if (i == d_wr_p % txd_cnt)
-                       seq_puts(file, " D_WR");
-
-               seq_putc(file, '\n');
-       }
+       nfp_net_debugfs_print_tx_descs(file, &nn->dp, r_vec, tx_ring,
+                                      d_rd_p, d_wr_p);
 out:
        rtnl_unlock();
        return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c
new file mode 100644 (file)
index 0000000..34dd948
--- /dev/null
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#include "nfp_app.h"
+#include "nfp_net_dp.h"
+#include "nfp_net_xsk.h"
+
+/**
+ * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
+ * @dp:                NFP Net data path struct
+ * @dma_addr:  Pointer to storage for DMA address (output param)
+ *
+ * Allocate a new page frag and map it for DMA.
+ *
+ * Return: allocated page frag or NULL on failure.
+ */
+void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
+{
+       void *frag;
+
+       if (!dp->xdp_prog) {
+               frag = netdev_alloc_frag(dp->fl_bufsz);
+       } else {
+               struct page *page;
+
+               page = alloc_page(GFP_KERNEL);
+               frag = page ? page_address(page) : NULL;
+       }
+       if (!frag) {
+               nn_dp_warn(dp, "Failed to alloc receive page frag\n");
+               return NULL;
+       }
+
+       *dma_addr = nfp_net_dma_map_rx(dp, frag);
+       if (dma_mapping_error(dp->dev, *dma_addr)) {
+               nfp_net_free_frag(frag, dp->xdp_prog);
+               nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
+               return NULL;
+       }
+
+       return frag;
+}
+
+/**
+ * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
+ * @tx_ring:  TX ring structure
+ * @dp:              NFP Net data path struct
+ * @r_vec:    IRQ vector servicing this ring
+ * @idx:      Ring index
+ * @is_xdp:   Is this an XDP TX ring?
+ */
+static void
+nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp,
+                    struct nfp_net_r_vector *r_vec, unsigned int idx,
+                    bool is_xdp)
+{
+       struct nfp_net *nn = r_vec->nfp_net;
+
+       tx_ring->idx = idx;
+       tx_ring->r_vec = r_vec;
+       tx_ring->is_xdp = is_xdp;
+       u64_stats_init(&tx_ring->r_vec->tx_sync);
+
+       tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
+       tx_ring->txrwb = dp->txrwb ? &dp->txrwb[idx] : NULL;
+       tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
+}
+
+/**
+ * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
+ * @rx_ring:  RX ring structure
+ * @r_vec:    IRQ vector servicing this ring
+ * @idx:      Ring index
+ */
+static void
+nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
+                    struct nfp_net_r_vector *r_vec, unsigned int idx)
+{
+       struct nfp_net *nn = r_vec->nfp_net;
+
+       rx_ring->idx = idx;
+       rx_ring->r_vec = r_vec;
+       u64_stats_init(&rx_ring->r_vec->rx_sync);
+
+       rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
+       rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
+}
+
+/**
+ * nfp_net_rx_ring_reset() - Reflect the freelist state in SW after disable
+ * @rx_ring:   RX ring structure
+ *
+ * Assumes that the device is stopped, must be idempotent.
+ */
+void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
+{
+       unsigned int wr_idx, last_idx;
+
+       /* wr_p == rd_p means ring was never fed FL bufs.  RX rings are always
+        * kept at cnt - 1 FL bufs.
+        */
+       if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
+               return;
+
+       /* Move the empty entry to the end of the list */
+       wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
+       last_idx = rx_ring->cnt - 1;
+       if (rx_ring->r_vec->xsk_pool) {
+               rx_ring->xsk_rxbufs[wr_idx] = rx_ring->xsk_rxbufs[last_idx];
+               memset(&rx_ring->xsk_rxbufs[last_idx], 0,
+                      sizeof(*rx_ring->xsk_rxbufs));
+       } else {
+               rx_ring->rxbufs[wr_idx] = rx_ring->rxbufs[last_idx];
+               memset(&rx_ring->rxbufs[last_idx], 0, sizeof(*rx_ring->rxbufs));
+       }
+
+       memset(rx_ring->rxds, 0, rx_ring->size);
+       rx_ring->wr_p = 0;
+       rx_ring->rd_p = 0;
+}
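
Assuming D_IDX() is the usual power-of-two mask over the free-running pointer, a small example of the swap above:

    /* Example: cnt = 512, wr_p = 700 -> wr_idx = 700 & (512 - 1) = 188.
     * The buffer parked in the last slot (511) is copied into the empty
     * slot 188 and slot 511 is cleared, so after the pointers reset the
     * ring again holds its cnt - 1 buffers with the empty slot at the end.
     */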
+
+/**
+ * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
+ * @dp:                NFP Net data path struct
+ * @rx_ring:   RX ring to remove buffers from
+ *
+ * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
+ * entries.  After device is disabled nfp_net_rx_ring_reset() must be called
+ * to restore required ring geometry.
+ */
+static void
+nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
+                         struct nfp_net_rx_ring *rx_ring)
+{
+       unsigned int i;
+
+       if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
+               return;
+
+       for (i = 0; i < rx_ring->cnt - 1; i++) {
+               /* A NULL frag can only happen when initial filling of the
+                * ring fails to allocate enough buffers and we are called
+                * here to free the ones already allocated.
+                */
+               if (!rx_ring->rxbufs[i].frag)
+                       continue;
+
+               nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
+               nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
+               rx_ring->rxbufs[i].dma_addr = 0;
+               rx_ring->rxbufs[i].frag = NULL;
+       }
+}
+
+/**
+ * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
+ * @dp:                NFP Net data path struct
+ * @rx_ring:   RX ring to fill with buffers
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int
+nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
+                          struct nfp_net_rx_ring *rx_ring)
+{
+       struct nfp_net_rx_buf *rxbufs;
+       unsigned int i;
+
+       if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
+               return 0;
+
+       rxbufs = rx_ring->rxbufs;
+
+       for (i = 0; i < rx_ring->cnt - 1; i++) {
+               rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
+               if (!rxbufs[i].frag) {
+                       nfp_net_rx_ring_bufs_free(dp, rx_ring);
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
+int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
+{
+       unsigned int r;
+
+       dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
+                              GFP_KERNEL);
+       if (!dp->tx_rings)
+               return -ENOMEM;
+
+       if (dp->ctrl & NFP_NET_CFG_CTRL_TXRWB) {
+               dp->txrwb = dma_alloc_coherent(dp->dev,
+                                              dp->num_tx_rings * sizeof(u64),
+                                              &dp->txrwb_dma, GFP_KERNEL);
+               if (!dp->txrwb)
+                       goto err_free_rings;
+       }
+
+       for (r = 0; r < dp->num_tx_rings; r++) {
+               int bias = 0;
+
+               if (r >= dp->num_stack_tx_rings)
+                       bias = dp->num_stack_tx_rings;
+
+               nfp_net_tx_ring_init(&dp->tx_rings[r], dp,
+                                    &nn->r_vecs[r - bias], r, bias);
+
+               if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
+                       goto err_free_prev;
+
+               if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
+                       goto err_free_ring;
+       }
+
+       return 0;
+
+err_free_prev:
+       while (r--) {
+               nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
+err_free_ring:
+               nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
+       }
+       if (dp->txrwb)
+               dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
+                                 dp->txrwb, dp->txrwb_dma);
+err_free_rings:
+       kfree(dp->tx_rings);
+       return -ENOMEM;
+}
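
The unwind above uses the label-inside-loop idiom: a bufs_alloc failure jumps straight to err_free_ring so the ring whose buffers were never allocated only has its descriptor memory freed, after which while (r--) tears down the fully built rings; both paths then release the shared TXRWB area. In condensed form:

    /* tx_ring_alloc fails for ring r      -> err_free_prev: unwind r-1..0
     * tx_ring_bufs_alloc fails for ring r -> err_free_ring: free ring r's
     *                                        descriptors, then r-1..0
     */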
+
+void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
+{
+       unsigned int r;
+
+       for (r = 0; r < dp->num_tx_rings; r++) {
+               nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
+               nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
+       }
+
+       if (dp->txrwb)
+               dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
+                                 dp->txrwb, dp->txrwb_dma);
+       kfree(dp->tx_rings);
+}
+
+/**
+ * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
+ * @rx_ring:  RX ring to free
+ */
+static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
+{
+       struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+
+       if (dp->netdev)
+               xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+
+       if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
+               kvfree(rx_ring->xsk_rxbufs);
+       else
+               kvfree(rx_ring->rxbufs);
+
+       if (rx_ring->rxds)
+               dma_free_coherent(dp->dev, rx_ring->size,
+                                 rx_ring->rxds, rx_ring->dma);
+
+       rx_ring->cnt = 0;
+       rx_ring->rxbufs = NULL;
+       rx_ring->xsk_rxbufs = NULL;
+       rx_ring->rxds = NULL;
+       rx_ring->dma = 0;
+       rx_ring->size = 0;
+}
+
+/**
+ * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
+ * @dp:              NFP Net data path struct
+ * @rx_ring:  RX ring to allocate
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int
+nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
+{
+       enum xdp_mem_type mem_type;
+       size_t rxbuf_sw_desc_sz;
+       int err;
+
+       if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
+               mem_type = MEM_TYPE_XSK_BUFF_POOL;
+               rxbuf_sw_desc_sz = sizeof(*rx_ring->xsk_rxbufs);
+       } else {
+               mem_type = MEM_TYPE_PAGE_ORDER0;
+               rxbuf_sw_desc_sz = sizeof(*rx_ring->rxbufs);
+       }
+
+       if (dp->netdev) {
+               err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
+                                      rx_ring->idx, rx_ring->r_vec->napi.napi_id);
+               if (err < 0)
+                       return err;
+
+               err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, mem_type, NULL);
+               if (err)
+                       goto err_alloc;
+       }
+
+       rx_ring->cnt = dp->rxd_cnt;
+       rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
+       rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
+                                          &rx_ring->dma,
+                                          GFP_KERNEL | __GFP_NOWARN);
+       if (!rx_ring->rxds) {
+               netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
+                           rx_ring->cnt);
+               goto err_alloc;
+       }
+
+       if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
+               rx_ring->xsk_rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
+                                              GFP_KERNEL);
+               if (!rx_ring->xsk_rxbufs)
+                       goto err_alloc;
+       } else {
+               rx_ring->rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
+                                          GFP_KERNEL);
+               if (!rx_ring->rxbufs)
+                       goto err_alloc;
+       }
+
+       return 0;
+
+err_alloc:
+       nfp_net_rx_ring_free(rx_ring);
+       return -ENOMEM;
+}
+
+int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
+{
+       unsigned int r;
+
+       dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
+                              GFP_KERNEL);
+       if (!dp->rx_rings)
+               return -ENOMEM;
+
+       for (r = 0; r < dp->num_rx_rings; r++) {
+               nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);
+
+               if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
+                       goto err_free_prev;
+
+               if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
+                       goto err_free_ring;
+       }
+
+       return 0;
+
+err_free_prev:
+       while (r--) {
+               nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
+err_free_ring:
+               nfp_net_rx_ring_free(&dp->rx_rings[r]);
+       }
+       kfree(dp->rx_rings);
+       return -ENOMEM;
+}
+
+void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
+{
+       unsigned int r;
+
+       for (r = 0; r < dp->num_rx_rings; r++) {
+               nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
+               nfp_net_rx_ring_free(&dp->rx_rings[r]);
+       }
+
+       kfree(dp->rx_rings);
+}
+
+void
+nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
+                            struct nfp_net_rx_ring *rx_ring, unsigned int idx)
+{
+       /* Write the DMA address, size and MSI-X info to the device */
+       nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
+       nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
+       nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
+}
+
+void
+nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
+                            struct nfp_net_tx_ring *tx_ring, unsigned int idx)
+{
+       nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
+       if (tx_ring->txrwb) {
+               *tx_ring->txrwb = 0;
+               nn_writeq(nn, NFP_NET_CFG_TXR_WB_ADDR(idx),
+                         nn->dp.txrwb_dma + idx * sizeof(u64));
+       }
+       nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
+       nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
+}
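
The write-back slot programmed here is the device-visible twin of the host pointer set in nfp_net_tx_ring_init() above; both name the same per-ring u64 in the coherent txrwb array:

    /* Host side:   tx_ring->txrwb = &dp->txrwb[idx];
     * Device side: NFP_NET_CFG_TXR_WB_ADDR(idx) gets
     *              dp->txrwb_dma + idx * sizeof(u64).
     * The slot is zeroed first so a stale completion index from an
     * earlier configuration can never be read back.
     */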
+
+void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
+{
+       nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
+       nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
+       nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
+
+       nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
+       nn_writeq(nn, NFP_NET_CFG_TXR_WB_ADDR(idx), 0);
+       nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
+       nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
+}
+
+netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+       struct nfp_net *nn = netdev_priv(netdev);
+
+       return nn->dp.ops->xmit(skb, netdev);
+}
+
+bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+{
+       struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+
+       return nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
+}
+
+bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+{
+       struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+       bool ret;
+
+       spin_lock_bh(&r_vec->lock);
+       ret = nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
+       spin_unlock_bh(&r_vec->lock);
+
+       return ret;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h
new file mode 100644 (file)
index 0000000..c934cc2
--- /dev/null
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef _NFP_NET_DP_
+#define _NFP_NET_DP_
+
+#include "nfp_net.h"
+
+static inline dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
+{
+       return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
+                                   dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+                                   dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static inline void
+nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
+{
+       dma_sync_single_for_device(dp->dev, dma_addr,
+                                  dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+                                  dp->rx_dma_dir);
+}
+
+static inline void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp,
+                                       dma_addr_t dma_addr)
+{
+       dma_unmap_single_attrs(dp->dev, dma_addr,
+                              dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+                              dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static inline void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp,
+                                          dma_addr_t dma_addr,
+                                          unsigned int len)
+{
+       dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
+                               len, dp->rx_dma_dir);
+}
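
These helpers map and unmap with DMA_ATTR_SKIP_CPU_SYNC and instead sync explicitly per packet, so only the bytes actually touched are synced. A minimal sketch of the intended RX ordering, not a verbatim excerpt from the driver:

    /* dma = nfp_net_dma_map_rx(dp, frag);           // map once, no sync
     * nfp_net_dma_sync_dev_rx(dp, dma);             // before HW owns buf
     *         ...                                   // HW writes packet
     * nfp_net_dma_sync_cpu_rx(dp, dma + off, len);  // before CPU reads
     * nfp_net_dma_unmap_rx(dp, dma);                // unmap, again no sync
     */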
+
+/**
+ * nfp_net_tx_full() - check if the TX ring is full
+ * @tx_ring: TX ring to check
+ * @dcnt:    Number of descriptors that need to be enqueued (must be >= 1)
+ *
+ * This function checks, based on the *host copy* of the read/write
+ * pointers, whether a given TX ring is full.  The real TX queue may
+ * have made some slots available again in the meantime.
+ *
+ * Return: True if the ring is full.
+ */
+static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
+{
+       return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
+}
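
A quick numeric check of the test above; wr_p and rd_p are free-running counters, so the unsigned subtraction is wrap-safe:

    /* cnt = 8, wr_p = 10, rd_p = 3, dcnt = 1:
     *   (10 - 3) >= (8 - 1) -> full; a request for one more descriptor
     *   is refused once cnt - dcnt slots are in flight.
     */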
+
+static inline void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
+{
+       wmb(); /* drain writebuffer */
+       nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
+       tx_ring->wr_ptr_add = 0;
+}
+
+static inline u32
+nfp_net_read_tx_cmpl(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp)
+{
+       if (tx_ring->txrwb)
+               return *tx_ring->txrwb;
+       return nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
+}
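
With TXRWB enabled the completion index arrives by DMA into host memory, so the hot path avoids an uncached MMIO read across PCIe; only the fallback touches the queue controller. A heavily abridged sketch of how a reclaim loop might consume it (illustrative only, not the NFD3/NFDK implementation):

    /* u32 cmpl = nfp_net_read_tx_cmpl(tx_ring, dp);
     * reclaim TX buffers up to index 'cmpl', advancing tx_ring->rd_p,
     * then wake the stack's queue if it had been stopped for lack of room
     */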
+
+static inline void nfp_net_free_frag(void *frag, bool xdp)
+{
+       if (!xdp)
+               skb_free_frag(frag);
+       else
+               __free_page(virt_to_page(frag));
+}
+
+/**
+ * nfp_net_irq_unmask() - Unmask automasked interrupt
+ * @nn:       NFP Network structure
+ * @entry_nr: MSI-X table entry
+ *
+ * Clear the ICR for the IRQ entry.
+ */
+static inline void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
+{
+       nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
+       nn_pci_flush(nn);
+}
+
+struct seq_file;
+
+/* Common */
+void
+nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
+                            struct nfp_net_rx_ring *rx_ring, unsigned int idx);
+void
+nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
+                            struct nfp_net_tx_ring *tx_ring, unsigned int idx);
+void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx);
+
+void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr);
+int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
+int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
+void nfp_net_rx_rings_free(struct nfp_net_dp *dp);
+void nfp_net_tx_rings_free(struct nfp_net_dp *dp);
+void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring);
+
+enum nfp_nfd_version {
+       NFP_NFD_VER_NFD3,
+       NFP_NFD_VER_NFDK,
+};
+
+/**
+ * struct nfp_dp_ops - Hooks wrapping the different data path implementations
+ * @version:                   Data path type
+ * @tx_min_desc_per_pkt:       Minimum number of TX descriptors per packet
+ * @cap_mask:                  Mask of supported features
+ * @poll:                      Napi poll for normal rx/tx
+ * @xsk_poll:                  Napi poll when xsk is enabled
+ * @ctrl_poll:                 Tasklet poll for ctrl rx/tx
+ * @xmit:                      Xmit for normal path
+ * @ctrl_tx_one:               Xmit for ctrl path
+ * @rx_ring_fill_freelist:     Give buffers from the ring to FW
+ * @tx_ring_alloc:             Allocate resource for a TX ring
+ * @tx_ring_reset:             Free any untransmitted buffers and reset pointers
+ * @tx_ring_free:              Free resources allocated to a TX ring
+ * @tx_ring_bufs_alloc:                Allocate resource for each TX buffer
+ * @tx_ring_bufs_free:         Free resources allocated to each TX buffer
+ * @print_tx_descs:            Show TX ring's info for debugging
+ */
+struct nfp_dp_ops {
+       enum nfp_nfd_version version;
+       unsigned int tx_min_desc_per_pkt;
+       u32 cap_mask;
+
+       int (*poll)(struct napi_struct *napi, int budget);
+       int (*xsk_poll)(struct napi_struct *napi, int budget);
+       void (*ctrl_poll)(struct tasklet_struct *t);
+       netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *netdev);
+       bool (*ctrl_tx_one)(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+                           struct sk_buff *skb, bool old);
+       void (*rx_ring_fill_freelist)(struct nfp_net_dp *dp,
+                                     struct nfp_net_rx_ring *rx_ring);
+       int (*tx_ring_alloc)(struct nfp_net_dp *dp,
+                            struct nfp_net_tx_ring *tx_ring);
+       void (*tx_ring_reset)(struct nfp_net_dp *dp,
+                             struct nfp_net_tx_ring *tx_ring);
+       void (*tx_ring_free)(struct nfp_net_tx_ring *tx_ring);
+       int (*tx_ring_bufs_alloc)(struct nfp_net_dp *dp,
+                                 struct nfp_net_tx_ring *tx_ring);
+       void (*tx_ring_bufs_free)(struct nfp_net_dp *dp,
+                                 struct nfp_net_tx_ring *tx_ring);
+
+       void (*print_tx_descs)(struct seq_file *file,
+                              struct nfp_net_r_vector *r_vec,
+                              struct nfp_net_tx_ring *tx_ring,
+                              u32 d_rd_p, u32 d_wr_p);
+};
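
For orientation, a hypothetical, abridged initializer for one of these ops tables; the hook names below are assumptions for illustration, as this patch only declares nfp_nfd3_ops and nfp_nfdk_ops as extern:

    /* Hypothetical sketch -- hook names assumed, not part of this diff:
     *
     * const struct nfp_dp_ops nfp_nfd3_ops = {
     *         .version             = NFP_NFD_VER_NFD3,
     *         .tx_min_desc_per_pkt = 1,
     *         .poll                = nfp_nfd3_poll,
     *         .ctrl_poll           = nfp_nfd3_ctrl_poll,
     *         .xmit                = nfp_nfd3_tx,
     *         .ctrl_tx_one         = nfp_nfd3_ctrl_tx_one,
     *         ...
     * };
     */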
+
+static inline void
+nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+       return dp->ops->tx_ring_reset(dp, tx_ring);
+}
+
+static inline void
+nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+                             struct nfp_net_rx_ring *rx_ring)
+{
+       dp->ops->rx_ring_fill_freelist(dp, rx_ring);
+}
+
+static inline int
+nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+       return dp->ops->tx_ring_alloc(dp, tx_ring);
+}
+
+static inline void
+nfp_net_tx_ring_free(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+       dp->ops->tx_ring_free(tx_ring);
+}
+
+static inline int
+nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
+                          struct nfp_net_tx_ring *tx_ring)
+{
+       return dp->ops->tx_ring_bufs_alloc(dp, tx_ring);
+}
+
+static inline void
+nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
+                         struct nfp_net_tx_ring *tx_ring)
+{
+       dp->ops->tx_ring_bufs_free(dp, tx_ring);
+}
+
+static inline void
+nfp_net_debugfs_print_tx_descs(struct seq_file *file, struct nfp_net_dp *dp,
+                              struct nfp_net_r_vector *r_vec,
+                              struct nfp_net_tx_ring *tx_ring,
+                              u32 d_rd_p, u32 d_wr_p)
+{
+       dp->ops->print_tx_descs(file, r_vec, tx_ring, d_rd_p, d_wr_p);
+}
+
+extern const struct nfp_dp_ops nfp_nfd3_ops;
+extern const struct nfp_dp_ops nfp_nfdk_ops;
+
+netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev);
+
+#endif /* _NFP_NET_DP_ */
index e0c2747..61c8b45 100644 (file)
 #include <linux/sfp.h>
 
 #include "nfpcore/nfp.h"
+#include "nfpcore/nfp_dev.h"
 #include "nfpcore/nfp_nsp.h"
 #include "nfp_app.h"
 #include "nfp_main.h"
 #include "nfp_net_ctrl.h"
+#include "nfp_net_dp.h"
 #include "nfp_net.h"
 #include "nfp_port.h"
 
@@ -217,7 +219,7 @@ nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
        struct nfp_net *nn = netdev_priv(netdev);
 
        snprintf(vnic_version, sizeof(vnic_version), "%d.%d.%d.%d",
-                nn->fw_ver.resv, nn->fw_ver.class,
+                nn->fw_ver.extend, nn->fw_ver.class,
                 nn->fw_ver.major, nn->fw_ver.minor);
        strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
                sizeof(drvinfo->bus_info));
@@ -386,9 +388,10 @@ static void nfp_net_get_ringparam(struct net_device *netdev,
                                  struct netlink_ext_ack *extack)
 {
        struct nfp_net *nn = netdev_priv(netdev);
+       u32 qc_max = nn->dev_info->max_qc_size;
 
-       ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
-       ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
+       ring->rx_max_pending = qc_max;
+       ring->tx_max_pending = qc_max / nn->dp.ops->tx_min_desc_per_pkt;
        ring->rx_pending = nn->dp.rxd_cnt;
        ring->tx_pending = nn->dp.txd_cnt;
 }
@@ -412,19 +415,22 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
                                 struct kernel_ethtool_ringparam *kernel_ring,
                                 struct netlink_ext_ack *extack)
 {
+       u32 tx_dpp, qc_min, qc_max, rxd_cnt, txd_cnt;
        struct nfp_net *nn = netdev_priv(netdev);
-       u32 rxd_cnt, txd_cnt;
 
        /* We don't have separate queues/rings for small/large frames. */
        if (ring->rx_mini_pending || ring->rx_jumbo_pending)
                return -EINVAL;
 
+       qc_min = nn->dev_info->min_qc_size;
+       qc_max = nn->dev_info->max_qc_size;
+       tx_dpp = nn->dp.ops->tx_min_desc_per_pkt;
        /* Round up to supported values */
        rxd_cnt = roundup_pow_of_two(ring->rx_pending);
        txd_cnt = roundup_pow_of_two(ring->tx_pending);
 
-       if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
-           txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
+       if (rxd_cnt < qc_min || rxd_cnt > qc_max ||
+           txd_cnt < qc_min / tx_dpp || txd_cnt > qc_max / tx_dpp)
                return -EINVAL;
 
        if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
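
A worked example of the new bounds, using the NFP3800 queue controller sizes from nfp_dev.c later in this diff (min_qc_size = 512, max_qc_size = 64K) and assuming tx_min_desc_per_pkt = 2 for the active datapath (the real per-datapath value is defined outside this excerpt):

	u32 qc_min = 512, qc_max = SZ_64K, tx_dpp = 2;	/* tx_dpp assumed */

	/* rxd_cnt must land in [512, 65536], txd_cnt in [256, 32768] */
	u32 rxd_cnt = roundup_pow_of_two(300);	/* ->  512, accepted */
	u32 txd_cnt = roundup_pow_of_two(1000);	/* -> 1024, accepted */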
index 751f76c..ca4e056 100644 (file)
@@ -22,6 +22,7 @@
 
 #include "nfpcore/nfp.h"
 #include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_dev.h"
 #include "nfpcore/nfp_nffw.h"
 #include "nfpcore/nfp_nsp.h"
 #include "nfpcore/nfp6000_pcie.h"
@@ -116,13 +117,12 @@ nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
        n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
 
        /* Allocate and initialise the vNIC */
-       nn = nfp_net_alloc(pf->pdev, ctrl_bar, needs_netdev,
+       nn = nfp_net_alloc(pf->pdev, pf->dev_info, ctrl_bar, needs_netdev,
                           n_tx_rings, n_rx_rings);
        if (IS_ERR(nn))
                return nn;
 
        nn->app = pf->app;
-       nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
        nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
        nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
        nn->dp.is_vf = 0;
@@ -307,6 +307,7 @@ err_prev_deinit:
 static int
 nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
 {
+       struct devlink *devlink = priv_to_devlink(pf);
        u8 __iomem *ctrl_bar;
        int err;
 
@@ -314,9 +315,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
        if (IS_ERR(pf->app))
                return PTR_ERR(pf->app);
 
-       mutex_lock(&pf->lock);
+       devl_lock(devlink);
        err = nfp_app_init(pf->app);
-       mutex_unlock(&pf->lock);
+       devl_unlock(devlink);
        if (err)
                goto err_free;
 
@@ -343,9 +344,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
 err_unmap:
        nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
 err_app_clean:
-       mutex_lock(&pf->lock);
+       devl_lock(devlink);
        nfp_app_clean(pf->app);
-       mutex_unlock(&pf->lock);
+       devl_unlock(devlink);
 err_free:
        nfp_app_free(pf->app);
        pf->app = NULL;
@@ -354,14 +355,16 @@ err_free:
 
 static void nfp_net_pf_app_clean(struct nfp_pf *pf)
 {
+       struct devlink *devlink = priv_to_devlink(pf);
+
        if (pf->ctrl_vnic) {
                nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
                nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
        }
 
-       mutex_lock(&pf->lock);
+       devl_lock(devlink);
        nfp_app_clean(pf->app);
-       mutex_unlock(&pf->lock);
+       devl_unlock(devlink);
 
        nfp_app_free(pf->app);
        pf->app = NULL;
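
The locking hunks in this file all follow one conversion: the driver-private pf->lock mutex is replaced by the devlink instance lock, so devlink core and the driver now serialize on the same lock. The resulting pattern, using only APIs visible in this diff:

	struct devlink *devlink = priv_to_devlink(pf);

	devl_lock(devlink);
	/* ... walk pf->vnics / pf->ports ... */
	devl_unlock(devlink);

	/* in helpers that expect the caller to hold the lock: */
	devl_assert_locked(devlink);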
@@ -495,8 +498,9 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf)
        }
 
        cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
-       mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id, NFP_PCIE_QUEUE(0),
-                              NFP_QCP_QUEUE_AREA_SZ, &pf->qc_area);
+       mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id,
+                              nfp_qcp_queue_offset(pf->dev_info, 0),
+                              pf->dev_info->qc_area_sz, &pf->qc_area);
        if (IS_ERR(mem)) {
                nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
                err = PTR_ERR(mem);
@@ -546,12 +550,13 @@ nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
 
 int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
 {
+       struct devlink *devlink = priv_to_devlink(pf);
        struct nfp_eth_table *eth_table;
        struct nfp_net *nn, *next;
        struct nfp_port *port;
        int err;
 
-       lockdep_assert_held(&pf->lock);
+       devl_assert_locked(devlink);
 
        /* Check for nfp_net_pci_remove() racing against us */
        if (list_empty(&pf->vnics))
@@ -600,10 +605,11 @@ static void nfp_net_refresh_vnics(struct work_struct *work)
 {
        struct nfp_pf *pf = container_of(work, struct nfp_pf,
                                         port_refresh_work);
+       struct devlink *devlink = priv_to_devlink(pf);
 
-       mutex_lock(&pf->lock);
+       devl_lock(devlink);
        nfp_net_refresh_port_table_sync(pf);
-       mutex_unlock(&pf->lock);
+       devl_unlock(devlink);
 }
 
 void nfp_net_refresh_port_table(struct nfp_port *port)
@@ -672,9 +678,11 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
        }
 
        nfp_net_get_fw_version(&fw_ver, ctrl_bar);
-       if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
+       if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK ||
+           fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
                nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
-                       fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
+                       fw_ver.extend, fw_ver.class,
+                       fw_ver.major, fw_ver.minor);
                err = -EINVAL;
                goto err_unmap;
        }
@@ -690,7 +698,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
                        break;
                default:
                        nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
-                               fw_ver.resv, fw_ver.class,
+                               fw_ver.extend, fw_ver.class,
                                fw_ver.major, fw_ver.minor);
                        err = -EINVAL;
                        goto err_unmap;
@@ -709,7 +717,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
        if (err)
                goto err_shared_buf_unreg;
 
-       mutex_lock(&pf->lock);
+       devl_lock(devlink);
        pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
 
        /* Allocate the vnics and do basic init */
@@ -729,7 +737,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
        if (err)
                goto err_stop_app;
 
-       mutex_unlock(&pf->lock);
+       devl_unlock(devlink);
        devlink_register(devlink);
 
        return 0;
@@ -742,7 +750,7 @@ err_free_vnics:
        nfp_net_pf_free_vnics(pf);
 err_clean_ddir:
        nfp_net_debugfs_dir_clean(&pf->ddir);
-       mutex_unlock(&pf->lock);
+       devl_unlock(devlink);
        nfp_devlink_params_unregister(pf);
 err_shared_buf_unreg:
        nfp_shared_buf_unregister(pf);
@@ -756,10 +764,11 @@ err_unmap:
 
 void nfp_net_pci_remove(struct nfp_pf *pf)
 {
+       struct devlink *devlink = priv_to_devlink(pf);
        struct nfp_net *nn, *next;
 
        devlink_unregister(priv_to_devlink(pf));
-       mutex_lock(&pf->lock);
+       devl_lock(devlink);
        list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
                if (!nfp_net_is_data_vnic(nn))
                        continue;
@@ -771,7 +780,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
        /* stop app first, to avoid double free of ctrl vNIC's ddir */
        nfp_net_debugfs_dir_clean(&pf->ddir);
 
-       mutex_unlock(&pf->lock);
+       devl_unlock(devlink);
 
        nfp_devlink_params_unregister(pf);
        nfp_shared_buf_unregister(pf);
index 181ac8e..ba3fa7e 100644 (file)
@@ -20,7 +20,7 @@ struct net_device *
 nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set, unsigned int id)
 {
        return rcu_dereference_protected(set->reprs[id],
-                                        lockdep_is_held(&app->pf->lock));
+                                        nfp_app_is_locked(app));
 }
 
 static void
@@ -476,7 +476,7 @@ nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type)
        int i;
 
        reprs = rcu_dereference_protected(app->reprs[type],
-                                         lockdep_is_held(&app->pf->lock));
+                                         nfp_app_is_locked(app));
        if (!reprs)
                return;
 
index ab72432..8682944 100644 (file)
 
 #include "nfp_app.h"
 #include "nfp_net.h"
+#include "nfp_net_dp.h"
 #include "nfp_net_xsk.h"
 
-static int nfp_net_tx_space(struct nfp_net_tx_ring *tx_ring)
-{
-       return tx_ring->cnt - tx_ring->wr_p + tx_ring->rd_p - 1;
-}
-
-static void nfp_net_xsk_tx_free(struct nfp_net_tx_buf *txbuf)
-{
-       xsk_buff_free(txbuf->xdp);
-
-       txbuf->dma_addr = 0;
-       txbuf->xdp = NULL;
-}
-
-void nfp_net_xsk_tx_bufs_free(struct nfp_net_tx_ring *tx_ring)
-{
-       struct nfp_net_tx_buf *txbuf;
-       unsigned int idx;
-
-       while (tx_ring->rd_p != tx_ring->wr_p) {
-               idx = D_IDX(tx_ring, tx_ring->rd_p);
-               txbuf = &tx_ring->txbufs[idx];
-
-               txbuf->real_len = 0;
-
-               tx_ring->qcp_rd_p++;
-               tx_ring->rd_p++;
-
-               if (tx_ring->r_vec->xsk_pool) {
-                       if (txbuf->is_xsk_tx)
-                               nfp_net_xsk_tx_free(txbuf);
-
-                       xsk_tx_completed(tx_ring->r_vec->xsk_pool, 1);
-               }
-       }
-}
-
-static bool nfp_net_xsk_complete(struct nfp_net_tx_ring *tx_ring)
-{
-       struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
-       u32 done_pkts = 0, done_bytes = 0, reused = 0;
-       bool done_all;
-       int idx, todo;
-       u32 qcp_rd_p;
-
-       if (tx_ring->wr_p == tx_ring->rd_p)
-               return true;
-
-       /* Work out how many descriptors have been transmitted. */
-       qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
-
-       if (qcp_rd_p == tx_ring->qcp_rd_p)
-               return true;
-
-       todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
-
-       done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
-       todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
-
-       tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
-
-       done_pkts = todo;
-       while (todo--) {
-               struct nfp_net_tx_buf *txbuf;
-
-               idx = D_IDX(tx_ring, tx_ring->rd_p);
-               tx_ring->rd_p++;
-
-               txbuf = &tx_ring->txbufs[idx];
-               if (unlikely(!txbuf->real_len))
-                       continue;
-
-               done_bytes += txbuf->real_len;
-               txbuf->real_len = 0;
-
-               if (txbuf->is_xsk_tx) {
-                       nfp_net_xsk_tx_free(txbuf);
-                       reused++;
-               }
-       }
-
-       u64_stats_update_begin(&r_vec->tx_sync);
-       r_vec->tx_bytes += done_bytes;
-       r_vec->tx_pkts += done_pkts;
-       u64_stats_update_end(&r_vec->tx_sync);
-
-       xsk_tx_completed(r_vec->xsk_pool, done_pkts - reused);
-
-       WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
-                 "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
-                 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
-
-       return done_all;
-}
-
-static void nfp_net_xsk_tx(struct nfp_net_tx_ring *tx_ring)
-{
-       struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
-       struct xdp_desc desc[NFP_NET_XSK_TX_BATCH];
-       struct xsk_buff_pool *xsk_pool;
-       struct nfp_net_tx_desc *txd;
-       u32 pkts = 0, wr_idx;
-       u32 i, got;
-
-       xsk_pool = r_vec->xsk_pool;
-
-       while (nfp_net_tx_space(tx_ring) >= NFP_NET_XSK_TX_BATCH) {
-               for (i = 0; i < NFP_NET_XSK_TX_BATCH; i++)
-                       if (!xsk_tx_peek_desc(xsk_pool, &desc[i]))
-                               break;
-               got = i;
-               if (!got)
-                       break;
-
-               wr_idx = D_IDX(tx_ring, tx_ring->wr_p + i);
-               prefetchw(&tx_ring->txds[wr_idx]);
-
-               for (i = 0; i < got; i++)
-                       xsk_buff_raw_dma_sync_for_device(xsk_pool, desc[i].addr,
-                                                        desc[i].len);
-
-               for (i = 0; i < got; i++) {
-                       wr_idx = D_IDX(tx_ring, tx_ring->wr_p + i);
-
-                       tx_ring->txbufs[wr_idx].real_len = desc[i].len;
-                       tx_ring->txbufs[wr_idx].is_xsk_tx = false;
-
-                       /* Build TX descriptor. */
-                       txd = &tx_ring->txds[wr_idx];
-                       nfp_desc_set_dma_addr(txd,
-                                             xsk_buff_raw_get_dma(xsk_pool,
-                                                                  desc[i].addr
-                                                                  ));
-                       txd->offset_eop = PCIE_DESC_TX_EOP;
-                       txd->dma_len = cpu_to_le16(desc[i].len);
-                       txd->data_len = cpu_to_le16(desc[i].len);
-               }
-
-               tx_ring->wr_p += got;
-               pkts += got;
-       }
-
-       if (!pkts)
-               return;
-
-       xsk_tx_release(xsk_pool);
-       /* Ensure all records are visible before incrementing write counter. */
-       wmb();
-       nfp_qcp_wr_ptr_add(tx_ring->qcp_q, pkts);
-}
-
-static bool
-nfp_net_xsk_tx_xdp(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
-                  struct nfp_net_rx_ring *rx_ring,
-                  struct nfp_net_tx_ring *tx_ring,
-                  struct nfp_net_xsk_rx_buf *xrxbuf, unsigned int pkt_len,
-                  int pkt_off)
-{
-       struct xsk_buff_pool *pool = r_vec->xsk_pool;
-       struct nfp_net_tx_buf *txbuf;
-       struct nfp_net_tx_desc *txd;
-       unsigned int wr_idx;
-
-       if (nfp_net_tx_space(tx_ring) < 1)
-               return false;
-
-       xsk_buff_raw_dma_sync_for_device(pool, xrxbuf->dma_addr + pkt_off, pkt_len);
-
-       wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
-
-       txbuf = &tx_ring->txbufs[wr_idx];
-       txbuf->xdp = xrxbuf->xdp;
-       txbuf->real_len = pkt_len;
-       txbuf->is_xsk_tx = true;
-
-       /* Build TX descriptor */
-       txd = &tx_ring->txds[wr_idx];
-       txd->offset_eop = PCIE_DESC_TX_EOP;
-       txd->dma_len = cpu_to_le16(pkt_len);
-       nfp_desc_set_dma_addr(txd, xrxbuf->dma_addr + pkt_off);
-       txd->data_len = cpu_to_le16(pkt_len);
-
-       txd->flags = 0;
-       txd->mss = 0;
-       txd->lso_hdrlen = 0;
-
-       tx_ring->wr_ptr_add++;
-       tx_ring->wr_p++;
-
-       return true;
-}
-
-static int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring)
-{
-       return rx_ring->cnt - rx_ring->wr_p + rx_ring->rd_p - 1;
-}
-
 static void
 nfp_net_xsk_rx_bufs_stash(struct nfp_net_rx_ring *rx_ring, unsigned int idx,
                          struct xdp_buff *xdp)
@@ -224,13 +29,13 @@ nfp_net_xsk_rx_bufs_stash(struct nfp_net_rx_ring *rx_ring, unsigned int idx,
                xsk_buff_xdp_get_frame_dma(xdp) + headroom;
 }
 
-static void nfp_net_xsk_rx_unstash(struct nfp_net_xsk_rx_buf *rxbuf)
+void nfp_net_xsk_rx_unstash(struct nfp_net_xsk_rx_buf *rxbuf)
 {
        rxbuf->dma_addr = 0;
        rxbuf->xdp = NULL;
 }
 
-static void nfp_net_xsk_rx_free(struct nfp_net_xsk_rx_buf *rxbuf)
+void nfp_net_xsk_rx_free(struct nfp_net_xsk_rx_buf *rxbuf)
 {
        if (rxbuf->xdp)
                xsk_buff_free(rxbuf->xdp);
@@ -277,8 +82,8 @@ void nfp_net_xsk_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
        nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, wr_ptr_add);
 }
 
-static void nfp_net_xsk_rx_drop(struct nfp_net_r_vector *r_vec,
-                               struct nfp_net_xsk_rx_buf *xrxbuf)
+void nfp_net_xsk_rx_drop(struct nfp_net_r_vector *r_vec,
+                        struct nfp_net_xsk_rx_buf *xrxbuf)
 {
        u64_stats_update_begin(&r_vec->rx_sync);
        r_vec->rx_drops++;
@@ -287,213 +92,6 @@ static void nfp_net_xsk_rx_drop(struct nfp_net_r_vector *r_vec,
        nfp_net_xsk_rx_free(xrxbuf);
 }
 
-static void nfp_net_xsk_rx_skb(struct nfp_net_rx_ring *rx_ring,
-                              const struct nfp_net_rx_desc *rxd,
-                              struct nfp_net_xsk_rx_buf *xrxbuf,
-                              const struct nfp_meta_parsed *meta,
-                              unsigned int pkt_len,
-                              bool meta_xdp,
-                              unsigned int *skbs_polled)
-{
-       struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
-       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
-       struct net_device *netdev;
-       struct sk_buff *skb;
-
-       if (likely(!meta->portid)) {
-               netdev = dp->netdev;
-       } else {
-               struct nfp_net *nn = netdev_priv(dp->netdev);
-
-               netdev = nfp_app_dev_get(nn->app, meta->portid, NULL);
-               if (unlikely(!netdev)) {
-                       nfp_net_xsk_rx_drop(r_vec, xrxbuf);
-                       return;
-               }
-               nfp_repr_inc_rx_stats(netdev, pkt_len);
-       }
-
-       skb = napi_alloc_skb(&r_vec->napi, pkt_len);
-       if (!skb) {
-               nfp_net_xsk_rx_drop(r_vec, xrxbuf);
-               return;
-       }
-       memcpy(skb_put(skb, pkt_len), xrxbuf->xdp->data, pkt_len);
-
-       skb->mark = meta->mark;
-       skb_set_hash(skb, meta->hash, meta->hash_type);
-
-       skb_record_rx_queue(skb, rx_ring->idx);
-       skb->protocol = eth_type_trans(skb, netdev);
-
-       nfp_net_rx_csum(dp, r_vec, rxd, meta, skb);
-
-       if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
-               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
-                                      le16_to_cpu(rxd->rxd.vlan));
-       if (meta_xdp)
-               skb_metadata_set(skb,
-                                xrxbuf->xdp->data - xrxbuf->xdp->data_meta);
-
-       napi_gro_receive(&rx_ring->r_vec->napi, skb);
-
-       nfp_net_xsk_rx_free(xrxbuf);
-
-       (*skbs_polled)++;
-}
-
-static unsigned int
-nfp_net_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget,
-              unsigned int *skbs_polled)
-{
-       struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
-       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
-       struct nfp_net_tx_ring *tx_ring;
-       struct bpf_prog *xdp_prog;
-       bool xdp_redir = false;
-       int pkts_polled = 0;
-
-       xdp_prog = READ_ONCE(dp->xdp_prog);
-       tx_ring = r_vec->xdp_ring;
-
-       while (pkts_polled < budget) {
-               unsigned int meta_len, data_len, pkt_len, pkt_off;
-               struct nfp_net_xsk_rx_buf *xrxbuf;
-               struct nfp_net_rx_desc *rxd;
-               struct nfp_meta_parsed meta;
-               int idx, act;
-
-               idx = D_IDX(rx_ring, rx_ring->rd_p);
-
-               rxd = &rx_ring->rxds[idx];
-               if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
-                       break;
-
-               rx_ring->rd_p++;
-               pkts_polled++;
-
-               xrxbuf = &rx_ring->xsk_rxbufs[idx];
-
-               /* If starved of buffers "drop" it and scream. */
-               if (rx_ring->rd_p >= rx_ring->wr_p) {
-                       nn_dp_warn(dp, "Starved of RX buffers\n");
-                       nfp_net_xsk_rx_drop(r_vec, xrxbuf);
-                       break;
-               }
-
-               /* Memory barrier to ensure that we won't do other reads
-                * before the DD bit.
-                */
-               dma_rmb();
-
-               memset(&meta, 0, sizeof(meta));
-
-               /* Only supporting AF_XDP with dynamic metadata so buffer layout
-                * is always:
-                *
-                *  ---------------------------------------------------------
-                * |  off | metadata  |             packet           | XXXX  |
-                *  ---------------------------------------------------------
-                */
-               meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
-               data_len = le16_to_cpu(rxd->rxd.data_len);
-               pkt_len = data_len - meta_len;
-
-               if (unlikely(meta_len > NFP_NET_MAX_PREPEND)) {
-                       nn_dp_warn(dp, "Oversized RX packet metadata %u\n",
-                                  meta_len);
-                       nfp_net_xsk_rx_drop(r_vec, xrxbuf);
-                       continue;
-               }
-
-               /* Stats update. */
-               u64_stats_update_begin(&r_vec->rx_sync);
-               r_vec->rx_pkts++;
-               r_vec->rx_bytes += pkt_len;
-               u64_stats_update_end(&r_vec->rx_sync);
-
-               xrxbuf->xdp->data += meta_len;
-               xrxbuf->xdp->data_end = xrxbuf->xdp->data + pkt_len;
-               xdp_set_data_meta_invalid(xrxbuf->xdp);
-               xsk_buff_dma_sync_for_cpu(xrxbuf->xdp, r_vec->xsk_pool);
-               net_prefetch(xrxbuf->xdp->data);
-
-               if (meta_len) {
-                       if (unlikely(nfp_net_parse_meta(dp->netdev, &meta,
-                                                       xrxbuf->xdp->data -
-                                                       meta_len,
-                                                       xrxbuf->xdp->data,
-                                                       pkt_len, meta_len))) {
-                               nn_dp_warn(dp, "Invalid RX packet metadata\n");
-                               nfp_net_xsk_rx_drop(r_vec, xrxbuf);
-                               continue;
-                       }
-
-                       if (unlikely(meta.portid)) {
-                               struct nfp_net *nn = netdev_priv(dp->netdev);
-
-                               if (meta.portid != NFP_META_PORT_ID_CTRL) {
-                                       nfp_net_xsk_rx_skb(rx_ring, rxd, xrxbuf,
-                                                          &meta, pkt_len,
-                                                          false, skbs_polled);
-                                       continue;
-                               }
-
-                               nfp_app_ctrl_rx_raw(nn->app, xrxbuf->xdp->data,
-                                                   pkt_len);
-                               nfp_net_xsk_rx_free(xrxbuf);
-                               continue;
-                       }
-               }
-
-               act = bpf_prog_run_xdp(xdp_prog, xrxbuf->xdp);
-
-               pkt_len = xrxbuf->xdp->data_end - xrxbuf->xdp->data;
-               pkt_off = xrxbuf->xdp->data - xrxbuf->xdp->data_hard_start;
-
-               switch (act) {
-               case XDP_PASS:
-                       nfp_net_xsk_rx_skb(rx_ring, rxd, xrxbuf, &meta, pkt_len,
-                                          true, skbs_polled);
-                       break;
-               case XDP_TX:
-                       if (!nfp_net_xsk_tx_xdp(dp, r_vec, rx_ring, tx_ring,
-                                               xrxbuf, pkt_len, pkt_off))
-                               nfp_net_xsk_rx_drop(r_vec, xrxbuf);
-                       else
-                               nfp_net_xsk_rx_unstash(xrxbuf);
-                       break;
-               case XDP_REDIRECT:
-                       if (xdp_do_redirect(dp->netdev, xrxbuf->xdp, xdp_prog)) {
-                               nfp_net_xsk_rx_drop(r_vec, xrxbuf);
-                       } else {
-                               nfp_net_xsk_rx_unstash(xrxbuf);
-                               xdp_redir = true;
-                       }
-                       break;
-               default:
-                       bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
-                       fallthrough;
-               case XDP_ABORTED:
-                       trace_xdp_exception(dp->netdev, xdp_prog, act);
-                       fallthrough;
-               case XDP_DROP:
-                       nfp_net_xsk_rx_drop(r_vec, xrxbuf);
-                       break;
-               }
-       }
-
-       nfp_net_xsk_rx_ring_fill_freelist(r_vec->rx_ring);
-
-       if (xdp_redir)
-               xdp_do_flush_map();
-
-       if (tx_ring->wr_ptr_add)
-               nfp_net_tx_xmit_more_flush(tx_ring);
-
-       return pkts_polled;
-}
-
 static void nfp_net_xsk_pool_unmap(struct device *dev,
                                   struct xsk_buff_pool *pool)
 {
@@ -514,6 +112,10 @@ int nfp_net_xsk_setup_pool(struct net_device *netdev,
        struct nfp_net_dp *dp;
        int err;
 
+       /* NFDK doesn't implement xsk yet. */
+       if (nn->dp.ops->version == NFP_NFD_VER_NFDK)
+               return -EOPNOTSUPP;
+
        /* Reject on old FWs so we can drop some checks on datapath. */
        if (nn->dp.rx_offset != NFP_NET_CFG_RX_OFFSET_DYNAMIC)
                return -EOPNOTSUPP;
@@ -566,27 +168,3 @@ int nfp_net_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags)
 
        return 0;
 }
-
-int nfp_net_xsk_poll(struct napi_struct *napi, int budget)
-{
-       struct nfp_net_r_vector *r_vec =
-               container_of(napi, struct nfp_net_r_vector, napi);
-       unsigned int pkts_polled, skbs = 0;
-
-       pkts_polled = nfp_net_xsk_rx(r_vec->rx_ring, budget, &skbs);
-
-       if (pkts_polled < budget) {
-               if (r_vec->tx_ring)
-                       nfp_net_tx_complete(r_vec->tx_ring, budget);
-
-               if (!nfp_net_xsk_complete(r_vec->xdp_ring))
-                       pkts_polled = budget;
-
-               nfp_net_xsk_tx(r_vec->xdp_ring);
-
-               if (pkts_polled < budget && napi_complete_done(napi, skbs))
-                       nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
-       }
-
-       return pkts_polled;
-}
index 5c8549c..6d281eb 100644 (file)
@@ -15,15 +15,27 @@ static inline bool nfp_net_has_xsk_pool_slow(struct nfp_net_dp *dp,
        return dp->xdp_prog && dp->xsk_pools[qid];
 }
 
+static inline int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring)
+{
+       return rx_ring->cnt - rx_ring->wr_p + rx_ring->rd_p - 1;
+}
+
+static inline int nfp_net_tx_space(struct nfp_net_tx_ring *tx_ring)
+{
+       return tx_ring->cnt - tx_ring->wr_p + tx_ring->rd_p - 1;
+}
+
+void nfp_net_xsk_rx_unstash(struct nfp_net_xsk_rx_buf *rxbuf);
+void nfp_net_xsk_rx_free(struct nfp_net_xsk_rx_buf *rxbuf);
+void nfp_net_xsk_rx_drop(struct nfp_net_r_vector *r_vec,
+                        struct nfp_net_xsk_rx_buf *xrxbuf);
 int nfp_net_xsk_setup_pool(struct net_device *netdev, struct xsk_buff_pool *pool,
                           u16 queue_id);
 
-void nfp_net_xsk_tx_bufs_free(struct nfp_net_tx_ring *tx_ring);
 void nfp_net_xsk_rx_bufs_free(struct nfp_net_rx_ring *rx_ring);
 
 void nfp_net_xsk_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring);
 
 int nfp_net_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
-int nfp_net_xsk_poll(struct napi_struct *napi, int budget);
 
 #endif /* _NFP_XSK_H_ */
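
The space helpers moved into this header assume free-running producer/consumer counters: wr_p and rd_p only ever increment and D_IDX masks them onto the ring, with one slot kept unused so a full ring is distinguishable from an empty one. A quick worked example:

	/* cnt = 1024, wr_p = 1030, rd_p = 10:
	 *   in flight: wr_p - rd_p           = 1020 descriptors
	 *   free:      cnt - wr_p + rd_p - 1 = 3 slots
	 */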
index 87f2268..a51eb26 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/etherdevice.h>
 
+#include "nfpcore/nfp_dev.h"
 #include "nfp_net_ctrl.h"
 #include "nfp_net.h"
 #include "nfp_main.h"
@@ -36,11 +37,14 @@ struct nfp_net_vf {
 
 static const char nfp_net_driver_name[] = "nfp_netvf";
 
-#define PCI_DEVICE_NFP6000VF           0x6003
 static const struct pci_device_id nfp_netvf_pci_device_ids[] = {
-       { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000VF,
+       { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP3800_VF,
          PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
-         PCI_ANY_ID, 0,
+         PCI_ANY_ID, 0, NFP_DEV_NFP3800_VF,
+       },
+       { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF,
+         PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
+         PCI_ANY_ID, 0, NFP_DEV_NFP6000_VF,
        },
        { 0, } /* Required last entry. */
 };
@@ -65,6 +69,7 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
 static int nfp_netvf_pci_probe(struct pci_dev *pdev,
                               const struct pci_device_id *pci_id)
 {
+       const struct nfp_dev_info *dev_info;
        struct nfp_net_fw_version fw_ver;
        int max_tx_rings, max_rx_rings;
        u32 tx_bar_off, rx_bar_off;
@@ -78,6 +83,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
        int stride;
        int err;
 
+       dev_info = &nfp_dev_info[pci_id->driver_data];
+
        vf = kzalloc(sizeof(*vf), GFP_KERNEL);
        if (!vf)
                return -ENOMEM;
@@ -95,8 +102,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
 
        pci_set_master(pdev);
 
-       err = dma_set_mask_and_coherent(&pdev->dev,
-                                       DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS));
+       err = dma_set_mask_and_coherent(&pdev->dev, dev_info->dma_mask);
        if (err)
                goto err_pci_regions;
 
@@ -116,9 +122,11 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
        }
 
        nfp_net_get_fw_version(&fw_ver, ctrl_bar);
-       if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
+       if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK ||
+           fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
                dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
-                       fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
+                       fw_ver.extend, fw_ver.class,
+                       fw_ver.major, fw_ver.minor);
                err = -EINVAL;
                goto err_ctrl_unmap;
        }
@@ -138,7 +146,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
                        break;
                default:
                        dev_err(&pdev->dev, "Unsupported Firmware ABI %d.%d.%d.%d\n",
-                               fw_ver.resv, fw_ver.class,
+                               fw_ver.extend, fw_ver.class,
                                fw_ver.major, fw_ver.minor);
                        err = -EINVAL;
                        goto err_ctrl_unmap;
@@ -167,19 +175,19 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
        }
 
        startq = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
-       tx_bar_off = NFP_PCIE_QUEUE(startq);
+       tx_bar_off = nfp_qcp_queue_offset(dev_info, startq);
        startq = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
-       rx_bar_off = NFP_PCIE_QUEUE(startq);
+       rx_bar_off = nfp_qcp_queue_offset(dev_info, startq);
 
        /* Allocate and initialise the netdev */
-       nn = nfp_net_alloc(pdev, ctrl_bar, true, max_tx_rings, max_rx_rings);
+       nn = nfp_net_alloc(pdev, dev_info, ctrl_bar, true,
+                          max_tx_rings, max_rx_rings);
        if (IS_ERR(nn)) {
                err = PTR_ERR(nn);
                goto err_ctrl_unmap;
        }
        vf->nn = nn;
 
-       nn->fw_ver = fw_ver;
        nn->dp.is_vf = 1;
        nn->stride_tx = stride;
        nn->stride_rx = stride;
index 93c5bfc..4f23085 100644 (file)
@@ -75,23 +75,6 @@ int nfp_port_set_features(struct net_device *netdev, netdev_features_t features)
        return 0;
 }
 
-struct nfp_port *
-nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id)
-{
-       struct nfp_port *port;
-
-       lockdep_assert_held(&pf->lock);
-
-       if (type != NFP_PORT_PHYS_PORT)
-               return NULL;
-
-       list_for_each_entry(port, &pf->ports, port_list)
-               if (port->eth_id == id)
-                       return port;
-
-       return NULL;
-}
-
 struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port)
 {
        if (!port)
index df316b9..d1ebe6c 100644 (file)
@@ -106,8 +106,6 @@ nfp_port_set_features(struct net_device *netdev, netdev_features_t features);
 struct nfp_port *nfp_port_from_netdev(struct net_device *netdev);
 int nfp_port_get_port_parent_id(struct net_device *netdev,
                                struct netdev_phys_item_id *ppid);
-struct nfp_port *
-nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id);
 struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port);
 struct nfp_eth_table_port *nfp_port_get_eth_port(struct nfp_port *port);
 
index 252fe06..0d1d39e 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/pci.h>
 
 #include "nfp_cpp.h"
+#include "nfp_dev.h"
 
 #include "nfp6000/nfp6000.h"
 
 #define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4))
 #define NFP_PCIE_P2C_GENERAL_SIZE(bar)             (1 << ((bar)->bitsize - 4))
 
-#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
-       (0x400 + ((bar) * 8 + (slot)) * 4)
-
-#define NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
-       (((bar) * 8 + (slot)) * 4)
+#define NFP_PCIE_P2C_EXPBAR_OFFSET(bar_index)          ((bar_index) * 4)
 
 /* The number of explicit BARs to reserve.
  * Minimum is 0, maximum is 4 on the NFP6000.
@@ -145,6 +142,7 @@ struct nfp_bar {
 struct nfp6000_pcie {
        struct pci_dev *pdev;
        struct device *dev;
+       const struct nfp_dev_info *dev_info;
 
        /* PCI BAR management */
        spinlock_t bar_lock;            /* Protect the PCI2CPP BAR cache */
@@ -269,19 +267,16 @@ compute_bar(const struct nfp6000_pcie *nfp, const struct nfp_bar *bar,
 static int
 nfp6000_bar_write(struct nfp6000_pcie *nfp, struct nfp_bar *bar, u32 newcfg)
 {
-       int base, slot;
-       int xbar;
+       unsigned int xbar;
 
-       base = bar->index >> 3;
-       slot = bar->index & 7;
+       xbar = NFP_PCIE_P2C_EXPBAR_OFFSET(bar->index);
 
        if (nfp->iomem.csr) {
-               xbar = NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(base, slot);
                writel(newcfg, nfp->iomem.csr + xbar);
                /* Readback to ensure BAR is flushed */
                readl(nfp->iomem.csr + xbar);
        } else {
-               xbar = NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(base, slot);
+               xbar += nfp->dev_info->pcie_cfg_expbar_offset;
                pci_write_config_dword(nfp->pdev, xbar, newcfg);
        }
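
The new macro collapses the old base/slot arithmetic into a flat index, with the per-chip PCI config-space base moved into nfp_dev_info. A quick check that old and new agree for bar->index = 10 (base = 1, slot = 2 in the old scheme), using the NFP6000 offset from nfp_dev.c below:

	/* old: 0x400 + (1 * 8 + 2) * 4                    = 0x428
	 * new: NFP_PCIE_P2C_EXPBAR_OFFSET(10)             = 0x028
	 *      + pcie_cfg_expbar_offset (0x0400, NFP6000) = 0x428
	 */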
 
@@ -622,7 +617,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
 
                nfp6000_bar_write(nfp, bar, barcfg_msix_general);
 
-               nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000;
+               nfp->expl.data = bar->iomem + NFP_PCIE_SRAM +
+                       nfp->dev_info->pcie_expl_offset;
 
                switch (nfp->pdev->device) {
                case PCI_DEVICE_ID_NETRONOME_NFP3800:
@@ -1306,18 +1302,20 @@ static const struct nfp_cpp_operations nfp6000_pcie_ops = {
 /**
  * nfp_cpp_from_nfp6000_pcie() - Build a NFP CPP bus from a NFP6000 PCI device
  * @pdev:      NFP6000 PCI device
+ * @dev_info:  NFP ASIC params
  *
  * Return: NFP CPP handle
  */
-struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev)
+struct nfp_cpp *
+nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info)
 {
        struct nfp6000_pcie *nfp;
        u16 interface;
        int err;
 
        /*  Finished with card initialization. */
-       dev_info(&pdev->dev,
-                "Netronome Flow Processor NFP4000/NFP5000/NFP6000 PCIe Card Probe\n");
+       dev_info(&pdev->dev, "Netronome Flow Processor %s PCIe Card Probe\n",
+                dev_info->chip_names);
        pcie_print_link_status(pdev);
 
        nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
@@ -1328,6 +1326,7 @@ struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev)
 
        nfp->dev = &pdev->dev;
        nfp->pdev = pdev;
+       nfp->dev_info = dev_info;
        init_waitqueue_head(&nfp->bar_waiters);
        spin_lock_init(&nfp->bar_lock);
 
index 6d1bffa..097660b 100644 (file)
@@ -11,6 +11,7 @@
 
 #include "nfp_cpp.h"
 
-struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev);
+struct nfp_cpp *
+nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info);
 
 #endif /* NFP6000_PCIE_H */
index 2dd0f58..3d379e9 100644 (file)
 
 #define PCI_64BIT_BAR_COUNT             3
 
-/* NFP hardware vendor/device ids.
- */
-#define PCI_DEVICE_ID_NETRONOME_NFP3800        0x3800
-
 #define NFP_CPP_NUM_TARGETS             16
 /* Max size of area it should be safe to request */
 #define NFP_CPP_SAFE_AREA_SIZE         SZ_2M
index 85734c6..508ae6b 100644 (file)
@@ -22,6 +22,7 @@
 #include "nfp6000/nfp_xpb.h"
 
 /* NFP6000 PL */
+#define NFP_PL_DEVICE_PART_NFP6000             0x6200
 #define NFP_PL_DEVICE_ID                       0x00000004
 #define   NFP_PL_DEVICE_ID_MASK                        GENMASK(7, 0)
 #define   NFP_PL_DEVICE_PART_MASK              GENMASK(31, 16)
@@ -130,8 +131,12 @@ int nfp_cpp_model_autodetect(struct nfp_cpp *cpp, u32 *model)
                return err;
 
        *model = reg & NFP_PL_DEVICE_MODEL_MASK;
-       if (*model & NFP_PL_DEVICE_ID_MASK)
-               *model -= 0x10;
+       /* Disambiguate the NFP4000/NFP5000/NFP6000 chips */
+       if (FIELD_GET(NFP_PL_DEVICE_PART_MASK, reg) ==
+           NFP_PL_DEVICE_PART_NFP6000) {
+               if (*model & NFP_PL_DEVICE_ID_MASK)
+                       *model -= 0x10;
+       }
 
        return 0;
 }
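
FIELD_GET() masks and shifts in one step, so the new check reads the 16-bit part number straight out of bits 31:16 of the PL device ID register. Sketching the arithmetic:

	/* reg = 0x62000010:
	 *   FIELD_GET(NFP_PL_DEVICE_PART_MASK, reg) = 0x6200
	 * -> NFP4000/NFP5000/NFP6000 family, so the legacy
	 *    "*model -= 0x10" fixup still applies; on other parts
	 *    (e.g. NFP3800) the fixup is now skipped.
	 */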
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c
new file mode 100644 (file)
index 0000000..28384d6
--- /dev/null
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+
+#include "nfp_dev.h"
+
+const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT] = {
+       [NFP_DEV_NFP3800] = {
+               .dma_mask               = DMA_BIT_MASK(40),
+               .qc_idx_mask            = GENMASK(8, 0),
+               .qc_addr_offset         = 0x400000,
+               .min_qc_size            = 512,
+               .max_qc_size            = SZ_64K,
+
+               .chip_names             = "NFP3800",
+               .pcie_cfg_expbar_offset = 0x0a00,
+               .pcie_expl_offset       = 0xd000,
+               .qc_area_sz             = 0x100000,
+       },
+       [NFP_DEV_NFP3800_VF] = {
+               .dma_mask               = DMA_BIT_MASK(40),
+               .qc_idx_mask            = GENMASK(8, 0),
+               .qc_addr_offset         = 0,
+               .min_qc_size            = 512,
+               .max_qc_size            = SZ_64K,
+       },
+       [NFP_DEV_NFP6000] = {
+               .dma_mask               = DMA_BIT_MASK(40),
+               .qc_idx_mask            = GENMASK(7, 0),
+               .qc_addr_offset         = 0x80000,
+               .min_qc_size            = 256,
+               .max_qc_size            = SZ_256K,
+
+               .chip_names             = "NFP4000/NFP5000/NFP6000",
+               .pcie_cfg_expbar_offset = 0x0400,
+               .pcie_expl_offset       = 0x1000,
+               .qc_area_sz             = 0x80000,
+       },
+       [NFP_DEV_NFP6000_VF] = {
+               .dma_mask               = DMA_BIT_MASK(40),
+               .qc_idx_mask            = GENMASK(7, 0),
+               .qc_addr_offset         = 0,
+               .min_qc_size            = 256,
+               .max_qc_size            = SZ_256K,
+       },
+};
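
Each PCI ID table entry carries an enum nfp_dev_id in driver_data, so probe code picks the right ASIC parameters with a single array lookup, as the VF probe earlier in this patch does:

	const struct nfp_dev_info *dev_info;

	dev_info = &nfp_dev_info[pci_id->driver_data];
	err = dma_set_mask_and_coherent(&pdev->dev, dev_info->dma_mask);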
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h
new file mode 100644 (file)
index 0000000..d418986
--- /dev/null
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef _NFP_DEV_H_
+#define _NFP_DEV_H_
+
+#include <linux/types.h>
+
+enum nfp_dev_id {
+       NFP_DEV_NFP3800,
+       NFP_DEV_NFP3800_VF,
+       NFP_DEV_NFP6000,
+       NFP_DEV_NFP6000_VF,
+       NFP_DEV_CNT,
+};
+
+struct nfp_dev_info {
+       /* Required fields */
+       u64 dma_mask;
+       u32 qc_idx_mask;
+       u32 qc_addr_offset;
+       u32 min_qc_size;
+       u32 max_qc_size;
+
+       /* PF-only fields */
+       const char *chip_names;
+       u32 pcie_cfg_expbar_offset;
+       u32 pcie_expl_offset;
+       u32 qc_area_sz;
+};
+
+extern const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT];
+
+#endif
index bc39558..756f97d 100644 (file)
@@ -1471,6 +1471,7 @@ static int lpc_eth_drv_resume(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct netdata_local *pldat;
+       int ret;
 
        if (device_may_wakeup(&pdev->dev))
                disable_irq_wake(ndev->irq);
@@ -1480,7 +1481,9 @@ static int lpc_eth_drv_resume(struct platform_device *pdev)
                        pldat = netdev_priv(ndev);
 
                        /* Enable interface clock */
-                       clk_enable(pldat->clk);
+                       ret = clk_enable(pldat->clk);
+                       if (ret)
+                               return ret;
 
                        /* Reset and initialize */
                        __lpc_eth_reset(pldat);
index 12105f6..0365002 100644 (file)
@@ -191,7 +191,7 @@ IV. Notes
 
 Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
 Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
-and an AlphaStation to verifty the Alpha port!
+and an AlphaStation to verify the Alpha port!
 
 IVb. References
 
index 0ce37f2..407029a 100644 (file)
@@ -1835,8 +1835,6 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
        if (!allocated_mem)
                return NULL;
 
-       memset(allocated_mem, 0, NUM_STORMS * sizeof(struct phys_mem_desc));
-
        /* For each Storm, set physical address in RAM */
        while (buf_offset < buf_size) {
                struct phys_mem_desc *storm_mem_desc;
index bf4a951..0848b55 100644 (file)
@@ -3817,11 +3817,11 @@ bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
        return found;
 }
 
-static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
-                            u16 vfid,
-                            struct qed_mcp_link_params *p_params,
-                            struct qed_mcp_link_state *p_link,
-                            struct qed_mcp_link_capabilities *p_caps)
+static int qed_iov_get_link(struct qed_hwfn *p_hwfn,
+                           u16 vfid,
+                           struct qed_mcp_link_params *p_params,
+                           struct qed_mcp_link_state *p_link,
+                           struct qed_mcp_link_capabilities *p_caps)
 {
        struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
                                                       vfid,
@@ -3829,7 +3829,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
        struct qed_bulletin_content *p_bulletin;
 
        if (!p_vf)
-               return;
+               return -EINVAL;
 
        p_bulletin = p_vf->bulletin.p_virt;
 
@@ -3839,6 +3839,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
                __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
        if (p_caps)
                __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+       return 0;
 }
 
 static int
@@ -4697,6 +4698,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
        struct qed_public_vf_info *vf_info;
        struct qed_mcp_link_state link;
        u32 tx_rate;
+       int ret;
 
        /* Sanitize request */
        if (IS_VF(cdev))
@@ -4710,7 +4712,9 @@ static int qed_get_vf_config(struct qed_dev *cdev,
 
        vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
 
-       qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+       ret = qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+       if (ret)
+               return ret;
 
        /* Fill information about VF */
        ivi->vf = vf_id;
index 597cd9c..7b0e390 100644 (file)
@@ -513,6 +513,9 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
                                                    p_iov->bulletin.size,
                                                    &p_iov->bulletin.phys,
                                                    GFP_KERNEL);
+       if (!p_iov->bulletin.p_virt)
+               goto free_pf2vf_reply;
+
        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
                   p_iov->bulletin.p_virt,
@@ -552,6 +555,10 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
 
        return rc;
 
+free_pf2vf_reply:
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                         sizeof(union pfvf_tlvs),
+                         p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
 free_vf2pf_request:
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          sizeof(union vfpf_tlvs),
index e10fe07..54a2d65 100644 (file)
@@ -1355,7 +1355,7 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
 
        memset(data, 0, stats->n_stats * sizeof(u64));
 
-       for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) {
+       for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
                if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
                        tx_ring = &adapter->tx_ring[ring];
                        data = qlcnic_fill_tx_queue_stats(data, tx_ring);
index bfbd784..a313242 100644 (file)
@@ -207,7 +207,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
        dev = skb->dev;
        port = rmnet_get_port_rcu(dev);
        if (unlikely(!port)) {
-               atomic_long_inc(&skb->dev->rx_nohandler);
+               dev_core_stats_rx_nohandler_inc(skb->dev);
                kfree_skb(skb);
                goto done;
        }
index 67014eb..33f5c56 100644 (file)
@@ -1397,8 +1397,11 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
        rtl_lock_config_regs(tp);
 
        device_set_wakeup_enable(tp_to_dev(tp), wolopts);
-       rtl_set_d3_pll_down(tp, !wolopts);
-       tp->dev->wol_enabled = wolopts ? 1 : 0;
+
+       if (tp->dash_type == RTL_DASH_NONE) {
+               rtl_set_d3_pll_down(tp, !wolopts);
+               tp->dev->wol_enabled = wolopts ? 1 : 0;
+       }
 }
 
 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -4938,6 +4941,9 @@ static int rtl8169_runtime_idle(struct device *device)
 {
        struct rtl8169_private *tp = dev_get_drvdata(device);
 
+       if (tp->dash_type != RTL_DASH_NONE)
+               return -EBUSY;
+
        if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev))
                pm_schedule_suspend(device, 10000);
 
@@ -4978,7 +4984,8 @@ static void rtl_shutdown(struct pci_dev *pdev)
        /* Restore original MAC address */
        rtl_rar_set(tp, tp->dev->perm_addr);
 
-       if (system_state == SYSTEM_POWER_OFF) {
+       if (system_state == SYSTEM_POWER_OFF &&
+           tp->dash_type == RTL_DASH_NONE) {
                if (tp->saved_wolopts)
                        rtl_wol_shutdown_quirk(tp);
 
@@ -5449,7 +5456,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* configure chip for default features */
        rtl8169_set_features(dev, dev->features);
 
-       rtl_set_d3_pll_down(tp, true);
+       if (tp->dash_type == RTL_DASH_NONE) {
+               rtl_set_d3_pll_down(tp, true);
+       } else {
+               rtl_set_d3_pll_down(tp, false);
+               dev->wol_enabled = 1;
+       }
 
        jumbo_max = rtl_jumbo_max(tp);
        if (jumbo_max)
index 32ef3df..63754a9 100644 (file)
@@ -1159,6 +1159,7 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
 #define PCI_DEVICE_ID_INTEL_TGL_SGMII1G                0xa0ac
 #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0     0x7aac
 #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1     0x7aad
+#define PCI_DEVICE_ID_INTEL_ADLN_SGMII1G       0x54ac
 
 static const struct pci_device_id intel_eth_pci_id_table[] = {
        { PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
@@ -1176,6 +1177,7 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
        { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
        { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
        { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
+       { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) },
        {}
 };
 MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
index 58c0fea..6ff88df 100644 (file)
@@ -9,7 +9,6 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_net.h>
-#include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 #include <linux/stmmac.h>
 
 #define ETH_FINE_DLY_GTXC      BIT(1)
 #define ETH_FINE_DLY_RXC       BIT(0)
 
+/* Peri Configuration register for mt8195 */
+#define MT8195_PERI_ETH_CTRL0          0xFD0
+#define MT8195_RMII_CLK_SRC_INTERNAL   BIT(28)
+#define MT8195_RMII_CLK_SRC_RXC                BIT(27)
+#define MT8195_ETH_INTF_SEL            GENMASK(26, 24)
+#define MT8195_RGMII_TXC_PHASE_CTRL    BIT(22)
+#define MT8195_EXT_PHY_MODE            BIT(21)
+#define MT8195_DLY_GTXC_INV            BIT(12)
+#define MT8195_DLY_GTXC_ENABLE         BIT(5)
+#define MT8195_DLY_GTXC_STAGES         GENMASK(4, 0)
+
+#define MT8195_PERI_ETH_CTRL1          0xFD4
+#define MT8195_DLY_RXC_INV             BIT(25)
+#define MT8195_DLY_RXC_ENABLE          BIT(18)
+#define MT8195_DLY_RXC_STAGES          GENMASK(17, 13)
+#define MT8195_DLY_TXC_INV             BIT(12)
+#define MT8195_DLY_TXC_ENABLE          BIT(5)
+#define MT8195_DLY_TXC_STAGES          GENMASK(4, 0)
+
+#define MT8195_PERI_ETH_CTRL2          0xFD8
+#define MT8195_DLY_RMII_RXC_INV                BIT(25)
+#define MT8195_DLY_RMII_RXC_ENABLE     BIT(18)
+#define MT8195_DLY_RMII_RXC_STAGES     GENMASK(17, 13)
+#define MT8195_DLY_RMII_TXC_INV                BIT(12)
+#define MT8195_DLY_RMII_TXC_ENABLE     BIT(5)
+#define MT8195_DLY_RMII_TXC_STAGES     GENMASK(4, 0)
+
 struct mac_delay_struct {
        u32 tx_delay;
        u32 rx_delay;
@@ -50,19 +76,21 @@ struct mac_delay_struct {
 struct mediatek_dwmac_plat_data {
        const struct mediatek_dwmac_variant *variant;
        struct mac_delay_struct mac_delay;
+       struct clk *rmii_internal_clk;
        struct clk_bulk_data *clks;
-       struct device_node *np;
        struct regmap *peri_regmap;
+       struct device_node *np;
        struct device *dev;
        phy_interface_t phy_mode;
-       int num_clks_to_config;
        bool rmii_clk_from_mac;
        bool rmii_rxc;
+       bool mac_wol;
 };
 
 struct mediatek_dwmac_variant {
        int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat);
        int (*dwmac_set_delay)(struct mediatek_dwmac_plat_data *plat);
+       void (*dwmac_fix_mac_speed)(void *priv, unsigned int speed);
 
        /* clock ids to be requested */
        const char * const *clk_list;
@@ -75,7 +103,11 @@ struct mediatek_dwmac_variant {
 
 /* list of clocks required for mac */
 static const char * const mt2712_dwmac_clk_l[] = {
-       "axi", "apb", "mac_main", "ptp_ref", "rmii_internal"
+       "axi", "apb", "mac_main", "ptp_ref"
+};
+
+static const char * const mt8195_dwmac_clk_l[] = {
+       "axi", "apb", "mac_cg", "mac_main", "ptp_ref"
 };
 
 static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat)
@@ -84,23 +116,12 @@ static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat)
        int rmii_rxc = plat->rmii_rxc ? RMII_CLK_SRC_RXC : 0;
        u32 intf_val = 0;
 
-       /* The clock labeled as "rmii_internal" in mt2712_dwmac_clk_l is needed
-        * only in RMII(when MAC provides the reference clock), and useless for
-        * RGMII/MII/RMII(when PHY provides the reference clock).
-        * num_clks_to_config indicates the real number of clocks should be
-        * configured, equals to (plat->variant->num_clks - 1) in default for all the case,
-        * then +1 for rmii_clk_from_mac case.
-        */
-       plat->num_clks_to_config = plat->variant->num_clks - 1;
-
        /* select phy interface in top control domain */
        switch (plat->phy_mode) {
        case PHY_INTERFACE_MODE_MII:
                intf_val |= PHY_INTF_MII;
                break;
        case PHY_INTERFACE_MODE_RMII:
-               if (plat->rmii_clk_from_mac)
-                       plat->num_clks_to_config++;
                intf_val |= (PHY_INTF_RMII | rmii_rxc | rmii_clk_from_mac);
                break;
        case PHY_INTERFACE_MODE_RGMII:
@@ -268,6 +289,193 @@ static const struct mediatek_dwmac_variant mt2712_gmac_variant = {
                .tx_delay_max = 17600,
 };
 
+static int mt8195_set_interface(struct mediatek_dwmac_plat_data *plat)
+{
+       int rmii_clk_from_mac = plat->rmii_clk_from_mac ? MT8195_RMII_CLK_SRC_INTERNAL : 0;
+       int rmii_rxc = plat->rmii_rxc ? MT8195_RMII_CLK_SRC_RXC : 0;
+       u32 intf_val = 0;
+
+       /* select phy interface in top control domain */
+       switch (plat->phy_mode) {
+       case PHY_INTERFACE_MODE_MII:
+               intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_MII);
+               break;
+       case PHY_INTERFACE_MODE_RMII:
+               intf_val |= (rmii_rxc | rmii_clk_from_mac);
+               intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_RMII);
+               break;
+       case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+               intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_RGMII);
+               break;
+       default:
+               dev_err(plat->dev, "phy interface not supported\n");
+               return -EINVAL;
+       }
+
+       /* MT8195 only supports an external PHY */
+       intf_val |= MT8195_EXT_PHY_MODE;
+
+       regmap_write(plat->peri_regmap, MT8195_PERI_ETH_CTRL0, intf_val);
+
+       return 0;
+}
+
+static void mt8195_delay_ps2stage(struct mediatek_dwmac_plat_data *plat)
+{
+       struct mac_delay_struct *mac_delay = &plat->mac_delay;
+
+       /* 290ps per stage */
+       mac_delay->tx_delay /= 290;
+       mac_delay->rx_delay /= 290;
+}
+
+static void mt8195_delay_stage2ps(struct mediatek_dwmac_plat_data *plat)
+{
+       struct mac_delay_struct *mac_delay = &plat->mac_delay;
+
+       /* 290ps per stage */
+       mac_delay->tx_delay *= 290;
+       mac_delay->rx_delay *= 290;
+}
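
These two helpers quantize delays to the hardware's 290ps-per-stage granularity and convert back afterwards, so the stored values reflect what was actually programmed rather than what was requested. For example:

	/* mac_delay->tx_delay = 2100 (ps):
	 *   ps2stage: 2100 / 290 = 7 stages programmed (truncated)
	 *   stage2ps: 7 * 290    = 2030 ps reported back
	 */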
+
+static int mt8195_set_delay(struct mediatek_dwmac_plat_data *plat)
+{
+       struct mac_delay_struct *mac_delay = &plat->mac_delay;
+       u32 gtxc_delay_val = 0, delay_val = 0, rmii_delay_val = 0;
+
+       mt8195_delay_ps2stage(plat);
+
+       switch (plat->phy_mode) {
+       case PHY_INTERFACE_MODE_MII:
+               delay_val |= FIELD_PREP(MT8195_DLY_TXC_ENABLE, !!mac_delay->tx_delay);
+               delay_val |= FIELD_PREP(MT8195_DLY_TXC_STAGES, mac_delay->tx_delay);
+               delay_val |= FIELD_PREP(MT8195_DLY_TXC_INV, mac_delay->tx_inv);
+
+               delay_val |= FIELD_PREP(MT8195_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+               delay_val |= FIELD_PREP(MT8195_DLY_RXC_STAGES, mac_delay->rx_delay);
+               delay_val |= FIELD_PREP(MT8195_DLY_RXC_INV, mac_delay->rx_inv);
+               break;
+       case PHY_INTERFACE_MODE_RMII:
+               if (plat->rmii_clk_from_mac) {
+                       /* case 1: the MAC provides the RMII reference clock,
+                        * which is output on the TXC pin.
+                        * The egress timing can be adjusted by the RMII_TXC delay macro circuit.
+                        * The ingress timing can be adjusted by the RMII_RXC delay macro circuit.
+                        */
+                       rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_TXC_ENABLE,
+                                                    !!mac_delay->tx_delay);
+                       rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_TXC_STAGES,
+                                                    mac_delay->tx_delay);
+                       rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_TXC_INV,
+                                                    mac_delay->tx_inv);
+
+                       rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_RXC_ENABLE,
+                                                    !!mac_delay->rx_delay);
+                       rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_RXC_STAGES,
+                                                    mac_delay->rx_delay);
+                       rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_RXC_INV,
+                                                    mac_delay->rx_inv);
+               } else {
+                       /* case 2: the rmii reference clock comes from the
+                        * external phy, and the property "rmii_rxc" indicates
+                        * which pin (TXC/RXC) the reference clock is connected
+                        * to. The reference clock is a received signal, so
+                        * rx_delay/rx_inv are used to describe the reference
+                        * clock timing adjustment.
+                        */
+                       if (plat->rmii_rxc) {
+                               /* the rmii reference clock from outside is connected
+                                * to RXC pin, the reference clock will be adjusted
+                                * by RXC delay macro circuit.
+                                */
+                               delay_val |= FIELD_PREP(MT8195_DLY_RXC_ENABLE,
+                                                       !!mac_delay->rx_delay);
+                               delay_val |= FIELD_PREP(MT8195_DLY_RXC_STAGES,
+                                                       mac_delay->rx_delay);
+                               delay_val |= FIELD_PREP(MT8195_DLY_RXC_INV,
+                                                       mac_delay->rx_inv);
+                       } else {
+                               /* the rmii reference clock from outside is connected
+                                * to TXC pin, the reference clock will be adjusted
+                                * by TXC delay macro circuit.
+                                */
+                               delay_val |= FIELD_PREP(MT8195_DLY_TXC_ENABLE,
+                                                       !!mac_delay->rx_delay);
+                               delay_val |= FIELD_PREP(MT8195_DLY_TXC_STAGES,
+                                                       mac_delay->rx_delay);
+                               delay_val |= FIELD_PREP(MT8195_DLY_TXC_INV,
+                                                       mac_delay->rx_inv);
+                       }
+               }
+               break;
+       case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+               gtxc_delay_val |= FIELD_PREP(MT8195_DLY_GTXC_ENABLE, !!mac_delay->tx_delay);
+               gtxc_delay_val |= FIELD_PREP(MT8195_DLY_GTXC_STAGES, mac_delay->tx_delay);
+               gtxc_delay_val |= FIELD_PREP(MT8195_DLY_GTXC_INV, mac_delay->tx_inv);
+
+               delay_val |= FIELD_PREP(MT8195_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+               delay_val |= FIELD_PREP(MT8195_DLY_RXC_STAGES, mac_delay->rx_delay);
+               delay_val |= FIELD_PREP(MT8195_DLY_RXC_INV, mac_delay->rx_inv);
+
+               break;
+       default:
+               dev_err(plat->dev, "phy interface not supported\n");
+               return -EINVAL;
+       }
+
+       regmap_update_bits(plat->peri_regmap,
+                          MT8195_PERI_ETH_CTRL0,
+                          MT8195_RGMII_TXC_PHASE_CTRL |
+                          MT8195_DLY_GTXC_INV |
+                          MT8195_DLY_GTXC_ENABLE |
+                          MT8195_DLY_GTXC_STAGES,
+                          gtxc_delay_val);
+       regmap_write(plat->peri_regmap, MT8195_PERI_ETH_CTRL1, delay_val);
+       regmap_write(plat->peri_regmap, MT8195_PERI_ETH_CTRL2, rmii_delay_val);
+
+       mt8195_delay_stage2ps(plat);
+
+       return 0;
+}
+
+static void mt8195_fix_mac_speed(void *priv, unsigned int speed)
+{
+       struct mediatek_dwmac_plat_data *priv_plat = priv;
+
+       if (phy_interface_mode_is_rgmii(priv_plat->phy_mode)) {
+               /* Prefer the 2ns fixed delay controlled by TXC_PHASE_CTRL
+                * when the link speed is 1Gbps on the RGMII interface;
+                * fall back to the delay macro circuit for 10/100Mbps.
+                */
+               if (speed == SPEED_1000)
+                       regmap_update_bits(priv_plat->peri_regmap,
+                                          MT8195_PERI_ETH_CTRL0,
+                                          MT8195_RGMII_TXC_PHASE_CTRL |
+                                          MT8195_DLY_GTXC_ENABLE |
+                                          MT8195_DLY_GTXC_INV |
+                                          MT8195_DLY_GTXC_STAGES,
+                                          MT8195_RGMII_TXC_PHASE_CTRL);
+               else
+                       mt8195_set_delay(priv_plat);
+       }
+}
+
+static const struct mediatek_dwmac_variant mt8195_gmac_variant = {
+       .dwmac_set_phy_interface = mt8195_set_interface,
+       .dwmac_set_delay = mt8195_set_delay,
+       .dwmac_fix_mac_speed = mt8195_fix_mac_speed,
+       .clk_list = mt8195_dwmac_clk_l,
+       .num_clks = ARRAY_SIZE(mt8195_dwmac_clk_l),
+       .dma_bit_mask = 35,
+       .rx_delay_max = 9280,
+       .tx_delay_max = 9280,
+};
+
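For context, this variant is bound to the device through the OF match table added further below; a hedged sketch of the lookup the probe path would use (the helper name is illustrative, not the driver's):

	#include <linux/of_device.h>

	static const struct mediatek_dwmac_variant *
	example_get_variant(struct platform_device *pdev)
	{
		/* yields &mt8195_gmac_variant for "mediatek,mt8195-gmac" */
		return of_device_get_match_data(&pdev->dev);
	}
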
 static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
 {
        struct mac_delay_struct *mac_delay = &plat->mac_delay;
@@ -308,6 +516,7 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
        mac_delay->rx_inv = of_property_read_bool(plat->np, "mediatek,rxc-inverse");
        plat->rmii_rxc = of_property_read_bool(plat->np, "mediatek,rmii-rxc");
        plat->rmii_clk_from_mac = of_property_read_bool(plat->np, "mediatek,rmii-clk-from-mac");
+       plat->mac_wol = of_property_read_bool(plat->np, "mediatek,mac-wol");
 
        return 0;
 }
@@ -315,18 +524,34 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
 static int mediatek_dwmac_clk_init(struct mediatek_dwmac_plat_data *plat)
 {
        const struct mediatek_dwmac_variant *variant = plat->variant;
-       int i, num = variant->num_clks;
+       int i, ret;
 
-       plat->clks = devm_kcalloc(plat->dev, num, sizeof(*plat->clks), GFP_KERNEL);
+       plat->clks = devm_kcalloc(plat->dev, variant->num_clks, sizeof(*plat->clks), GFP_KERNEL);
        if (!plat->clks)
                return -ENOMEM;
 
-       for (i = 0; i < num; i++)
+       for (i = 0; i < variant->num_clks; i++)
                plat->clks[i].id = variant->clk_list[i];
 
-       plat->num_clks_to_config = variant->num_clks;
+       ret = devm_clk_bulk_get(plat->dev, variant->num_clks, plat->clks);
+       if (ret)
+               return ret;
+
+       /* The clock labeled "rmii_internal" is needed only in RMII mode when
+        * the MAC provides the reference clock, and is unused for RGMII/MII
+        * or for RMII when the PHY provides the reference clock.
+        * So the "rmii_internal" clock is acquired and configured only when
+        * the RMII reference clock comes from the MAC.
+        */
+       if (plat->rmii_clk_from_mac) {
+               plat->rmii_internal_clk = devm_clk_get(plat->dev, "rmii_internal");
+               if (IS_ERR(plat->rmii_internal_clk))
+                       ret = PTR_ERR(plat->rmii_internal_clk);
+       } else {
+               plat->rmii_internal_clk = NULL;
+       }
 
-       return devm_clk_bulk_get(plat->dev, num, plat->clks);
+       return ret;
 }
 
 static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
@@ -335,44 +560,117 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
        const struct mediatek_dwmac_variant *variant = plat->variant;
        int ret;
 
-       ret = dma_set_mask_and_coherent(plat->dev, DMA_BIT_MASK(variant->dma_bit_mask));
-       if (ret) {
-               dev_err(plat->dev, "No suitable DMA available, err = %d\n", ret);
-               return ret;
+       if (variant->dwmac_set_phy_interface) {
+               ret = variant->dwmac_set_phy_interface(plat);
+               if (ret) {
+                       dev_err(plat->dev, "failed to set phy interface, err = %d\n", ret);
+                       return ret;
+               }
        }
 
-       ret = variant->dwmac_set_phy_interface(plat);
-       if (ret) {
-               dev_err(plat->dev, "failed to set phy interface, err = %d\n", ret);
-               return ret;
+       if (variant->dwmac_set_delay) {
+               ret = variant->dwmac_set_delay(plat);
+               if (ret) {
+                       dev_err(plat->dev, "failed to set delay value, err = %d\n", ret);
+                       return ret;
+               }
        }
 
-       ret = variant->dwmac_set_delay(plat);
+       ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks);
        if (ret) {
-               dev_err(plat->dev, "failed to set delay value, err = %d\n", ret);
+               dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
                return ret;
        }
 
-       ret = clk_bulk_prepare_enable(plat->num_clks_to_config, plat->clks);
+       ret = clk_prepare_enable(plat->rmii_internal_clk);
        if (ret) {
-               dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
-               return ret;
+               dev_err(plat->dev, "failed to enable rmii internal clk, err = %d\n", ret);
+               goto err_clk;
        }
 
-       pm_runtime_enable(&pdev->dev);
-       pm_runtime_get_sync(&pdev->dev);
-
        return 0;
+
+err_clk:
+       clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
+       return ret;
 }
 
 static void mediatek_dwmac_exit(struct platform_device *pdev, void *priv)
 {
        struct mediatek_dwmac_plat_data *plat = priv;
+       const struct mediatek_dwmac_variant *variant = plat->variant;
 
-       clk_bulk_disable_unprepare(plat->num_clks_to_config, plat->clks);
+       clk_disable_unprepare(plat->rmii_internal_clk);
+       clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
+}
 
-       pm_runtime_put_sync(&pdev->dev);
-       pm_runtime_disable(&pdev->dev);
+static int mediatek_dwmac_clks_config(void *priv, bool enabled)
+{
+       struct mediatek_dwmac_plat_data *plat = priv;
+       const struct mediatek_dwmac_variant *variant = plat->variant;
+       int ret = 0;
+
+       if (enabled) {
+               ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks);
+               if (ret) {
+                       dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
+                       return ret;
+               }
+
+               ret = clk_prepare_enable(plat->rmii_internal_clk);
+               if (ret) {
+                       dev_err(plat->dev, "failed to enable rmii internal clk, err = %d\n", ret);
+                       clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
+                       return ret;
+               }
+       } else {
+               clk_disable_unprepare(plat->rmii_internal_clk);
+               clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
+       }
+
+       return ret;
+}
+
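mediatek_dwmac_clks_config() is installed as plat->clks_config below; a minimal sketch (names illustrative, assuming only the callback signature shown here) of how a core driver gates the clocks through that hook:

	static int example_gate_bus_clks(struct plat_stmmacenet_data *plat,
					 bool enabled)
	{
		if (!plat->clks_config)
			return 0;

		/* bsp_priv was stashed by the glue driver (see below) */
		return plat->clks_config(plat->bsp_priv, enabled);
	}
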
+static int mediatek_dwmac_common_data(struct platform_device *pdev,
+                                     struct plat_stmmacenet_data *plat,
+                                     struct mediatek_dwmac_plat_data *priv_plat)
+{
+       int i;
+
+       plat->interface = priv_plat->phy_mode;
+       plat->use_phy_wol = priv_plat->mac_wol ? 0 : 1;
+       plat->riwt_off = 1;
+       plat->maxmtu = ETH_DATA_LEN;
+       plat->addr64 = priv_plat->variant->dma_bit_mask;
+       plat->bsp_priv = priv_plat;
+       plat->init = mediatek_dwmac_init;
+       plat->exit = mediatek_dwmac_exit;
+       plat->clks_config = mediatek_dwmac_clks_config;
+       if (priv_plat->variant->dwmac_fix_mac_speed)
+               plat->fix_mac_speed = priv_plat->variant->dwmac_fix_mac_speed;
+
+       plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
+                                            sizeof(*plat->safety_feat_cfg),
+                                            GFP_KERNEL);
+       if (!plat->safety_feat_cfg)
+               return -ENOMEM;
+
+       plat->safety_feat_cfg->tsoee = 1;
+       plat->safety_feat_cfg->mrxpee = 0;
+       plat->safety_feat_cfg->mestee = 1;
+       plat->safety_feat_cfg->mrxee = 1;
+       plat->safety_feat_cfg->mtxee = 1;
+       plat->safety_feat_cfg->epsi = 0;
+       plat->safety_feat_cfg->edpp = 1;
+       plat->safety_feat_cfg->prtyen = 1;
+       plat->safety_feat_cfg->tmouten = 1;
+
+       for (i = 0; i < plat->tx_queues_to_use; i++) {
+               /* Default TX Q0 to use TSO and the rest of the TX queues for TBS */
+               if (i > 0)
+                       plat->tx_queues_cfg[i].tbs_en = 1;
+       }
+
+       return 0;
 }
 
 static int mediatek_dwmac_probe(struct platform_device *pdev)
@@ -411,15 +709,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
        if (IS_ERR(plat_dat))
                return PTR_ERR(plat_dat);
 
-       plat_dat->interface = priv_plat->phy_mode;
-       plat_dat->has_gmac4 = 1;
-       plat_dat->has_gmac = 0;
-       plat_dat->pmt = 0;
-       plat_dat->riwt_off = 1;
-       plat_dat->maxmtu = ETH_DATA_LEN;
-       plat_dat->bsp_priv = priv_plat;
-       plat_dat->init = mediatek_dwmac_init;
-       plat_dat->exit = mediatek_dwmac_exit;
+       mediatek_dwmac_common_data(pdev, plat_dat, priv_plat);
        mediatek_dwmac_init(pdev, priv_plat);
 
        ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
@@ -434,6 +724,8 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
 static const struct of_device_id mediatek_dwmac_match[] = {
        { .compatible = "mediatek,mt2712-gmac",
          .data = &mt2712_gmac_variant },
+       { .compatible = "mediatek,mt8195-gmac",
+         .data = &mt8195_gmac_variant },
        { }
 };
 
index cf4e077..4a4b365 100644 (file)
@@ -1668,7 +1668,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 rx_count = priv->plat->rx_queues_to_use;
-       u32 queue;
+       int queue;
        int ret;
 
        /* RX INITIALIZATION */
@@ -1695,9 +1695,6 @@ err_init_rx_buffers:
                rx_q->buf_alloc_num = 0;
                rx_q->xsk_pool = NULL;
 
-               if (queue == 0)
-                       break;
-
                queue--;
        }
 
@@ -3275,7 +3272,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
 
        ret = stmmac_init_ptp(priv);
        if (ret == -EOPNOTSUPP)
-               netdev_warn(priv->dev, "PTP not supported by HW\n");
+               netdev_info(priv->dev, "PTP not supported by HW\n");
        else if (ret)
                netdev_warn(priv->dev, "PTP init failed\n");
        else if (ptp_register)
index 153edc5..b04a6a7 100644 (file)
@@ -4664,7 +4664,7 @@ static void cas_set_msglevel(struct net_device *dev, u32 value)
 static int cas_get_regs_len(struct net_device *dev)
 {
        struct cas *cp = netdev_priv(dev);
-       return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS;
+       return min_t(int, cp->casreg_len, CAS_MAX_REGS);
 }
 
 static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
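Replacing the open-coded ternary with min_t() also forces both operands to a common type before comparing; for reference, the kernel's definition in include/linux/minmax.h at the time is essentially:

	#define min_t(type, x, y)	__careful_cmp((type)(x), (type)(y), <)
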
index ba8ad76..42460c0 100644 (file)
@@ -7909,7 +7909,7 @@ static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
                 * won't get any interrupts and that's painful to debug.
                 */
                if (nr64(LDG_NUM(ldn)) != ldg) {
-                       dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
+                       dev_err(np->device, "Port %u, mismatched LDG assignment for ldn %d, should be %d is %llu\n",
                                np->port, ldn, ldg,
                                (unsigned long long) nr64(LDG_NUM(ldn)));
                        return -EINVAL;
index ad9029a..77e5dff 100644 (file)
@@ -3146,7 +3146,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
        if (err) {
                printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
                       "aborting.\n");
-               goto err_out_iounmap;
+               goto err_out_free_coherent;
        }
 
        pci_set_drvdata(pdev, hp);
@@ -3179,6 +3179,10 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
 
        return 0;
 
+err_out_free_coherent:
+       dma_free_coherent(hp->dma_dev, PAGE_SIZE,
+                         hp->happy_block, hp->hblock_dvma);
+
 err_out_iounmap:
        iounmap(hp->gregs);
 
index d45b6bb..72acdf8 100644 (file)
@@ -6,7 +6,7 @@
  */
 
 #include <linux/net_tstamp.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 
@@ -471,9 +471,7 @@ static void am65_cpsw_get_pauseparam(struct net_device *ndev,
 {
        struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
 
-       pause->autoneg = AUTONEG_DISABLE;
-       pause->rx_pause = salve->rx_pause ? true : false;
-       pause->tx_pause = salve->tx_pause ? true : false;
+       phylink_ethtool_get_pauseparam(salve->phylink, pause);
 }
 
 static int am65_cpsw_set_pauseparam(struct net_device *ndev,
@@ -481,18 +479,7 @@ static int am65_cpsw_set_pauseparam(struct net_device *ndev,
 {
        struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
 
-       if (!salve->phy)
-               return -EINVAL;
-
-       if (!phy_validate_pause(salve->phy, pause))
-               return -EINVAL;
-
-       salve->rx_pause = pause->rx_pause ? true : false;
-       salve->tx_pause = pause->tx_pause ? true : false;
-
-       phy_set_asym_pause(salve->phy, salve->rx_pause, salve->tx_pause);
-
-       return 0;
+       return phylink_ethtool_set_pauseparam(salve->phylink, pause);
 }
 
 static void am65_cpsw_get_wol(struct net_device *ndev,
@@ -500,11 +487,7 @@ static void am65_cpsw_get_wol(struct net_device *ndev,
 {
        struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
 
-       wol->supported = 0;
-       wol->wolopts = 0;
-
-       if (salve->phy)
-               phy_ethtool_get_wol(salve->phy, wol);
+       phylink_ethtool_get_wol(salve->phylink, wol);
 }
 
 static int am65_cpsw_set_wol(struct net_device *ndev,
@@ -512,10 +495,7 @@ static int am65_cpsw_set_wol(struct net_device *ndev,
 {
        struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
 
-       if (!salve->phy)
-               return -EOPNOTSUPP;
-
-       return phy_ethtool_set_wol(salve->phy, wol);
+       return phylink_ethtool_set_wol(salve->phylink, wol);
 }
 
 static int am65_cpsw_get_link_ksettings(struct net_device *ndev,
@@ -523,11 +503,7 @@ static int am65_cpsw_get_link_ksettings(struct net_device *ndev,
 {
        struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
 
-       if (!salve->phy)
-               return -EOPNOTSUPP;
-
-       phy_ethtool_ksettings_get(salve->phy, ecmd);
-       return 0;
+       return phylink_ethtool_ksettings_get(salve->phylink, ecmd);
 }
 
 static int
@@ -536,40 +512,28 @@ am65_cpsw_set_link_ksettings(struct net_device *ndev,
 {
        struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
 
-       if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
-               return -EOPNOTSUPP;
-
-       return phy_ethtool_ksettings_set(salve->phy, ecmd);
+       return phylink_ethtool_ksettings_set(salve->phylink, ecmd);
 }
 
 static int am65_cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
 {
        struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
 
-       if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
-               return -EOPNOTSUPP;
-
-       return phy_ethtool_get_eee(salve->phy, edata);
+       return phylink_ethtool_get_eee(salve->phylink, edata);
 }
 
 static int am65_cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
 {
        struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
 
-       if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
-               return -EOPNOTSUPP;
-
-       return phy_ethtool_set_eee(salve->phy, edata);
+       return phylink_ethtool_set_eee(salve->phylink, edata);
 }
 
 static int am65_cpsw_nway_reset(struct net_device *ndev)
 {
        struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
 
-       if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
-               return -EOPNOTSUPP;
-
-       return phy_restart_aneg(salve->phy);
+       return phylink_ethtool_nway_reset(salve->phylink);
 }
 
 static int am65_cpsw_get_regs_len(struct net_device *ndev)
index 8251d7e..d2747e9 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
 #include <linux/of_device.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
 #include <linux/phy/phy.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
@@ -159,69 +159,6 @@ static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
                common->pdata.quirks);
 }
 
-void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
-{
-       struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
-       struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
-       struct phy_device *phy = port->slave.phy;
-       u32 mac_control = 0;
-
-       if (!phy)
-               return;
-
-       if (phy->link) {
-               mac_control = CPSW_SL_CTL_GMII_EN;
-
-               if (phy->speed == 1000)
-                       mac_control |= CPSW_SL_CTL_GIG;
-               if (phy->speed == 10 && phy_interface_is_rgmii(phy))
-                       /* Can be used with in band mode only */
-                       mac_control |= CPSW_SL_CTL_EXT_EN;
-               if (phy->speed == 100 && phy->interface == PHY_INTERFACE_MODE_RMII)
-                       mac_control |= CPSW_SL_CTL_IFCTL_A;
-               if (phy->duplex)
-                       mac_control |= CPSW_SL_CTL_FULLDUPLEX;
-
-               /* RGMII speed is 100M if !CPSW_SL_CTL_GIG*/
-
-               /* rx_pause/tx_pause */
-               if (port->slave.rx_pause)
-                       mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
-
-               if (port->slave.tx_pause)
-                       mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
-
-               cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
-
-               /* enable forwarding */
-               cpsw_ale_control_set(common->ale, port->port_id,
-                                    ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
-
-               am65_cpsw_qos_link_up(ndev, phy->speed);
-               netif_tx_wake_all_queues(ndev);
-       } else {
-               int tmo;
-
-               /* disable forwarding */
-               cpsw_ale_control_set(common->ale, port->port_id,
-                                    ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
-
-               cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
-
-               tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
-               dev_dbg(common->dev, "donw msc_sl %08x tmo %d\n",
-                       cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS),
-                       tmo);
-
-               cpsw_sl_ctl_reset(port->slave.mac_sl);
-
-               am65_cpsw_qos_link_down(ndev);
-               netif_tx_stop_all_queues(ndev);
-       }
-
-       phy_print_status(phy);
-}
-
 static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
                                            __be16 proto, u16 vid)
 {
@@ -589,15 +526,11 @@ static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
        int ret;
 
-       if (port->slave.phy)
-               phy_stop(port->slave.phy);
+       phylink_stop(port->slave.phylink);
 
        netif_tx_stop_all_queues(ndev);
 
-       if (port->slave.phy) {
-               phy_disconnect(port->slave.phy);
-               port->slave.phy = NULL;
-       }
+       phylink_disconnect_phy(port->slave.phylink);
 
        ret = am65_cpsw_nuss_common_stop(common);
        if (ret)
@@ -667,25 +600,14 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
        if (ret)
                goto error_cleanup;
 
-       if (port->slave.phy_node) {
-               port->slave.phy = of_phy_connect(ndev,
-                                                port->slave.phy_node,
-                                                &am65_cpsw_nuss_adjust_link,
-                                                0, port->slave.phy_if);
-               if (!port->slave.phy) {
-                       dev_err(common->dev, "phy %pOF not found on slave %d\n",
-                               port->slave.phy_node,
-                               port->port_id);
-                       ret = -ENODEV;
-                       goto error_cleanup;
-               }
-       }
+       ret = phylink_of_phy_connect(port->slave.phylink, port->slave.phy_node, 0);
+       if (ret)
+               goto error_cleanup;
 
        /* restore vlan configurations */
        vlan_for_each(ndev, cpsw_restore_vlans, port);
 
-       phy_attached_info(port->slave.phy);
-       phy_start(port->slave.phy);
+       phylink_start(port->slave.phylink);
 
        return 0;
 
@@ -1431,10 +1353,7 @@ static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
                return am65_cpsw_nuss_hwtstamp_get(ndev, req);
        }
 
-       if (!port->slave.phy)
-               return -EOPNOTSUPP;
-
-       return phy_mii_ioctl(port->slave.phy, req, cmd);
+       return phylink_mii_ioctl(port->slave.phylink, req, cmd);
 }
 
 static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
@@ -1494,6 +1413,81 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
        .ndo_get_devlink_port   = am65_cpsw_ndo_get_devlink_port,
 };
 
+static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode,
+                                     const struct phylink_link_state *state)
+{
+       /* Currently not used */
+}
+
+static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned int mode,
+                                        phy_interface_t interface)
+{
+       struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
+                                                         phylink_config);
+       struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
+       struct am65_cpsw_common *common = port->common;
+       struct net_device *ndev = port->ndev;
+       int tmo;
+
+       /* disable forwarding */
+       cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+       cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
+
+       tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
+       dev_dbg(common->dev, "down mac_sl %08x tmo %d\n",
+               cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS), tmo);
+
+       cpsw_sl_ctl_reset(port->slave.mac_sl);
+
+       am65_cpsw_qos_link_down(ndev);
+       netif_tx_stop_all_queues(ndev);
+}
+
+static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy_device *phy,
+                                      unsigned int mode, phy_interface_t interface, int speed,
+                                      int duplex, bool tx_pause, bool rx_pause)
+{
+       struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
+                                                         phylink_config);
+       struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
+       struct am65_cpsw_common *common = port->common;
+       u32 mac_control = CPSW_SL_CTL_GMII_EN;
+       struct net_device *ndev = port->ndev;
+
+       if (speed == SPEED_1000)
+               mac_control |= CPSW_SL_CTL_GIG;
+       if (speed == SPEED_10 && interface == PHY_INTERFACE_MODE_RGMII)
+               /* Can be used with in band mode only */
+               mac_control |= CPSW_SL_CTL_EXT_EN;
+       if (speed == SPEED_100 && interface == PHY_INTERFACE_MODE_RMII)
+               mac_control |= CPSW_SL_CTL_IFCTL_A;
+       if (duplex)
+               mac_control |= CPSW_SL_CTL_FULLDUPLEX;
+
+       /* rx_pause/tx_pause */
+       if (rx_pause)
+               mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
+
+       if (tx_pause)
+               mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
+
+       cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
+
+       /* enable forwarding */
+       cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+       am65_cpsw_qos_link_up(ndev, speed);
+       netif_tx_wake_all_queues(ndev);
+}
+
+static const struct phylink_mac_ops am65_cpsw_phylink_mac_ops = {
+       .validate = phylink_generic_validate,
+       .mac_config = am65_cpsw_nuss_mac_config,
+       .mac_link_down = am65_cpsw_nuss_mac_link_down,
+       .mac_link_up = am65_cpsw_nuss_mac_link_up,
+};
+
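Both link callbacks above recover the port with the same two-step container_of() chain; a small illustrative helper (not in the patch) makes the embedding explicit:

	static struct am65_cpsw_port *
	example_config_to_port(struct phylink_config *config)
	{
		/* phylink_config is embedded in am65_cpsw_slave_data, which is
		 * itself the "slave" member of am65_cpsw_port.
		 */
		struct am65_cpsw_slave_data *slave =
			container_of(config, struct am65_cpsw_slave_data,
				     phylink_config);

		return container_of(slave, struct am65_cpsw_port, slave);
	}
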
 static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
 {
        struct am65_cpsw_common *common = port->common;
@@ -1890,27 +1884,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
                                of_property_read_bool(port_np, "ti,mac-only");
 
                /* get phy/link info */
-               if (of_phy_is_fixed_link(port_np)) {
-                       ret = of_phy_register_fixed_link(port_np);
-                       if (ret) {
-                               ret = dev_err_probe(dev, ret,
-                                                    "failed to register fixed-link phy %pOF\n",
-                                                    port_np);
-                               goto of_node_put;
-                       }
-                       port->slave.phy_node = of_node_get(port_np);
-               } else {
-                       port->slave.phy_node =
-                               of_parse_phandle(port_np, "phy-handle", 0);
-               }
-
-               if (!port->slave.phy_node) {
-                       dev_err(dev,
-                               "slave[%d] no phy found\n", port_id);
-                       ret = -ENODEV;
-                       goto of_node_put;
-               }
-
+               port->slave.phy_node = port_np;
                ret = of_get_phy_mode(port_np, &port->slave.phy_if);
                if (ret) {
                        dev_err(dev, "%pOF read phy-mode err %d\n",
@@ -1952,12 +1926,25 @@ static void am65_cpsw_pcpu_stats_free(void *data)
        free_percpu(stats);
 }
 
+static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common)
+{
+       struct am65_cpsw_port *port;
+       int i;
+
+       for (i = 0; i < common->port_num; i++) {
+               port = &common->ports[i];
+               if (port->slave.phylink)
+                       phylink_destroy(port->slave.phylink);
+       }
+}
+
 static int
 am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
 {
        struct am65_cpsw_ndev_priv *ndev_priv;
        struct device *dev = common->dev;
        struct am65_cpsw_port *port;
+       struct phylink *phylink;
        int ret;
 
        port = &common->ports[port_idx];
@@ -1995,6 +1982,20 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
        port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
        port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
 
+       /* Configuring Phylink */
+       port->slave.phylink_config.dev = &port->ndev->dev;
+       port->slave.phylink_config.type = PHYLINK_NETDEV;
+       port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+
+       phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
+
+       phylink = phylink_create(&port->slave.phylink_config, dev->fwnode, port->slave.phy_if,
+                                &am65_cpsw_phylink_mac_ops);
+       if (IS_ERR(phylink))
+               return PTR_ERR(phylink);
+
+       port->slave.phylink = phylink;
+
        /* Disable TX checksum offload by default due to HW bug */
        if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
                port->ndev->features &= ~NETIF_F_HW_CSUM;
@@ -2761,15 +2762,17 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
 
        ret = am65_cpsw_nuss_init_ndevs(common);
        if (ret)
-               goto err_of_clear;
+               goto err_free_phylink;
 
        ret = am65_cpsw_nuss_register_ndevs(common);
        if (ret)
-               goto err_of_clear;
+               goto err_free_phylink;
 
        pm_runtime_put(dev);
        return 0;
 
+err_free_phylink:
+       am65_cpsw_nuss_phylink_cleanup(common);
 err_of_clear:
        of_platform_device_destroy(common->mdio_dev, NULL);
 err_pm_clear:
@@ -2792,6 +2795,7 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
                return ret;
        }
 
+       am65_cpsw_nuss_phylink_cleanup(common);
        am65_cpsw_unregister_devlink(common);
        am65_cpsw_unregister_notifiers(common);
 
index 048ed10..ac94563 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
 #include <linux/platform_device.h>
 #include <linux/soc/ti/k3-ringacc.h>
 #include <net/devlink.h>
@@ -30,13 +30,14 @@ struct am65_cpsw_slave_data {
        bool                            mac_only;
        struct cpsw_sl                  *mac_sl;
        struct device_node              *phy_node;
-       struct phy_device               *phy;
        phy_interface_t                 phy_if;
        struct phy                      *ifphy;
        bool                            rx_pause;
        bool                            tx_pause;
        u8                              mac_addr[ETH_ALEN];
        int                             port_vlan;
+       struct phylink                  *phylink;
+       struct phylink_config           phylink_config;
 };
 
 struct am65_cpsw_port {
index dc70a6b..92ca739 100644 (file)
@@ -568,7 +568,9 @@ int cpts_register(struct cpts *cpts)
        for (i = 0; i < CPTS_MAX_EVENTS; i++)
                list_add(&cpts->pool_data[i].list, &cpts->pool);
 
-       clk_enable(cpts->refclk);
+       err = clk_enable(cpts->refclk);
+       if (err)
+               return err;
 
        cpts_write32(cpts, CPTS_EN, control);
        cpts_write32(cpts, TS_PEND_EN, int_enable);
index 31df326..4b6aed7 100644 (file)
@@ -1604,6 +1604,7 @@ static int emac_dev_stop(struct net_device *ndev)
        int irq_num;
        struct emac_priv *priv = netdev_priv(ndev);
        struct device *emac_dev = &ndev->dev;
+       int ret = 0;
 
        /* inform the upper layers. */
        netif_stop_queue(ndev);
@@ -1618,17 +1619,31 @@ static int emac_dev_stop(struct net_device *ndev)
                phy_disconnect(ndev->phydev);
 
        /* Free IRQ */
-       while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
-               for (irq_num = res->start; irq_num <= res->end; irq_num++)
-                       free_irq(irq_num, priv->ndev);
-               i++;
+       if (dev_of_node(&priv->pdev->dev)) {
+               do {
+                       ret = platform_get_irq_optional(priv->pdev, i);
+                       if (ret < 0 && ret != -ENXIO)
+                               break;
+                       if (ret > 0) {
+                               free_irq(ret, priv->ndev);
+                       } else {
+                               ret = 0;
+                               break;
+                       }
+               } while (++i);
+       } else {
+               while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
+                       for (irq_num = res->start; irq_num <= res->end; irq_num++)
+                               free_irq(irq_num, priv->ndev);
+                       i++;
+               }
        }
 
        if (netif_msg_drv(priv))
                dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
 
        pm_runtime_put(&priv->pdev->dev);
-       return 0;
+       return ret;
 }
 
 /**
index b818e45..16507bf 100644 (file)
@@ -2082,7 +2082,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
        netcp->tx_pool_region_id = temp[1];
 
        if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
-               dev_err(dev, "tx-pool size too small, must be atleast(%ld)\n",
+               dev_err(dev, "tx-pool size too small, must be at least %ld\n",
                        MAX_SKB_FRAGS);
                ret = -ENODEV;
                goto quit;
index 1d8c6e9..c7eb05e 100644 (file)
@@ -965,7 +965,7 @@ static int axienet_poll(struct napi_struct *napi, int budget)
                        packets++;
                }
 
-               new_skb = netdev_alloc_skb_ip_align(lp->ndev, lp->max_frm_size);
+               new_skb = napi_alloc_skb(napi, lp->max_frm_size);
                if (!new_skb)
                        break;
 
index f65a638..57a24f6 100644 (file)
@@ -1183,7 +1183,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
        if (rc) {
                dev_err(dev,
                        "Cannot register network device, aborting\n");
-               goto error;
+               goto put_node;
        }
 
        dev_info(dev,
@@ -1191,6 +1191,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
                 (unsigned long __force)ndev->mem_start, lp->base_addr, ndev->irq);
        return 0;
 
+put_node:
+       of_node_put(lp->phy_node);
 error:
        free_netdev(ndev);
        return rc;
index a895ff7..8f30660 100644 (file)
@@ -56,6 +56,7 @@ struct geneve_config {
        bool                    use_udp6_rx_checksums;
        bool                    ttl_inherit;
        enum ifla_geneve_df     df;
+       bool                    inner_proto_inherit;
 };
 
 /* Pseudo network device */
@@ -251,17 +252,24 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
                }
        }
 
-       skb_reset_mac_header(skb);
-       skb->protocol = eth_type_trans(skb, geneve->dev);
-       skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
-
        if (tun_dst)
                skb_dst_set(skb, &tun_dst->dst);
 
-       /* Ignore packet loops (and multicast echo) */
-       if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
-               geneve->dev->stats.rx_errors++;
-               goto drop;
+       if (gnvh->proto_type == htons(ETH_P_TEB)) {
+               skb_reset_mac_header(skb);
+               skb->protocol = eth_type_trans(skb, geneve->dev);
+               skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+
+               /* Ignore packet loops (and multicast echo) */
+               if (ether_addr_equal(eth_hdr(skb)->h_source,
+                                    geneve->dev->dev_addr)) {
+                       geneve->dev->stats.rx_errors++;
+                       goto drop;
+               }
+       } else {
+               skb_reset_mac_header(skb);
+               skb->dev = geneve->dev;
+               skb->pkt_type = PACKET_HOST;
        }
 
        oiph = skb_network_header(skb);
@@ -345,6 +353,7 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
        struct genevehdr *geneveh;
        struct geneve_dev *geneve;
        struct geneve_sock *gs;
+       __be16 inner_proto;
        int opts_len;
 
        /* Need UDP and Geneve header to be present */
@@ -356,7 +365,11 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
        if (unlikely(geneveh->ver != GENEVE_VER))
                goto drop;
 
-       if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
+       inner_proto = geneveh->proto_type;
+
+       if (unlikely((inner_proto != htons(ETH_P_TEB) &&
+                     inner_proto != htons(ETH_P_IP) &&
+                     inner_proto != htons(ETH_P_IPV6))))
                goto drop;
 
        gs = rcu_dereference_sk_user_data(sk);
@@ -367,9 +380,14 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
        if (!geneve)
                goto drop;
 
+       if (unlikely((!geneve->cfg.inner_proto_inherit &&
+                     inner_proto != htons(ETH_P_TEB)))) {
+               geneve->dev->stats.rx_dropped++;
+               goto drop;
+       }
+
        opts_len = geneveh->opt_len * 4;
-       if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
-                                htons(ETH_P_TEB),
+       if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, inner_proto,
                                 !net_eq(geneve->net, dev_net(geneve->dev)))) {
                geneve->dev->stats.rx_dropped++;
                goto drop;
@@ -717,7 +735,8 @@ static int geneve_stop(struct net_device *dev)
 }
 
 static void geneve_build_header(struct genevehdr *geneveh,
-                               const struct ip_tunnel_info *info)
+                               const struct ip_tunnel_info *info,
+                               __be16 inner_proto)
 {
        geneveh->ver = GENEVE_VER;
        geneveh->opt_len = info->options_len / 4;
@@ -725,7 +744,7 @@ static void geneve_build_header(struct genevehdr *geneveh,
        geneveh->critical = !!(info->key.tun_flags & TUNNEL_CRIT_OPT);
        geneveh->rsvd1 = 0;
        tunnel_id_to_vni(info->key.tun_id, geneveh->vni);
-       geneveh->proto_type = htons(ETH_P_TEB);
+       geneveh->proto_type = inner_proto;
        geneveh->rsvd2 = 0;
 
        if (info->key.tun_flags & TUNNEL_GENEVE_OPT)
@@ -734,10 +753,12 @@ static void geneve_build_header(struct genevehdr *geneveh,
 
 static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
                            const struct ip_tunnel_info *info,
-                           bool xnet, int ip_hdr_len)
+                           bool xnet, int ip_hdr_len,
+                           bool inner_proto_inherit)
 {
        bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
        struct genevehdr *gnvh;
+       __be16 inner_proto;
        int min_headroom;
        int err;
 
@@ -755,8 +776,9 @@ static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
                goto free_dst;
 
        gnvh = __skb_push(skb, sizeof(*gnvh) + info->options_len);
-       geneve_build_header(gnvh, info);
-       skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+       inner_proto = inner_proto_inherit ? skb->protocol : htons(ETH_P_TEB);
+       geneve_build_header(gnvh, info, inner_proto);
+       skb_set_inner_protocol(skb, inner_proto);
        return 0;
 
 free_dst:
@@ -959,7 +981,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                }
        }
 
-       err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr));
+       err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr),
+                              geneve->cfg.inner_proto_inherit);
        if (unlikely(err))
                return err;
 
@@ -1038,7 +1061,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                        ttl = key->ttl;
                ttl = ttl ? : ip6_dst_hoplimit(dst);
        }
-       err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr));
+       err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr),
+                              geneve->cfg.inner_proto_inherit);
        if (unlikely(err))
                return err;
 
@@ -1388,6 +1412,14 @@ static int geneve_configure(struct net *net, struct net_device *dev,
        dst_cache_reset(&geneve->cfg.info.dst_cache);
        memcpy(&geneve->cfg, cfg, sizeof(*cfg));
 
+       if (geneve->cfg.inner_proto_inherit) {
+               dev->header_ops = NULL;
+               dev->type = ARPHRD_NONE;
+               dev->hard_header_len = 0;
+               dev->addr_len = 0;
+               dev->flags = IFF_NOARP;
+       }
+
        err = register_netdevice(dev);
        if (err)
                return err;
@@ -1561,10 +1593,18 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
 #endif
        }
 
+       if (data[IFLA_GENEVE_INNER_PROTO_INHERIT]) {
+               if (changelink) {
+                       attrtype = IFLA_GENEVE_INNER_PROTO_INHERIT;
+                       goto change_notsup;
+               }
+               cfg->inner_proto_inherit = true;
+       }
+
        return 0;
 change_notsup:
        NL_SET_ERR_MSG_ATTR(extack, data[attrtype],
-                           "Changing VNI, Port, endpoint IP address family, external, and UDP checksum attributes are not supported");
+                           "Changing VNI, Port, endpoint IP address family, external, inner_proto_inherit, and UDP checksum attributes are not supported");
        return -EOPNOTSUPP;
 }
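Taken together, the receive-side hunks above implement one dispatch rule; an illustrative restatement (mine, not the patch):

	static bool example_geneve_proto_allowed(__be16 inner_proto,
						 bool inner_proto_inherit)
	{
		/* Ethernet-bridged payloads are always accepted */
		if (inner_proto == htons(ETH_P_TEB))
			return true;

		/* bare IP/IPv6 only on devices created with inner_proto_inherit */
		return inner_proto_inherit &&
		       (inner_proto == htons(ETH_P_IP) ||
			inner_proto == htons(ETH_P_IPV6));
	}
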
 
@@ -1799,6 +1839,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
        if (nla_put_u8(skb, IFLA_GENEVE_TTL_INHERIT, ttl_inherit))
                goto nla_put_failure;
 
+       if (geneve->cfg.inner_proto_inherit &&
+           nla_put_flag(skb, IFLA_GENEVE_INNER_PROTO_INHERIT))
+               goto nla_put_failure;
+
        return 0;
 
 nla_put_failure:
index bf08717..a208e2b 100644 (file)
@@ -66,13 +66,23 @@ struct gtp_dev {
 
        struct sock             *sk0;
        struct sock             *sk1u;
+       u8                      sk_created;
 
        struct net_device       *dev;
+       struct net              *net;
 
        unsigned int            role;
        unsigned int            hash_size;
        struct hlist_head       *tid_hash;
        struct hlist_head       *addr_hash;
+
+       u8                      restart_count;
+};
+
+struct echo_info {
+       struct in_addr          ms_addr_ip4;
+       struct in_addr          peer_addr_ip4;
+       u8                      gtp_version;
 };
 
 static unsigned int gtp_net_id __read_mostly;
@@ -83,6 +93,16 @@ struct gtp_net {
 
 static u32 gtp_h_initval;
 
+static struct genl_family gtp_genl_family;
+
+enum gtp_multicast_groups {
+       GTP_GENL_MCGRP,
+};
+
+static const struct genl_multicast_group gtp_genl_mcgrps[] = {
+       [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
+};
+
 static void pdp_context_delete(struct pdp_ctx *pctx);
 
 static inline u32 gtp0_hashfn(u64 tid)
@@ -215,6 +235,174 @@ err:
        return -1;
 }
 
+static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
+                                          const struct sock *sk,
+                                          __be32 daddr, __be32 saddr)
+{
+       memset(fl4, 0, sizeof(*fl4));
+       fl4->flowi4_oif         = sk->sk_bound_dev_if;
+       fl4->daddr              = daddr;
+       fl4->saddr              = saddr;
+       fl4->flowi4_tos         = RT_CONN_FLAGS(sk);
+       fl4->flowi4_proto       = sk->sk_protocol;
+
+       return ip_route_output_key(sock_net(sk), fl4);
+}
+
+/* GSM TS 09.60. 7.3
+ * In all Path Management messages:
+ * - TID: is not used and shall be set to 0.
+ * - Flow Label: is not used and shall be set to 0.
+ * In signalling messages:
+ * - number: this field is not yet used in signalling messages.
+ *   It shall be set to 255 by the sender and shall be ignored
+ *   by the receiver.
+ * Returns true if the echo req was correct, false otherwise.
+ */
+static bool gtp0_validate_echo_hdr(struct gtp0_header *gtp0)
+{
+       return !(gtp0->tid || (gtp0->flags ^ 0x1e) ||
+               gtp0->number != 0xff || gtp0->flow);
+}
+
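The XOR against 0x1e accepts only the exact header flags GSM TS 09.60 mandates; an illustrative bit breakdown (not in the patch):

	/*
	 *   0x1e = 000     1      111     0
	 *          ver=0   PT=1   spare   SNN=0
	 *
	 * gtp0->flags ^ 0x1e is non-zero unless the flags match this pattern
	 * exactly: version 0, GTP (not GTP'), spare bits set, SNN clear.
	 */
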
+/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
+static void gtp0_build_echo_msg(struct gtp0_header *hdr, __u8 msg_type)
+{
+       int len_pkt, len_hdr;
+
+       hdr->flags = 0x1e; /* v0, GTP-non-prime. */
+       hdr->type = msg_type;
+       /* GSM TS 09.60. 7.3 In all Path Management messages the Flow Label
+        * and TID are not used and shall be set to 0.
+        */
+       hdr->flow = 0;
+       hdr->tid = 0;
+       hdr->number = 0xff;
+       hdr->spare[0] = 0xff;
+       hdr->spare[1] = 0xff;
+       hdr->spare[2] = 0xff;
+
+       len_pkt = sizeof(struct gtp0_packet);
+       len_hdr = sizeof(struct gtp0_header);
+
+       if (msg_type == GTP_ECHO_RSP)
+               hdr->length = htons(len_pkt - len_hdr);
+       else
+               hdr->length = 0;
+}
+
+static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
+{
+       struct gtp0_packet *gtp_pkt;
+       struct gtp0_header *gtp0;
+       struct rtable *rt;
+       struct flowi4 fl4;
+       struct iphdr *iph;
+       __be16 seq;
+
+       gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
+
+       if (!gtp0_validate_echo_hdr(gtp0))
+               return -1;
+
+       seq = gtp0->seq;
+
+       /* pull GTP and UDP headers */
+       skb_pull_data(skb, sizeof(struct gtp0_header) + sizeof(struct udphdr));
+
+       gtp_pkt = skb_push(skb, sizeof(struct gtp0_packet));
+       memset(gtp_pkt, 0, sizeof(struct gtp0_packet));
+
+       gtp0_build_echo_msg(&gtp_pkt->gtp0_h, GTP_ECHO_RSP);
+
+       /* GSM TS 09.60. 7.3 The Sequence Number in a signalling response
+        * message shall be copied from the signalling request message
+        * that the GSN is replying to.
+        */
+       gtp_pkt->gtp0_h.seq = seq;
+
+       gtp_pkt->ie.tag = GTPIE_RECOVERY;
+       gtp_pkt->ie.val = gtp->restart_count;
+
+       iph = ip_hdr(skb);
+
+       /* find route to the sender,
+        * src address becomes dst address and vice versa.
+        */
+       rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr);
+       if (IS_ERR(rt)) {
+               netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
+                          &iph->saddr);
+               return -1;
+       }
+
+       udp_tunnel_xmit_skb(rt, gtp->sk0, skb,
+                           fl4.saddr, fl4.daddr,
+                           iph->tos,
+                           ip4_dst_hoplimit(&rt->dst),
+                           0,
+                           htons(GTP0_PORT), htons(GTP0_PORT),
+                           !net_eq(sock_net(gtp->sk0),
+                                   dev_net(gtp->dev)),
+                           false);
+       return 0;
+}
+
+static int gtp_genl_fill_echo(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
+                             int flags, u32 type, struct echo_info echo)
+{
+       void *genlh;
+
+       genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
+                           type);
+       if (!genlh)
+               goto failure;
+
+       if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) ||
+           nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer_addr_ip4.s_addr) ||
+           nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms_addr_ip4.s_addr))
+               goto failure;
+
+       genlmsg_end(skb, genlh);
+       return 0;
+
+failure:
+       genlmsg_cancel(skb, genlh);
+       return -EMSGSIZE;
+}
+
+static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
+{
+       struct gtp0_header *gtp0;
+       struct echo_info echo;
+       struct sk_buff *msg;
+       struct iphdr *iph;
+       int ret;
+
+       gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
+
+       if (!gtp0_validate_echo_hdr(gtp0))
+               return -1;
+
+       iph = ip_hdr(skb);
+       echo.ms_addr_ip4.s_addr = iph->daddr;
+       echo.peer_addr_ip4.s_addr = iph->saddr;
+       echo.gtp_version = GTP_V0;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+       if (!msg)
+               return -ENOMEM;
+
+       ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
+       if (ret < 0) {
+               nlmsg_free(msg);
+               return ret;
+       }
+
+       return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
+                                      msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
+}
+
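Echo responses are published to userspace over a genetlink multicast group; a hedged libnl-genl sketch (not part of the patch; error handling omitted, and the "gtp" family/group names are my assumption from GTP_GENL_MCGRP_NAME) of how a daemon could subscribe:

	#include <netlink/netlink.h>
	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>

	int example_listen_gtp_echo(void)
	{
		struct nl_sock *sk = nl_socket_alloc();
		int grp;

		genl_connect(sk);
		grp = genl_ctrl_resolve_grp(sk, "gtp", "gtp");
		nl_socket_add_membership(sk, grp);

		/* GTP_CMD_ECHOREQ messages carrying GTPA_VERSION,
		 * GTPA_PEER_ADDRESS and GTPA_MS_ADDRESS now arrive on sk.
		 */
		return nl_recvmsgs_default(sk);
	}
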
 /* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
 static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
 {
@@ -231,6 +419,16 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
        if ((gtp0->flags >> 5) != GTP_V0)
                return 1;
 
+       /* If the sockets were created in the kernel, there is no daemon
+        * running in userspace which would handle the echo request.
+        */
+       if (gtp0->type == GTP_ECHO_REQ && gtp->sk_created)
+               return gtp0_send_echo_resp(gtp, skb);
+
+       if (gtp0->type == GTP_ECHO_RSP && gtp->sk_created)
+               return gtp0_handle_echo_resp(gtp, skb);
+
        if (gtp0->type != GTP_TPDU)
                return 1;
 
@@ -243,6 +441,131 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
        return gtp_rx(pctx, skb, hdrlen, gtp->role);
 }
 
+/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
+static void gtp1u_build_echo_msg(struct gtp1_header_long *hdr, __u8 msg_type)
+{
+       int len_pkt, len_hdr;
+
+       /* S flag must be set to 1 */
+       hdr->flags = 0x32; /* v1, GTP-non-prime. */
+       hdr->type = msg_type;
+       /* 3GPP TS 29.281 5.1 - TEID has to be set to 0 */
+       hdr->tid = 0;
+
+       /* seq, npdu and next are counted in the length of the GTP packet;
+        * that is why the size of gtp1_header is subtracted,
+        * not the size of gtp1_header_long.
+        */
+
+       len_hdr = sizeof(struct gtp1_header);
+
+       if (msg_type == GTP_ECHO_RSP) {
+               len_pkt = sizeof(struct gtp1u_packet);
+               hdr->length = htons(len_pkt - len_hdr);
+       } else {
+               /* GTP_ECHO_REQ does not carry a GTP Information Element,
+                * which is why gtp1_header_long is used here.
+                */
+               len_pkt = sizeof(struct gtp1_header_long);
+               hdr->length = htons(len_pkt - len_hdr);
+       }
+}
+
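Assuming the usual field sizes (8-byte mandatory header; 4 optional bytes for seq, npdu and next; 2-byte Recovery IE), the lengths set above work out as follows (my arithmetic, for illustration):

	/*
	 * GTP_ECHO_REQ: sizeof(struct gtp1_header_long) - sizeof(struct gtp1_header)
	 *             = 12 - 8 = 4   (only seq/npdu/next are counted)
	 * GTP_ECHO_RSP: sizeof(struct gtp1u_packet) - sizeof(struct gtp1_header)
	 *             = 14 - 8 = 6   (seq/npdu/next plus the 2-byte Recovery IE)
	 */
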
+static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
+{
+       struct gtp1_header_long *gtp1u;
+       struct gtp1u_packet *gtp_pkt;
+       struct rtable *rt;
+       struct flowi4 fl4;
+       struct iphdr *iph;
+
+       gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));
+
+       /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
+        * Error Indication and Supported Extension Headers Notification
+        * messages, the S flag shall be set to 1 and TEID shall be set to 0.
+        */
+       if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
+               return -1;
+
+       /* pull GTP and UDP headers */
+       skb_pull_data(skb,
+                     sizeof(struct gtp1_header_long) + sizeof(struct udphdr));
+
+       gtp_pkt = skb_push(skb, sizeof(struct gtp1u_packet));
+       memset(gtp_pkt, 0, sizeof(struct gtp1u_packet));
+
+       gtp1u_build_echo_msg(&gtp_pkt->gtp1u_h, GTP_ECHO_RSP);
+
+       /* 3GPP TS 29.281 7.7.2 - The Restart Counter value in the
+        * Recovery information element shall not be used, i.e. it shall
+        * be set to zero by the sender and shall be ignored by the receiver.
+        * The Recovery information element is mandatory due to backwards
+        * compatibility reasons.
+        */
+       gtp_pkt->ie.tag = GTPIE_RECOVERY;
+       gtp_pkt->ie.val = 0;
+
+       iph = ip_hdr(skb);
+
+       /* find route to the sender,
+        * src address becomes dst address and vice versa.
+        */
+       rt = ip4_route_output_gtp(&fl4, gtp->sk1u, iph->saddr, iph->daddr);
+       if (IS_ERR(rt)) {
+               netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
+                          &iph->saddr);
+               return -1;
+       }
+
+       udp_tunnel_xmit_skb(rt, gtp->sk1u, skb,
+                           fl4.saddr, fl4.daddr,
+                           iph->tos,
+                           ip4_dst_hoplimit(&rt->dst),
+                           0,
+                           htons(GTP1U_PORT), htons(GTP1U_PORT),
+                           !net_eq(sock_net(gtp->sk1u),
+                                   dev_net(gtp->dev)),
+                           false);
+       return 0;
+}
+
+static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
+{
+       struct gtp1_header_long *gtp1u;
+       struct echo_info echo;
+       struct sk_buff *msg;
+       struct iphdr *iph;
+       int ret;
+
+       gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));
+
+       /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
+        * Error Indication and Supported Extension Headers Notification
+        * messages, the S flag shall be set to 1 and TEID shall be set to 0.
+        */
+       if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
+               return -1;
+
+       iph = ip_hdr(skb);
+       echo.ms_addr_ip4.s_addr = iph->daddr;
+       echo.peer_addr_ip4.s_addr = iph->saddr;
+       echo.gtp_version = GTP_V1;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+       if (!msg)
+               return -ENOMEM;
+
+       ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
+       if (ret < 0) {
+               nlmsg_free(msg);
+               return ret;
+       }
+
+       return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
+                                      msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
+}
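The multicast at the end of gtp1u_handle_echo_resp() is how userspace learns that a peer answered. A minimal libnl-3 listener sketch, assuming the generic netlink family and the GTP_GENL_MCGRP_NAME group both resolve under the name "gtp" (an assumption, not confirmed by this hunk):

    /* build: cc echo_listen.c $(pkg-config --cflags --libs libnl-genl-3.0) */
    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>

    int main(void)
    {
            struct nl_sock *sk = nl_socket_alloc();
            int grp;

            if (!sk || genl_connect(sk) < 0)
                    return 1;

            grp = genl_ctrl_resolve_grp(sk, "gtp", "gtp"); /* assumed names */
            if (grp < 0 || nl_socket_add_membership(sk, grp) < 0)
                    return 1;

            /* The kernel multicasts a GTP_CMD_ECHOREQ message when an
             * echo response arrives, filled by gtp_genl_fill_echo(). */
            while (nl_recvmsgs_default(sk) >= 0)
                    ;
            nl_socket_free(sk);
            return 0;
    }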
+
 static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
 {
        unsigned int hdrlen = sizeof(struct udphdr) +
@@ -258,6 +581,16 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
        if ((gtp1->flags >> 5) != GTP_V1)
                return 1;
 
+       /* If the sockets were created in the kernel, there is no
+        * daemon running in userspace that would handle the echo
+        * request.
+        */
+       if (gtp1->type == GTP_ECHO_REQ && gtp->sk_created)
+               return gtp1u_send_echo_resp(gtp, skb);
+
+       if (gtp1->type == GTP_ECHO_RSP && gtp->sk_created)
+               return gtp1u_handle_echo_resp(gtp, skb);
+
        if (gtp1->type != GTP_TPDU)
                return 1;
 
@@ -320,8 +653,16 @@ static void gtp_encap_disable_sock(struct sock *sk)
 
 static void gtp_encap_disable(struct gtp_dev *gtp)
 {
-       gtp_encap_disable_sock(gtp->sk0);
-       gtp_encap_disable_sock(gtp->sk1u);
+       if (gtp->sk_created) {
+               udp_tunnel_sock_release(gtp->sk0->sk_socket);
+               udp_tunnel_sock_release(gtp->sk1u->sk_socket);
+               gtp->sk_created = false;
+               gtp->sk0 = NULL;
+               gtp->sk1u = NULL;
+       } else {
+               gtp_encap_disable_sock(gtp->sk0);
+               gtp_encap_disable_sock(gtp->sk1u);
+       }
 }
 
 /* UDP encapsulation receive handler. See net/ipv4/udp.c.
@@ -388,20 +729,6 @@ static void gtp_dev_uninit(struct net_device *dev)
        free_percpu(dev->tstats);
 }
 
-static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
-                                          const struct sock *sk,
-                                          __be32 daddr)
-{
-       memset(fl4, 0, sizeof(*fl4));
-       fl4->flowi4_oif         = sk->sk_bound_dev_if;
-       fl4->daddr              = daddr;
-       fl4->saddr              = inet_sk(sk)->inet_saddr;
-       fl4->flowi4_tos         = RT_CONN_FLAGS(sk);
-       fl4->flowi4_proto       = sk->sk_protocol;
-
-       return ip_route_output_key(sock_net(sk), fl4);
-}
-
 static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
 {
        int payload_len = skb->len;
@@ -507,7 +834,8 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
        }
        netdev_dbg(dev, "found PDP context %p\n", pctx);
 
-       rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr);
+       rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr,
+                                 inet_sk(pctx->sk)->inet_saddr);
        if (IS_ERR(rt)) {
                netdev_dbg(dev, "no route to SSGN %pI4\n",
                           &pctx->peer_addr_ip4.s_addr);
@@ -656,17 +984,69 @@ static void gtp_destructor(struct net_device *dev)
        kfree(gtp->tid_hash);
 }
 
+static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp)
+{
+       struct udp_tunnel_sock_cfg tuncfg = {};
+       struct udp_port_cfg udp_conf = {
+               .local_ip.s_addr        = htonl(INADDR_ANY),
+               .family                 = AF_INET,
+       };
+       struct net *net = gtp->net;
+       struct socket *sock;
+       int err;
+
+       if (type == UDP_ENCAP_GTP0)
+               udp_conf.local_udp_port = htons(GTP0_PORT);
+       else if (type == UDP_ENCAP_GTP1U)
+               udp_conf.local_udp_port = htons(GTP1U_PORT);
+       else
+               return ERR_PTR(-EINVAL);
+
+       err = udp_sock_create(net, &udp_conf, &sock);
+       if (err)
+               return ERR_PTR(err);
+
+       tuncfg.sk_user_data = gtp;
+       tuncfg.encap_type = type;
+       tuncfg.encap_rcv = gtp_encap_recv;
+       tuncfg.encap_destroy = NULL;
+
+       setup_udp_tunnel_sock(net, sock, &tuncfg);
+
+       return sock->sk;
+}
+
+static int gtp_create_sockets(struct gtp_dev *gtp, struct nlattr *data[])
+{
+       struct sock *sk1u = NULL;
+       struct sock *sk0 = NULL;
+
+       sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp);
+       if (IS_ERR(sk0))
+               return PTR_ERR(sk0);
+
+       sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp);
+       if (IS_ERR(sk1u)) {
+               udp_tunnel_sock_release(sk0->sk_socket);
+               return PTR_ERR(sk1u);
+       }
+
+       gtp->sk_created = true;
+       gtp->sk0 = sk0;
+       gtp->sk1u = sk1u;
+
+       return 0;
+}
+
 static int gtp_newlink(struct net *src_net, struct net_device *dev,
                       struct nlattr *tb[], struct nlattr *data[],
                       struct netlink_ext_ack *extack)
 {
+       unsigned int role = GTP_ROLE_GGSN;
        struct gtp_dev *gtp;
        struct gtp_net *gn;
        int hashsize, err;
 
-       if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
-               return -EINVAL;
-
        gtp = netdev_priv(dev);
 
        if (!data[IFLA_GTP_PDP_HASHSIZE]) {
@@ -677,11 +1057,28 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
                        hashsize = 1024;
        }
 
+       if (data[IFLA_GTP_ROLE]) {
+               role = nla_get_u32(data[IFLA_GTP_ROLE]);
+               if (role > GTP_ROLE_SGSN)
+                       return -EINVAL;
+       }
+       gtp->role = role;
+
+       if (!data[IFLA_GTP_RESTART_COUNT])
+               gtp->restart_count = 0;
+       else
+               gtp->restart_count = nla_get_u8(data[IFLA_GTP_RESTART_COUNT]);
+
+       gtp->net = src_net;
+
        err = gtp_hashtable_new(gtp, hashsize);
        if (err < 0)
                return err;
 
-       err = gtp_encap_enable(gtp, data);
+       if (data[IFLA_GTP_CREATE_SOCKETS])
+               err = gtp_create_sockets(gtp, data);
+       else
+               err = gtp_encap_enable(gtp, data);
        if (err < 0)
                goto out_hashtable;
 
@@ -726,6 +1123,8 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
        [IFLA_GTP_FD1]                  = { .type = NLA_U32 },
        [IFLA_GTP_PDP_HASHSIZE]         = { .type = NLA_U32 },
        [IFLA_GTP_ROLE]                 = { .type = NLA_U32 },
+       [IFLA_GTP_CREATE_SOCKETS]       = { .type = NLA_U8 },
+       [IFLA_GTP_RESTART_COUNT]        = { .type = NLA_U8 },
 };
 
 static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -740,7 +1139,8 @@ static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
 static size_t gtp_get_size(const struct net_device *dev)
 {
        return nla_total_size(sizeof(__u32)) + /* IFLA_GTP_PDP_HASHSIZE */
-               nla_total_size(sizeof(__u32)); /* IFLA_GTP_ROLE */
+               nla_total_size(sizeof(__u32)) + /* IFLA_GTP_ROLE */
+               nla_total_size(sizeof(__u8)); /* IFLA_GTP_RESTART_COUNT */
 }
 
 static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
@@ -751,6 +1151,8 @@ static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
                goto nla_put_failure;
        if (nla_put_u32(skb, IFLA_GTP_ROLE, gtp->role))
                goto nla_put_failure;
+       if (nla_put_u8(skb, IFLA_GTP_RESTART_COUNT, gtp->restart_count))
+               goto nla_put_failure;
 
        return 0;
 
@@ -848,7 +1250,9 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
 {
        struct sock *sk1u = NULL;
        struct sock *sk0 = NULL;
-       unsigned int role = GTP_ROLE_GGSN;
+
+       if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
+               return -EINVAL;
 
        if (data[IFLA_GTP_FD0]) {
                u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
@@ -868,18 +1272,8 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
                }
        }
 
-       if (data[IFLA_GTP_ROLE]) {
-               role = nla_get_u32(data[IFLA_GTP_ROLE]);
-               if (role > GTP_ROLE_SGSN) {
-                       gtp_encap_disable_sock(sk0);
-                       gtp_encap_disable_sock(sk1u);
-                       return -EINVAL;
-               }
-       }
-
        gtp->sk0 = sk0;
        gtp->sk1u = sk1u;
-       gtp->role = role;
 
        return 0;
 }
@@ -1183,16 +1577,6 @@ out_unlock:
        return err;
 }
 
-static struct genl_family gtp_genl_family;
-
-enum gtp_multicast_groups {
-       GTP_GENL_MCGRP,
-};
-
-static const struct genl_multicast_group gtp_genl_mcgrps[] = {
-       [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
-};
-
 static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
                              int flags, u32 type, struct pdp_ctx *pctx)
 {
@@ -1336,6 +1720,95 @@ out:
        return skb->len;
 }
 
+static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info)
+{
+       struct sk_buff *skb_to_send;
+       __be32 src_ip, dst_ip;
+       unsigned int version;
+       struct gtp_dev *gtp;
+       struct flowi4 fl4;
+       struct rtable *rt;
+       struct sock *sk;
+       __be16 port;
+       int len;
+
+       if (!info->attrs[GTPA_VERSION] ||
+           !info->attrs[GTPA_LINK] ||
+           !info->attrs[GTPA_PEER_ADDRESS] ||
+           !info->attrs[GTPA_MS_ADDRESS])
+               return -EINVAL;
+
+       version = nla_get_u32(info->attrs[GTPA_VERSION]);
+       dst_ip = nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
+       src_ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+
+       gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
+       if (!gtp)
+               return -ENODEV;
+
+       if (!gtp->sk_created)
+               return -EOPNOTSUPP;
+       if (!(gtp->dev->flags & IFF_UP))
+               return -ENETDOWN;
+
+       if (version == GTP_V0) {
+               struct gtp0_header *gtp0_h;
+
+               len = LL_RESERVED_SPACE(gtp->dev) + sizeof(struct gtp0_header) +
+                       sizeof(struct iphdr) + sizeof(struct udphdr);
+
+               skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
+               if (!skb_to_send)
+                       return -ENOMEM;
+
+               sk = gtp->sk0;
+               port = htons(GTP0_PORT);
+
+               gtp0_h = skb_push(skb_to_send, sizeof(struct gtp0_header));
+               memset(gtp0_h, 0, sizeof(struct gtp0_header));
+               gtp0_build_echo_msg(gtp0_h, GTP_ECHO_REQ);
+       } else if (version == GTP_V1) {
+               struct gtp1_header_long *gtp1u_h;
+
+               len = LL_RESERVED_SPACE(gtp->dev) +
+                       sizeof(struct gtp1_header_long) +
+                       sizeof(struct iphdr) + sizeof(struct udphdr);
+
+               skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
+               if (!skb_to_send)
+                       return -ENOMEM;
+
+               sk = gtp->sk1u;
+               port = htons(GTP1U_PORT);
+
+               gtp1u_h = skb_push(skb_to_send,
+                                  sizeof(struct gtp1_header_long));
+               memset(gtp1u_h, 0, sizeof(struct gtp1_header_long));
+               gtp1u_build_echo_msg(gtp1u_h, GTP_ECHO_REQ);
+       } else {
+               return -ENODEV;
+       }
+
+       rt = ip4_route_output_gtp(&fl4, sk, dst_ip, src_ip);
+       if (IS_ERR(rt)) {
+               netdev_dbg(gtp->dev, "no route for echo request to %pI4\n",
+                          &dst_ip);
+               kfree_skb(skb_to_send);
+               return -ENODEV;
+       }
+
+       udp_tunnel_xmit_skb(rt, sk, skb_to_send,
+                           fl4.saddr, fl4.daddr,
+                           fl4.flowi4_tos,
+                           ip4_dst_hoplimit(&rt->dst),
+                           0,
+                           port, port,
+                           !net_eq(sock_net(sk),
+                                   dev_net(gtp->dev)),
+                           false);
+       return 0;
+}
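Driving the new command from userspace is a plain generic netlink doit request. A hedged libnl-3 sketch follows: the device name and addresses are examples, GTPA_*/GTP_CMD_ECHOREQ must come from a uapi header that already carries the new definitions, and per the handler above the target device must have kernel-created sockets or the request fails with -EOPNOTSUPP:

    #include <arpa/inet.h>
    #include <net/if.h>
    #include <linux/gtp.h>          /* GTP_CMD_ECHOREQ, GTPA_* (new uapi) */
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>

    int main(void)
    {
            struct nl_sock *sk = nl_socket_alloc();
            struct nl_msg *msg = nlmsg_alloc();
            int fam;

            if (!sk || !msg || genl_connect(sk) < 0)
                    return 1;
            fam = genl_ctrl_resolve(sk, "gtp");
            if (fam < 0)
                    return 1;

            genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
                        GTP_CMD_ECHOREQ, 0);
            nla_put_u32(msg, GTPA_VERSION, 1);                   /* GTP_V1 */
            nla_put_u32(msg, GTPA_LINK, if_nametoindex("gtp0")); /* example */
            nla_put_u32(msg, GTPA_PEER_ADDRESS, inet_addr("198.51.100.1"));
            nla_put_u32(msg, GTPA_MS_ADDRESS, inet_addr("203.0.113.7"));

            return nl_send_auto(sk, msg) < 0;
    }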
+
 static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
        [GTPA_LINK]             = { .type = NLA_U32, },
        [GTPA_VERSION]          = { .type = NLA_U32, },
@@ -1368,6 +1841,12 @@ static const struct genl_small_ops gtp_genl_ops[] = {
                .dumpit = gtp_genl_dump_pdp,
                .flags = GENL_ADMIN_PERM,
        },
+       {
+               .cmd = GTP_CMD_ECHOREQ,
+               .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+               .doit = gtp_genl_send_echo_req,
+               .flags = GENL_ADMIN_PERM,
+       },
 };
 
 static struct genl_family gtp_genl_family __ro_after_init = {
index a03d0b4..3e69079 100644 (file)
@@ -982,10 +982,10 @@ static int baycom_setmode(struct baycom_state *bc, const char *modestr)
                bc->cfg.extmodem = 0;
        if (strstr(modestr,"extmodem"))
                bc->cfg.extmodem = 1;
-       if (strstr(modestr,"noloopback"))
-               bc->cfg.loopback = 0;
        if (strstr(modestr,"loopback"))
                bc->cfg.loopback = 1;
+       if (strstr(modestr, "noloopback"))
+               bc->cfg.loopback = 0;
        if ((cp = strstr(modestr,"fclk="))) {
                bc->cfg.fclk = simple_strtoul(cp+5, NULL, 0);
                if (bc->cfg.fclk < 1000000)
index e675d10..9442f75 100644 (file)
@@ -1630,7 +1630,6 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
 
        case VM_PKT_DATA_USING_XFER_PAGES:
                return netvsc_receive(ndev, net_device, nvchan, desc);
-               break;
 
        case VM_PKT_DATA_INBAND:
                netvsc_receive_inband(ndev, net_device, desc);
index 3646469..fde1c49 100644 (file)
@@ -1587,6 +1587,9 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
        pcpu_sum = kvmalloc_array(num_possible_cpus(),
                                  sizeof(struct netvsc_ethtool_pcpu_stats),
                                  GFP_KERNEL);
+       if (!pcpu_sum)
+               return;
+
        netvsc_get_pcpu_stats(dev, pcpu_sum);
        for_each_present_cpu(cpu) {
                struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
index f2989aa..db5ac75 100644 (file)
 
 #define IPA_AUTOSUSPEND_DELAY  500     /* milliseconds */
 
-/**
- * struct ipa_interconnect - IPA interconnect information
- * @path:              Interconnect path
- * @average_bandwidth: Average interconnect bandwidth (KB/second)
- * @peak_bandwidth:    Peak interconnect bandwidth (KB/second)
- */
-struct ipa_interconnect {
-       struct icc_path *path;
-       u32 average_bandwidth;
-       u32 peak_bandwidth;
-};
-
 /**
  * enum ipa_power_flag - IPA power flags
  * @IPA_POWER_FLAG_RESUMED:    Whether resume from suspend has been signaled
@@ -79,164 +67,78 @@ struct ipa_power {
        spinlock_t spinlock;    /* used with STOPPED/STARTED power flags */
        DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
        u32 interconnect_count;
-       struct ipa_interconnect *interconnect;
+       struct icc_bulk_data interconnect[];
 };
 
-static int ipa_interconnect_init_one(struct device *dev,
-                                    struct ipa_interconnect *interconnect,
-                                    const struct ipa_interconnect_data *data)
-{
-       struct icc_path *path;
-
-       path = of_icc_get(dev, data->name);
-       if (IS_ERR(path)) {
-               int ret = PTR_ERR(path);
-
-               dev_err_probe(dev, ret, "error getting %s interconnect\n",
-                             data->name);
-
-               return ret;
-       }
-
-       interconnect->path = path;
-       interconnect->average_bandwidth = data->average_bandwidth;
-       interconnect->peak_bandwidth = data->peak_bandwidth;
-
-       return 0;
-}
-
-static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect)
-{
-       icc_put(interconnect->path);
-       memset(interconnect, 0, sizeof(*interconnect));
-}
-
 /* Initialize interconnects required for IPA operation */
-static int ipa_interconnect_init(struct ipa_power *power, struct device *dev,
+static int ipa_interconnect_init(struct ipa_power *power,
                                 const struct ipa_interconnect_data *data)
 {
-       struct ipa_interconnect *interconnect;
-       u32 count;
-       int ret;
-
-       count = power->interconnect_count;
-       interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL);
-       if (!interconnect)
-               return -ENOMEM;
-       power->interconnect = interconnect;
-
-       while (count--) {
-               ret = ipa_interconnect_init_one(dev, interconnect, data++);
-               if (ret)
-                       goto out_unwind;
-               interconnect++;
-       }
-
-       return 0;
-
-out_unwind:
-       while (interconnect-- > power->interconnect)
-               ipa_interconnect_exit_one(interconnect);
-       kfree(power->interconnect);
-       power->interconnect = NULL;
-
-       return ret;
-}
-
-/* Inverse of ipa_interconnect_init() */
-static void ipa_interconnect_exit(struct ipa_power *power)
-{
-       struct ipa_interconnect *interconnect;
-
-       interconnect = power->interconnect + power->interconnect_count;
-       while (interconnect-- > power->interconnect)
-               ipa_interconnect_exit_one(interconnect);
-       kfree(power->interconnect);
-       power->interconnect = NULL;
-}
-
-/* Currently we only use one bandwidth level, so just "enable" interconnects */
-static int ipa_interconnect_enable(struct ipa *ipa)
-{
-       struct ipa_interconnect *interconnect;
-       struct ipa_power *power = ipa->power;
+       struct icc_bulk_data *interconnect;
        int ret;
        u32 i;
 
-       interconnect = power->interconnect;
+       /* Initialize our interconnect data array for bulk operations */
+       interconnect = &power->interconnect[0];
        for (i = 0; i < power->interconnect_count; i++) {
-               ret = icc_set_bw(interconnect->path,
-                                interconnect->average_bandwidth,
-                                interconnect->peak_bandwidth);
-               if (ret) {
-                       dev_err(&ipa->pdev->dev,
-                               "error %d enabling %s interconnect\n",
-                               ret, icc_get_name(interconnect->path));
-                       goto out_unwind;
-               }
+               /* interconnect->path is filled in by of_icc_bulk_get() */
+               interconnect->name = data->name;
+               interconnect->avg_bw = data->average_bandwidth;
+               interconnect->peak_bw = data->peak_bandwidth;
+               data++;
                interconnect++;
        }
 
-       return 0;
+       ret = of_icc_bulk_get(power->dev, power->interconnect_count,
+                             power->interconnect);
+       if (ret)
+               return ret;
 
-out_unwind:
-       while (interconnect-- > power->interconnect)
-               (void)icc_set_bw(interconnect->path, 0, 0);
+       /* All interconnects are initially disabled */
+       icc_bulk_disable(power->interconnect_count, power->interconnect);
+
+       /* Set the bandwidth values to be used when enabled */
+       ret = icc_bulk_set_bw(power->interconnect_count, power->interconnect);
+       if (ret)
+               icc_bulk_put(power->interconnect_count, power->interconnect);
 
        return ret;
 }
 
-/* To disable an interconnect, we just set its bandwidth to 0 */
-static int ipa_interconnect_disable(struct ipa *ipa)
+/* Inverse of ipa_interconnect_init() */
+static void ipa_interconnect_exit(struct ipa_power *power)
 {
-       struct ipa_interconnect *interconnect;
-       struct ipa_power *power = ipa->power;
-       struct device *dev = &ipa->pdev->dev;
-       int result = 0;
-       u32 count;
-       int ret;
-
-       count = power->interconnect_count;
-       interconnect = power->interconnect + count;
-       while (count--) {
-               interconnect--;
-               ret = icc_set_bw(interconnect->path, 0, 0);
-               if (ret) {
-                       dev_err(dev, "error %d disabling %s interconnect\n",
-                               ret, icc_get_name(interconnect->path));
-                       /* Try to disable all; record only the first error */
-                       if (!result)
-                               result = ret;
-               }
-       }
-
-       return result;
+       icc_bulk_put(power->interconnect_count, power->interconnect);
 }
 
 /* Enable IPA power, enabling interconnects and the core clock */
 static int ipa_power_enable(struct ipa *ipa)
 {
+       struct ipa_power *power = ipa->power;
        int ret;
 
-       ret = ipa_interconnect_enable(ipa);
+       ret = icc_bulk_enable(power->interconnect_count, power->interconnect);
        if (ret)
                return ret;
 
-       ret = clk_prepare_enable(ipa->power->core);
+       ret = clk_prepare_enable(power->core);
        if (ret) {
-               dev_err(&ipa->pdev->dev, "error %d enabling core clock\n", ret);
-               (void)ipa_interconnect_disable(ipa);
+               dev_err(power->dev, "error %d enabling core clock\n", ret);
+               icc_bulk_disable(power->interconnect_count,
+                                power->interconnect);
        }
 
        return ret;
 }
 
 /* Inverse of ipa_power_enable() */
-static int ipa_power_disable(struct ipa *ipa)
+static void ipa_power_disable(struct ipa *ipa)
 {
-       clk_disable_unprepare(ipa->power->core);
+       struct ipa_power *power = ipa->power;
 
-       return ipa_interconnect_disable(ipa);
+       clk_disable_unprepare(power->core);
+
+       icc_bulk_disable(power->interconnect_count, power->interconnect);
 }
 
 static int ipa_runtime_suspend(struct device *dev)
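The conversion above replaces the hand-rolled per-path get/set/put loops with the interconnect framework's bulk API. A condensed sketch of that lifecycle, with a hypothetical driver and the error unwinding reduced to its essentials:

    #include <linux/interconnect.h>

    static int demo_icc_bulk(struct device *dev,
                             struct icc_bulk_data *paths, int num)
    {
            int ret;

            /* looks up each paths[i].name in DT, fills paths[i].path */
            ret = of_icc_bulk_get(dev, num, paths);
            if (ret)
                    return ret;

            /* program the avg_bw/peak_bw values stored in the array */
            ret = icc_bulk_set_bw(num, paths);
            if (ret)
                    goto out_put;

            ret = icc_bulk_enable(num, paths);  /* vote the bandwidth in */
            if (ret)
                    goto out_put;

            icc_bulk_disable(num, paths);       /* drop the vote again */
    out_put:
            icc_bulk_put(num, paths);           /* release all paths */
            return ret;
    }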
@@ -250,7 +152,9 @@ static int ipa_runtime_suspend(struct device *dev)
                gsi_suspend(&ipa->gsi);
        }
 
-       return ipa_power_disable(ipa);
+       ipa_power_disable(ipa);
+
+       return 0;
 }
 
 static int ipa_runtime_resume(struct device *dev)
@@ -453,6 +357,7 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
 {
        struct ipa_power *power;
        struct clk *clk;
+       size_t size;
        int ret;
 
        clk = clk_get(dev, "core");
@@ -469,7 +374,8 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
                goto err_clk_put;
        }
 
-       power = kzalloc(sizeof(*power), GFP_KERNEL);
+       size = struct_size(power, interconnect, data->interconnect_count);
+       power = kzalloc(size, GFP_KERNEL);
        if (!power) {
                ret = -ENOMEM;
                goto err_clk_put;
@@ -479,7 +385,7 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
        spin_lock_init(&power->spinlock);
        power->interconnect_count = data->interconnect_count;
 
-       ret = ipa_interconnect_init(power, dev, data->interconnect_data);
+       ret = ipa_interconnect_init(power, data->interconnect_data);
        if (ret)
                goto err_kfree;
 
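The kzalloc() change above folds the separate interconnect array into the ipa_power allocation via a flexible array member. A self-contained sketch of the struct_size() idiom, using hypothetical types:

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo {                   /* hypothetical, mirrors ipa_power */
            u32 count;
            u64 items[];            /* flexible array member */
    };

    static struct demo *demo_alloc(u32 n)
    {
            /* struct_size() == sizeof(struct demo) + n * sizeof(u64),
             * but saturates instead of wrapping on overflow */
            struct demo *d = kzalloc(struct_size(d, items, n), GFP_KERNEL);

            if (d)
                    d->count = n;
            return d;
    }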
index c613900..6ffb274 100644 (file)
@@ -555,7 +555,7 @@ static void ipvlan_multicast_enqueue(struct ipvl_port *port,
                schedule_work(&port->wq);
        } else {
                spin_unlock(&port->backlog.lock);
-               atomic_long_inc(&skb->dev->rx_dropped);
+               dev_core_stats_rx_dropped_inc(skb->dev);
                kfree_skb(skb);
        }
 }
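dev_core_stats_rx_dropped_inc() is one of the new per-cpu core-stats helpers that replace the atomic rx_dropped/tx_dropped fields on struct net_device; the per-cpu storage is allocated on first use. A minimal sketch of the drop path as a driver would write it now:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static void demo_drop_rx(struct sk_buff *skb)
    {
            /* was: atomic_long_inc(&skb->dev->rx_dropped) */
            dev_core_stats_rx_dropped_inc(skb->dev);
            kfree_skb(skb);
    }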
index 33753a2..069e882 100644 (file)
@@ -371,7 +371,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
 free_nskb:
        kfree_skb(nskb);
 err:
-       atomic_long_inc(&skb->dev->rx_dropped);
+       dev_core_stats_rx_dropped_inc(skb->dev);
 }
 
 static void macvlan_flush_sources(struct macvlan_port *port,
@@ -889,7 +889,7 @@ static void macvlan_set_lockdep_class(struct net_device *dev)
 static int macvlan_init(struct net_device *dev)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
-       const struct net_device *lowerdev = vlan->lowerdev;
+       struct net_device *lowerdev = vlan->lowerdev;
        struct macvlan_port *port = vlan->port;
 
        dev->state              = (dev->state & ~MACVLAN_STATE_MASK) |
@@ -911,6 +911,9 @@ static int macvlan_init(struct net_device *dev)
 
        port->count += 1;
 
+       /* Get macvlan's reference to lowerdev */
+       dev_hold_track(lowerdev, &vlan->dev_tracker, GFP_KERNEL);
+
        return 0;
 }
 
@@ -1173,6 +1176,14 @@ static const struct net_device_ops macvlan_netdev_ops = {
        .ndo_features_check     = passthru_features_check,
 };
 
+static void macvlan_dev_free(struct net_device *dev)
+{
+       struct macvlan_dev *vlan = netdev_priv(dev);
+
+       /* Get rid of the macvlan's reference to lowerdev */
+       dev_put_track(vlan->lowerdev, &vlan->dev_tracker);
+}
+
 void macvlan_common_setup(struct net_device *dev)
 {
        ether_setup(dev);
@@ -1184,6 +1195,7 @@ void macvlan_common_setup(struct net_device *dev)
        dev->priv_flags        |= IFF_UNICAST_FLT | IFF_CHANGE_PROTO_DOWN;
        dev->netdev_ops         = &macvlan_netdev_ops;
        dev->needs_free_netdev  = true;
+       dev->priv_destructor    = macvlan_dev_free;
        dev->header_ops         = &macvlan_hard_header_ops;
        dev->ethtool_ops        = &macvlan_ethtool_ops;
 }
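The macvlan change pairs dev_hold_track() in ndo_init with dev_put_track() in the new priv_destructor, so the reference tracker can attribute a leaked lowerdev refcount to this holder when CONFIG_NET_DEV_REFCNT_TRACKER is enabled. A reduced sketch of the pairing, with a hypothetical priv struct:

    #include <linux/netdevice.h>

    struct demo_priv {
            struct net_device *lowerdev;
            netdevice_tracker dev_tracker;
    };

    static void demo_take_ref(struct demo_priv *p, struct net_device *lower)
    {
            p->lowerdev = lower;
            /* like dev_hold(), but records this holder for debugging */
            dev_hold_track(lower, &p->dev_tracker, GFP_KERNEL);
    }

    static void demo_drop_ref(struct demo_priv *p)
    {
            dev_put_track(p->lowerdev, &p->dev_tracker);
    }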
index 62723a7..7cd103f 100644 (file)
@@ -286,7 +286,7 @@ static void mctp_serial_rx(struct mctp_serial *dev)
        cb = __mctp_cb(skb);
        cb->halen = 0;
 
-       netif_rx_ni(skb);
+       netif_rx(skb);
        dev->netdev->stats.rx_packets++;
        dev->netdev->stats.rx_bytes += dev->rxlen;
 }
index 7d2abaf..c483ba6 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/of_mdio.h>
 #include <linux/phy.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/regmap.h>
 
 #define MSCC_MIIM_REG_STATUS           0x0
 #define                PHY_CFG_PHY_RESET       (BIT(5) | BIT(6) | BIT(7) | BIT(8))
 #define MSCC_PHY_REG_PHY_STATUS        0x4
 
+#define LAN966X_CUPHY_COMMON_CFG       0x0
+#define                CUPHY_COMMON_CFG_RESET_N        BIT(0)
+
+struct mscc_miim_info {
+       unsigned int phy_reset_offset;
+       unsigned int phy_reset_bits;
+};
+
 struct mscc_miim_dev {
        struct regmap *regs;
        int mii_status_offset;
        struct regmap *phy_regs;
-       int phy_reset_offset;
+       const struct mscc_miim_info *info;
 };
 
 /* When high resolution timers aren't built-in: we can't use usleep_range() as
@@ -157,27 +166,29 @@ out:
 static int mscc_miim_reset(struct mii_bus *bus)
 {
        struct mscc_miim_dev *miim = bus->priv;
-       int offset = miim->phy_reset_offset;
+       unsigned int offset, bits;
        int ret;
 
-       if (miim->phy_regs) {
-               ret = regmap_write(miim->phy_regs,
-                                  MSCC_PHY_REG_PHY_CFG + offset, 0);
-               if (ret < 0) {
-                       WARN_ONCE(1, "mscc reset set error %d\n", ret);
-                       return ret;
-               }
+       if (!miim->phy_regs)
+               return 0;
 
-               ret = regmap_write(miim->phy_regs,
-                                  MSCC_PHY_REG_PHY_CFG + offset, 0x1ff);
-               if (ret < 0) {
-                       WARN_ONCE(1, "mscc reset clear error %d\n", ret);
-                       return ret;
-               }
+       offset = miim->info->phy_reset_offset;
+       bits = miim->info->phy_reset_bits;
+
+       ret = regmap_update_bits(miim->phy_regs, offset, bits, 0);
+       if (ret < 0) {
+               WARN_ONCE(1, "mscc reset set error %d\n", ret);
+               return ret;
+       }
 
-               mdelay(500);
+       ret = regmap_update_bits(miim->phy_regs, offset, bits, bits);
+       if (ret < 0) {
+               WARN_ONCE(1, "mscc reset clear error %d\n", ret);
+               return ret;
        }
 
+       mdelay(500);
+
        return 0;
 }
 
@@ -187,6 +198,13 @@ static const struct regmap_config mscc_miim_regmap_config = {
        .reg_stride     = 4,
 };
 
+static const struct regmap_config mscc_miim_phy_regmap_config = {
+       .reg_bits       = 32,
+       .val_bits       = 32,
+       .reg_stride     = 4,
+       .name           = "phy",
+};
+
 int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name,
                    struct regmap *mii_regmap, int status_offset)
 {
@@ -250,7 +268,7 @@ static int mscc_miim_probe(struct platform_device *pdev)
                }
 
                phy_regmap = devm_regmap_init_mmio(&pdev->dev, phy_regs,
-                                                  &mscc_miim_regmap_config);
+                                                  &mscc_miim_phy_regmap_config);
                if (IS_ERR(phy_regmap)) {
                        dev_err(&pdev->dev, "Unable to create phy register regmap\n");
                        return PTR_ERR(phy_regmap);
@@ -265,7 +283,10 @@ static int mscc_miim_probe(struct platform_device *pdev)
 
        miim = bus->priv;
        miim->phy_regs = phy_regmap;
-       miim->phy_reset_offset = 0;
+
+       miim->info = device_get_match_data(&pdev->dev);
+       if (!miim->info)
+               return -EINVAL;
 
        ret = of_mdiobus_register(bus, pdev->dev.of_node);
        if (ret < 0) {
@@ -287,8 +308,25 @@ static int mscc_miim_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct mscc_miim_info mscc_ocelot_miim_info = {
+       .phy_reset_offset = MSCC_PHY_REG_PHY_CFG,
+       .phy_reset_bits = PHY_CFG_PHY_ENA | PHY_CFG_PHY_COMMON_RESET |
+                         PHY_CFG_PHY_RESET,
+};
+
+static const struct mscc_miim_info microchip_lan966x_miim_info = {
+       .phy_reset_offset = LAN966X_CUPHY_COMMON_CFG,
+       .phy_reset_bits = CUPHY_COMMON_CFG_RESET_N,
+};
+
 static const struct of_device_id mscc_miim_match[] = {
-       { .compatible = "mscc,ocelot-miim" },
+       {
+               .compatible = "mscc,ocelot-miim",
+               .data = &mscc_ocelot_miim_info
+       }, {
+               .compatible = "microchip,lan966x-miim",
+               .data = &microchip_lan966x_miim_info
+       },
        { }
 };
 MODULE_DEVICE_TABLE(of, mscc_miim_match);
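The mscc-miim rework keys per-SoC reset data off the of_device_id .data pointer and fetches it with device_get_match_data(). The same pattern in isolation, with hypothetical compatible strings and fields:

    #include <linux/mod_devicetable.h>
    #include <linux/platform_device.h>
    #include <linux/property.h>

    struct demo_info {                      /* hypothetical per-SoC data */
            unsigned int reset_offset;
    };

    static const struct demo_info demo_soc_a = { .reset_offset = 0x4 };

    static const struct of_device_id demo_match[] = {
            { .compatible = "vendor,demo-a", .data = &demo_soc_a },
            { }
    };

    static int demo_probe(struct platform_device *pdev)
    {
            /* returns the .data of the matched of_device_id entry */
            const struct demo_info *info = device_get_match_data(&pdev->dev);

            return info ? 0 : -EINVAL;
    }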
index ebd001f..a881e35 100644 (file)
@@ -168,8 +168,8 @@ int mdio_mux_init(struct device *dev,
                cb->mii_bus->priv = cb;
 
                cb->mii_bus->name = "mdio_mux";
-               snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x",
-                        pb->parent_id, v);
+               snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x.%x",
+                        cb->mii_bus->name, pb->parent_id, v);
                cb->mii_bus->parent = dev;
                cb->mii_bus->read = mdio_mux_read;
                cb->mii_bus->write = mdio_mux_write;
index 86ec5aa..21a0435 100644 (file)
@@ -89,7 +89,7 @@ static int net_failover_close(struct net_device *dev)
 static netdev_tx_t net_failover_drop_xmit(struct sk_buff *skb,
                                          struct net_device *dev)
 {
-       atomic_long_inc(&dev->tx_dropped);
+       dev_core_stats_tx_dropped_inc(dev);
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
 }
index a1cbfa4..5735e5b 100644 (file)
@@ -3,7 +3,7 @@
 obj-$(CONFIG_NETDEVSIM) += netdevsim.o
 
 netdevsim-objs := \
-       netdev.o dev.o ethtool.o fib.o bus.o health.o udp_tunnels.o
+       netdev.o dev.o ethtool.o fib.o bus.o health.o hwstats.o udp_tunnels.o
 
 ifeq ($(CONFIG_BPF_SYSCALL),y)
 netdevsim-objs += \
index 08d7b46..57a3ac8 100644 (file)
@@ -59,7 +59,7 @@ static struct dentry *nsim_dev_ddir;
 unsigned int nsim_dev_get_vfs(struct nsim_dev *nsim_dev)
 {
        WARN_ON(!lockdep_rtnl_is_held() &&
-               !lockdep_is_held(&nsim_dev->vfs_lock));
+               !devl_lock_is_held(priv_to_devlink(nsim_dev)));
 
        return nsim_dev->nsim_bus_dev->num_vfs;
 }
@@ -275,7 +275,7 @@ static ssize_t nsim_bus_dev_max_vfs_write(struct file *file,
                return -ENOMEM;
 
        nsim_dev = file->private_data;
-       mutex_lock(&nsim_dev->vfs_lock);
+       devl_lock(priv_to_devlink(nsim_dev));
        /* Reject if VFs are configured */
        if (nsim_dev_get_vfs(nsim_dev)) {
                ret = -EBUSY;
@@ -285,7 +285,7 @@ static ssize_t nsim_bus_dev_max_vfs_write(struct file *file,
                *ppos += count;
                ret = count;
        }
-       mutex_unlock(&nsim_dev->vfs_lock);
+       devl_unlock(priv_to_devlink(nsim_dev));
 
        kfree(vfconfigs);
        return ret;
@@ -339,6 +339,7 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
        debugfs_create_bool("fail_trap_policer_counter_get", 0600,
                            nsim_dev->ddir,
                            &nsim_dev->fail_trap_policer_counter_get);
+       /* caution, dev_max_vfs write takes devlink lock */
        debugfs_create_file("max_vfs", 0600, nsim_dev->ddir,
                            nsim_dev, &nsim_dev_max_vfs_fops);
 
@@ -567,6 +568,9 @@ static void nsim_dev_dummy_region_exit(struct nsim_dev *nsim_dev)
        devlink_region_destroy(nsim_dev->dummy_region);
 }
 
+static int
+__nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_type type,
+                   unsigned int port_index);
 static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port);
 
 static int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev,
@@ -575,12 +579,10 @@ static int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev,
        struct devlink *devlink = priv_to_devlink(nsim_dev);
        struct nsim_dev_port *nsim_dev_port, *tmp;
 
-       devlink_rate_nodes_destroy(devlink);
-       mutex_lock(&nsim_dev->port_list_lock);
+       devl_rate_nodes_destroy(devlink);
        list_for_each_entry_safe(nsim_dev_port, tmp, &nsim_dev->port_list, list)
                if (nsim_dev_port_is_vf(nsim_dev_port))
                        __nsim_dev_port_del(nsim_dev_port);
-       mutex_unlock(&nsim_dev->port_list_lock);
        nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
        return 0;
 }
@@ -588,11 +590,11 @@ static int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev,
 static int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev,
                                     struct netlink_ext_ack *extack)
 {
-       struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev;
+       struct nsim_dev_port *nsim_dev_port, *tmp;
        int i, err;
 
        for (i = 0; i < nsim_dev_get_vfs(nsim_dev); i++) {
-               err = nsim_drv_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i);
+               err = __nsim_dev_port_add(nsim_dev, NSIM_DEV_PORT_TYPE_VF, i);
                if (err) {
                        NL_SET_ERR_MSG_MOD(extack, "Failed to initialize VFs' netdevsim ports");
                        pr_err("Failed to initialize VF id=%d. %d.\n", i, err);
@@ -603,8 +605,9 @@ static int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev,
        return 0;
 
 err_port_add_vfs:
-       for (i--; i >= 0; i--)
-               nsim_drv_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i);
+       list_for_each_entry_safe(nsim_dev_port, tmp, &nsim_dev->port_list, list)
+               if (nsim_dev_port_is_vf(nsim_dev_port))
+                       __nsim_dev_port_del(nsim_dev_port);
        return err;
 }
 
@@ -612,22 +615,16 @@ static int nsim_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
                                         struct netlink_ext_ack *extack)
 {
        struct nsim_dev *nsim_dev = devlink_priv(devlink);
-       int err = 0;
 
-       mutex_lock(&nsim_dev->vfs_lock);
        if (mode == nsim_dev->esw_mode)
-               goto unlock;
+               return 0;
 
        if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
-               err = nsim_esw_legacy_enable(nsim_dev, extack);
-       else if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
-               err = nsim_esw_switchdev_enable(nsim_dev, extack);
-       else
-               err = -EINVAL;
+               return nsim_esw_legacy_enable(nsim_dev, extack);
+       if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+               return nsim_esw_switchdev_enable(nsim_dev, extack);
 
-unlock:
-       mutex_unlock(&nsim_dev->vfs_lock);
-       return err;
+       return -EINVAL;
 }
 
 static int nsim_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
@@ -835,14 +832,14 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
        /* For each running port and enabled packet trap, generate a UDP
         * packet with a random 5-tuple and report it.
         */
-       mutex_lock(&nsim_dev->port_list_lock);
+       devl_lock(priv_to_devlink(nsim_dev));
        list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) {
                if (!netif_running(nsim_dev_port->ns->netdev))
                        continue;
 
                nsim_dev_trap_report(nsim_dev_port);
        }
-       mutex_unlock(&nsim_dev->port_list_lock);
+       devl_unlock(priv_to_devlink(nsim_dev));
 
        schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
                              msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
@@ -924,6 +921,7 @@ static void nsim_dev_traps_exit(struct devlink *devlink)
 {
        struct nsim_dev *nsim_dev = devlink_priv(devlink);
 
+       /* caution, trap work takes devlink lock */
        cancel_delayed_work_sync(&nsim_dev->trap_data->trap_report_dw);
        devlink_traps_unregister(devlink, nsim_traps_arr,
                                 ARRAY_SIZE(nsim_traps_arr));
@@ -1380,8 +1378,8 @@ static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_typ
        memcpy(attrs.switch_id.id, nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
        attrs.switch_id.id_len = nsim_dev->switch_id.id_len;
        devlink_port_attrs_set(devlink_port, &attrs);
-       err = devlink_port_register(priv_to_devlink(nsim_dev), devlink_port,
-                                   nsim_dev_port->port_index);
+       err = devl_port_register(priv_to_devlink(nsim_dev), devlink_port,
+                                nsim_dev_port->port_index);
        if (err)
                goto err_port_free;
 
@@ -1396,8 +1394,8 @@ static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_typ
        }
 
        if (nsim_dev_port_is_vf(nsim_dev_port)) {
-               err = devlink_rate_leaf_create(&nsim_dev_port->devlink_port,
-                                              nsim_dev_port);
+               err = devl_rate_leaf_create(&nsim_dev_port->devlink_port,
+                                           nsim_dev_port);
                if (err)
                        goto err_nsim_destroy;
        }
@@ -1412,7 +1410,7 @@ err_nsim_destroy:
 err_port_debugfs_exit:
        nsim_dev_port_debugfs_exit(nsim_dev_port);
 err_dl_port_unregister:
-       devlink_port_unregister(devlink_port);
+       devl_port_unregister(devlink_port);
 err_port_free:
        kfree(nsim_dev_port);
        return err;
@@ -1424,11 +1422,11 @@ static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port)
 
        list_del(&nsim_dev_port->list);
        if (nsim_dev_port_is_vf(nsim_dev_port))
-               devlink_rate_leaf_destroy(&nsim_dev_port->devlink_port);
+               devl_rate_leaf_destroy(&nsim_dev_port->devlink_port);
        devlink_port_type_clear(devlink_port);
        nsim_destroy(nsim_dev_port->ns);
        nsim_dev_port_debugfs_exit(nsim_dev_port);
-       devlink_port_unregister(devlink_port);
+       devl_port_unregister(devlink_port);
        kfree(nsim_dev_port);
 }
 
@@ -1436,11 +1434,11 @@ static void nsim_dev_port_del_all(struct nsim_dev *nsim_dev)
 {
        struct nsim_dev_port *nsim_dev_port, *tmp;
 
-       mutex_lock(&nsim_dev->port_list_lock);
+       devl_lock(priv_to_devlink(nsim_dev));
        list_for_each_entry_safe(nsim_dev_port, tmp,
                                 &nsim_dev->port_list, list)
                __nsim_dev_port_del(nsim_dev_port);
-       mutex_unlock(&nsim_dev->port_list_lock);
+       devl_unlock(priv_to_devlink(nsim_dev));
 }
 
 static int nsim_dev_port_add_all(struct nsim_dev *nsim_dev,
@@ -1449,7 +1447,9 @@ static int nsim_dev_port_add_all(struct nsim_dev *nsim_dev,
        int i, err;
 
        for (i = 0; i < port_count; i++) {
+               devl_lock(priv_to_devlink(nsim_dev));
                err = __nsim_dev_port_add(nsim_dev, NSIM_DEV_PORT_TYPE_PF, i);
+               devl_unlock(priv_to_devlink(nsim_dev));
                if (err)
                        goto err_port_del_all;
        }
@@ -1470,7 +1470,6 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
        devlink = priv_to_devlink(nsim_dev);
        nsim_dev = devlink_priv(devlink);
        INIT_LIST_HEAD(&nsim_dev->port_list);
-       mutex_init(&nsim_dev->port_list_lock);
        nsim_dev->fw_update_status = true;
        nsim_dev->fw_update_overwrite_mask = 0;
 
@@ -1498,10 +1497,14 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
        if (err)
                goto err_health_exit;
 
-       err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+       err = nsim_dev_hwstats_init(nsim_dev);
        if (err)
                goto err_psample_exit;
 
+       err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+       if (err)
+               goto err_hwstats_exit;
+
        nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
                                                      0200,
                                                      nsim_dev->ddir,
@@ -1509,6 +1512,8 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
                                                &nsim_dev_take_snapshot_fops);
        return 0;
 
+err_hwstats_exit:
+       nsim_dev_hwstats_exit(nsim_dev);
 err_psample_exit:
        nsim_dev_psample_exit(nsim_dev);
 err_health_exit:
@@ -1537,8 +1542,6 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
        nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id);
        get_random_bytes(nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
        INIT_LIST_HEAD(&nsim_dev->port_list);
-       mutex_init(&nsim_dev->vfs_lock);
-       mutex_init(&nsim_dev->port_list_lock);
        nsim_dev->fw_update_status = true;
        nsim_dev->fw_update_overwrite_mask = 0;
        nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
@@ -1595,15 +1598,21 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
        if (err)
                goto err_bpf_dev_exit;
 
-       err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+       err = nsim_dev_hwstats_init(nsim_dev);
        if (err)
                goto err_psample_exit;
 
+       err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+       if (err)
+               goto err_hwstats_exit;
+
        nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
        devlink_set_features(devlink, DEVLINK_F_RELOAD);
        devlink_register(devlink);
        return 0;
 
+err_hwstats_exit:
+       nsim_dev_hwstats_exit(nsim_dev);
 err_psample_exit:
        nsim_dev_psample_exit(nsim_dev);
 err_bpf_dev_exit:
@@ -1639,21 +1648,21 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
                return;
        debugfs_remove(nsim_dev->take_snapshot);
 
-       mutex_lock(&nsim_dev->vfs_lock);
+       devl_lock(devlink);
        if (nsim_dev_get_vfs(nsim_dev)) {
                nsim_bus_dev_set_vfs(nsim_dev->nsim_bus_dev, 0);
                if (nsim_esw_mode_is_switchdev(nsim_dev))
                        nsim_esw_legacy_enable(nsim_dev, NULL);
        }
-       mutex_unlock(&nsim_dev->vfs_lock);
+       devl_unlock(devlink);
 
        nsim_dev_port_del_all(nsim_dev);
+       nsim_dev_hwstats_exit(nsim_dev);
        nsim_dev_psample_exit(nsim_dev);
        nsim_dev_health_exit(nsim_dev);
        nsim_fib_destroy(devlink, nsim_dev->fib_data);
        nsim_dev_traps_exit(devlink);
        nsim_dev_dummy_region_exit(nsim_dev);
-       mutex_destroy(&nsim_dev->port_list_lock);
 }
 
 void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev)
@@ -1693,12 +1702,12 @@ int nsim_drv_port_add(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type
        struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
        int err;
 
-       mutex_lock(&nsim_dev->port_list_lock);
+       devl_lock(priv_to_devlink(nsim_dev));
        if (__nsim_dev_port_lookup(nsim_dev, type, port_index))
                err = -EEXIST;
        else
                err = __nsim_dev_port_add(nsim_dev, type, port_index);
-       mutex_unlock(&nsim_dev->port_list_lock);
+       devl_unlock(priv_to_devlink(nsim_dev));
        return err;
 }
 
@@ -1709,13 +1718,13 @@ int nsim_drv_port_del(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type
        struct nsim_dev_port *nsim_dev_port;
        int err = 0;
 
-       mutex_lock(&nsim_dev->port_list_lock);
+       devl_lock(priv_to_devlink(nsim_dev));
        nsim_dev_port = __nsim_dev_port_lookup(nsim_dev, type, port_index);
        if (!nsim_dev_port)
                err = -ENOENT;
        else
                __nsim_dev_port_del(nsim_dev_port);
-       mutex_unlock(&nsim_dev->port_list_lock);
+       devl_unlock(priv_to_devlink(nsim_dev));
        return err;
 }
 
@@ -1723,9 +1732,10 @@ int nsim_drv_configure_vfs(struct nsim_bus_dev *nsim_bus_dev,
                           unsigned int num_vfs)
 {
        struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+       struct devlink *devlink = priv_to_devlink(nsim_dev);
        int ret = 0;
 
-       mutex_lock(&nsim_dev->vfs_lock);
+       devl_lock(devlink);
        if (nsim_bus_dev->num_vfs == num_vfs)
                goto exit_unlock;
        if (nsim_bus_dev->num_vfs && num_vfs) {
@@ -1751,7 +1761,7 @@ int nsim_drv_configure_vfs(struct nsim_bus_dev *nsim_bus_dev,
        }
 
 exit_unlock:
-       mutex_unlock(&nsim_dev->vfs_lock);
+       devl_unlock(devlink);
 
        return ret;
 }
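Throughout this netdevsim conversion the driver-private vfs_lock/port_list_lock mutexes are replaced by the devlink instance lock, and the devl_-prefixed calls (devl_port_register(), devl_rate_leaf_create(), and friends) assert that the caller already holds it. The basic shape, sketched:

    #include <net/devlink.h>

    static int demo_add_port(struct devlink *devlink,
                             struct devlink_port *port, unsigned int index)
    {
            int err;

            devl_lock(devlink);
            err = devl_port_register(devlink, port, index); /* lock held */
            devl_unlock(devlink);
            return err;
    }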
diff --git a/drivers/net/netdevsim/hwstats.c b/drivers/net/netdevsim/hwstats.c
new file mode 100644 (file)
index 0000000..605a38e
--- /dev/null
@@ -0,0 +1,486 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/debugfs.h>
+
+#include "netdevsim.h"
+
+#define NSIM_DEV_HWSTATS_TRAFFIC_MS    100
+
+static struct list_head *
+nsim_dev_hwstats_get_list_head(struct nsim_dev_hwstats *hwstats,
+                              enum netdev_offload_xstats_type type)
+{
+       switch (type) {
+       case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
+               return &hwstats->l3_list;
+       }
+
+       WARN_ON_ONCE(1);
+       return NULL;
+}
+
+static void nsim_dev_hwstats_traffic_bump(struct nsim_dev_hwstats *hwstats,
+                                         enum netdev_offload_xstats_type type)
+{
+       struct nsim_dev_hwstats_netdev *hwsdev;
+       struct list_head *hwsdev_list;
+
+       hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, type);
+       if (WARN_ON(!hwsdev_list))
+               return;
+
+       list_for_each_entry(hwsdev, hwsdev_list, list) {
+               if (hwsdev->enabled) {
+                       hwsdev->stats.rx_packets += 1;
+                       hwsdev->stats.tx_packets += 2;
+                       hwsdev->stats.rx_bytes += 100;
+                       hwsdev->stats.tx_bytes += 300;
+               }
+       }
+}
+
+static void nsim_dev_hwstats_traffic_work(struct work_struct *work)
+{
+       struct nsim_dev_hwstats *hwstats;
+
+       hwstats = container_of(work, struct nsim_dev_hwstats, traffic_dw.work);
+       mutex_lock(&hwstats->hwsdev_list_lock);
+       nsim_dev_hwstats_traffic_bump(hwstats, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+       mutex_unlock(&hwstats->hwsdev_list_lock);
+
+       schedule_delayed_work(&hwstats->traffic_dw,
+                             msecs_to_jiffies(NSIM_DEV_HWSTATS_TRAFFIC_MS));
+}
+
+static struct nsim_dev_hwstats_netdev *
+nsim_dev_hwslist_find_hwsdev(struct list_head *hwsdev_list,
+                            int ifindex)
+{
+       struct nsim_dev_hwstats_netdev *hwsdev;
+
+       list_for_each_entry(hwsdev, hwsdev_list, list) {
+               if (hwsdev->netdev->ifindex == ifindex)
+                       return hwsdev;
+       }
+
+       return NULL;
+}
+
+static int nsim_dev_hwsdev_enable(struct nsim_dev_hwstats_netdev *hwsdev,
+                                 struct netlink_ext_ack *extack)
+{
+       if (hwsdev->fail_enable) {
+               hwsdev->fail_enable = false;
+               NL_SET_ERR_MSG_MOD(extack, "Stats enablement set to fail");
+               return -ECANCELED;
+       }
+
+       hwsdev->enabled = true;
+       return 0;
+}
+
+static void nsim_dev_hwsdev_disable(struct nsim_dev_hwstats_netdev *hwsdev)
+{
+       hwsdev->enabled = false;
+       memset(&hwsdev->stats, 0, sizeof(hwsdev->stats));
+}
+
+static int
+nsim_dev_hwsdev_report_delta(struct nsim_dev_hwstats_netdev *hwsdev,
+                            struct netdev_notifier_offload_xstats_info *info)
+{
+       netdev_offload_xstats_report_delta(info->report_delta, &hwsdev->stats);
+       memset(&hwsdev->stats, 0, sizeof(hwsdev->stats));
+       return 0;
+}
+
+static void
+nsim_dev_hwsdev_report_used(struct nsim_dev_hwstats_netdev *hwsdev,
+                           struct netdev_notifier_offload_xstats_info *info)
+{
+       if (hwsdev->enabled)
+               netdev_offload_xstats_report_used(info->report_used);
+}
+
+static int nsim_dev_hwstats_event_off_xstats(struct nsim_dev_hwstats *hwstats,
+                                            struct net_device *dev,
+                                            unsigned long event, void *ptr)
+{
+       struct netdev_notifier_offload_xstats_info *info;
+       struct nsim_dev_hwstats_netdev *hwsdev;
+       struct list_head *hwsdev_list;
+       int err = 0;
+
+       info = ptr;
+       hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, info->type);
+       if (!hwsdev_list)
+               return 0;
+
+       mutex_lock(&hwstats->hwsdev_list_lock);
+
+       hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, dev->ifindex);
+       if (!hwsdev)
+               goto out;
+
+       switch (event) {
+       case NETDEV_OFFLOAD_XSTATS_ENABLE:
+               err = nsim_dev_hwsdev_enable(hwsdev, info->info.extack);
+               break;
+       case NETDEV_OFFLOAD_XSTATS_DISABLE:
+               nsim_dev_hwsdev_disable(hwsdev);
+               break;
+       case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+               nsim_dev_hwsdev_report_used(hwsdev, info);
+               break;
+       case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+               err = nsim_dev_hwsdev_report_delta(hwsdev, info);
+               break;
+       }
+
+out:
+       mutex_unlock(&hwstats->hwsdev_list_lock);
+       return err;
+}
+
+static void nsim_dev_hwsdev_fini(struct nsim_dev_hwstats_netdev *hwsdev)
+{
+       dev_put(hwsdev->netdev);
+       kfree(hwsdev);
+}
+
+static void
+__nsim_dev_hwstats_event_unregister(struct nsim_dev_hwstats *hwstats,
+                                   struct net_device *dev,
+                                   enum netdev_offload_xstats_type type)
+{
+       struct nsim_dev_hwstats_netdev *hwsdev;
+       struct list_head *hwsdev_list;
+
+       hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, type);
+       if (WARN_ON(!hwsdev_list))
+               return;
+
+       hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, dev->ifindex);
+       if (!hwsdev)
+               return;
+
+       list_del(&hwsdev->list);
+       nsim_dev_hwsdev_fini(hwsdev);
+}
+
+static void nsim_dev_hwstats_event_unregister(struct nsim_dev_hwstats *hwstats,
+                                             struct net_device *dev)
+{
+       mutex_lock(&hwstats->hwsdev_list_lock);
+       __nsim_dev_hwstats_event_unregister(hwstats, dev,
+                                           NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+       mutex_unlock(&hwstats->hwsdev_list_lock);
+}
+
+static int nsim_dev_hwstats_event(struct nsim_dev_hwstats *hwstats,
+                                 struct net_device *dev,
+                                 unsigned long event, void *ptr)
+{
+       switch (event) {
+       case NETDEV_OFFLOAD_XSTATS_ENABLE:
+       case NETDEV_OFFLOAD_XSTATS_DISABLE:
+       case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+       case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+               return nsim_dev_hwstats_event_off_xstats(hwstats, dev,
+                                                        event, ptr);
+       case NETDEV_UNREGISTER:
+               nsim_dev_hwstats_event_unregister(hwstats, dev);
+               break;
+       }
+
+       return 0;
+}
+
+static int nsim_dev_netdevice_event(struct notifier_block *nb,
+                                   unsigned long event, void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct nsim_dev_hwstats *hwstats;
+       int err = 0;
+
+       hwstats = container_of(nb, struct nsim_dev_hwstats, netdevice_nb);
+       err = nsim_dev_hwstats_event(hwstats, dev, event, ptr);
+       if (err)
+               return notifier_from_errno(err);
+
+       return NOTIFY_OK;
+}
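Because notifier callbacks return NOTIFY_* codes rather than errnos, the handler above wraps its error with notifier_from_errno(); the notifying side recovers it with notifier_to_errno(). Sketched:

    #include <linux/notifier.h>

    static int demo_nb_cb(struct notifier_block *nb, unsigned long event,
                          void *ptr)
    {
            int err = -EOPNOTSUPP;  /* some failure for this event */

            if (err)
                    /* encodes the errno alongside NOTIFY_STOP_MASK */
                    return notifier_from_errno(err);
            return NOTIFY_OK;
    }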
+
+static int
+nsim_dev_hwstats_enable_ifindex(struct nsim_dev_hwstats *hwstats,
+                               int ifindex,
+                               enum netdev_offload_xstats_type type,
+                               struct list_head *hwsdev_list)
+{
+       struct nsim_dev_hwstats_netdev *hwsdev;
+       struct nsim_dev *nsim_dev;
+       struct net_device *netdev;
+       bool notify = false;
+       struct net *net;
+       int err = 0;
+
+       nsim_dev = container_of(hwstats, struct nsim_dev, hwstats);
+       net = nsim_dev_net(nsim_dev);
+
+       rtnl_lock();
+       mutex_lock(&hwstats->hwsdev_list_lock);
+       hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, ifindex);
+       if (hwsdev)
+               goto out_unlock_list;
+
+       netdev = dev_get_by_index(net, ifindex);
+       if (!netdev) {
+               err = -ENODEV;
+               goto out_unlock_list;
+       }
+
+       hwsdev = kzalloc(sizeof(*hwsdev), GFP_KERNEL);
+       if (!hwsdev) {
+               err = -ENOMEM;
+               goto out_put_netdev;
+       }
+
+       hwsdev->netdev = netdev;
+       list_add_tail(&hwsdev->list, hwsdev_list);
+       mutex_unlock(&hwstats->hwsdev_list_lock);
+
+       if (netdev_offload_xstats_enabled(netdev, type)) {
+               nsim_dev_hwsdev_enable(hwsdev, NULL);
+               notify = true;
+       }
+
+       if (notify)
+               rtnl_offload_xstats_notify(netdev);
+       rtnl_unlock();
+       return err;
+
+out_put_netdev:
+       dev_put(netdev);
+out_unlock_list:
+       mutex_unlock(&hwstats->hwsdev_list_lock);
+       rtnl_unlock();
+       return err;
+}
+
+static int
+nsim_dev_hwstats_disable_ifindex(struct nsim_dev_hwstats *hwstats,
+                                int ifindex,
+                                enum netdev_offload_xstats_type type,
+                                struct list_head *hwsdev_list)
+{
+       struct nsim_dev_hwstats_netdev *hwsdev;
+       int err = 0;
+
+       rtnl_lock();
+       mutex_lock(&hwstats->hwsdev_list_lock);
+       hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, ifindex);
+       if (hwsdev)
+               list_del(&hwsdev->list);
+       mutex_unlock(&hwstats->hwsdev_list_lock);
+
+       if (!hwsdev) {
+               err = -ENOENT;
+               goto unlock_out;
+       }
+
+       if (netdev_offload_xstats_enabled(hwsdev->netdev, type)) {
+               netdev_offload_xstats_push_delta(hwsdev->netdev, type,
+                                                &hwsdev->stats);
+               rtnl_offload_xstats_notify(hwsdev->netdev);
+       }
+       nsim_dev_hwsdev_fini(hwsdev);
+
+unlock_out:
+       rtnl_unlock();
+       return err;
+}
+
+static int
+nsim_dev_hwstats_fail_ifindex(struct nsim_dev_hwstats *hwstats,
+                             int ifindex,
+                             enum netdev_offload_xstats_type type,
+                             struct list_head *hwsdev_list)
+{
+       struct nsim_dev_hwstats_netdev *hwsdev;
+       int err = 0;
+
+       mutex_lock(&hwstats->hwsdev_list_lock);
+
+       hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, ifindex);
+       if (!hwsdev) {
+               err = -ENOENT;
+               goto err_hwsdev_list_unlock;
+       }
+
+       hwsdev->fail_enable = true;
+
+err_hwsdev_list_unlock:
+       mutex_unlock(&hwstats->hwsdev_list_lock);
+       return err;
+}
+
+enum nsim_dev_hwstats_do {
+       NSIM_DEV_HWSTATS_DO_DISABLE,
+       NSIM_DEV_HWSTATS_DO_ENABLE,
+       NSIM_DEV_HWSTATS_DO_FAIL,
+};
+
+struct nsim_dev_hwstats_fops {
+       const struct file_operations fops;
+       enum nsim_dev_hwstats_do action;
+       enum netdev_offload_xstats_type type;
+};
+
+static ssize_t
+nsim_dev_hwstats_do_write(struct file *file,
+                         const char __user *data,
+                         size_t count, loff_t *ppos)
+{
+       struct nsim_dev_hwstats *hwstats = file->private_data;
+       struct nsim_dev_hwstats_fops *hwsfops;
+       struct list_head *hwsdev_list;
+       int ifindex;
+       int err;
+
+       hwsfops = container_of(debugfs_real_fops(file),
+                              struct nsim_dev_hwstats_fops, fops);
+
+       err = kstrtoint_from_user(data, count, 0, &ifindex);
+       if (err)
+               return err;
+
+       hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, hwsfops->type);
+       if (WARN_ON(!hwsdev_list))
+               return -EINVAL;
+
+       switch (hwsfops->action) {
+       case NSIM_DEV_HWSTATS_DO_DISABLE:
+               err = nsim_dev_hwstats_disable_ifindex(hwstats, ifindex,
+                                                      hwsfops->type,
+                                                      hwsdev_list);
+               break;
+       case NSIM_DEV_HWSTATS_DO_ENABLE:
+               err = nsim_dev_hwstats_enable_ifindex(hwstats, ifindex,
+                                                     hwsfops->type,
+                                                     hwsdev_list);
+               break;
+       case NSIM_DEV_HWSTATS_DO_FAIL:
+               err = nsim_dev_hwstats_fail_ifindex(hwstats, ifindex,
+                                                   hwsfops->type,
+                                                   hwsdev_list);
+               break;
+       }
+       if (err)
+               return err;
+
+       return count;
+}
+
+#define NSIM_DEV_HWSTATS_FOPS(ACTION, TYPE)                    \
+       {                                                       \
+               .fops = {                                       \
+                       .open = simple_open,                    \
+                       .write = nsim_dev_hwstats_do_write,     \
+                       .llseek = generic_file_llseek,          \
+                       .owner = THIS_MODULE,                   \
+               },                                              \
+               .action = ACTION,                               \
+               .type = TYPE,                                   \
+       }
+
+static const struct nsim_dev_hwstats_fops nsim_dev_hwstats_l3_disable_fops =
+       NSIM_DEV_HWSTATS_FOPS(NSIM_DEV_HWSTATS_DO_DISABLE,
+                             NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+
+static const struct nsim_dev_hwstats_fops nsim_dev_hwstats_l3_enable_fops =
+       NSIM_DEV_HWSTATS_FOPS(NSIM_DEV_HWSTATS_DO_ENABLE,
+                             NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+
+static const struct nsim_dev_hwstats_fops nsim_dev_hwstats_l3_fail_fops =
+       NSIM_DEV_HWSTATS_FOPS(NSIM_DEV_HWSTATS_DO_FAIL,
+                             NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+
+#undef NSIM_DEV_HWSTATS_FOPS
+
+int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev)
+{
+       struct nsim_dev_hwstats *hwstats = &nsim_dev->hwstats;
+       struct net *net = nsim_dev_net(nsim_dev);
+       int err;
+
+       mutex_init(&hwstats->hwsdev_list_lock);
+       INIT_LIST_HEAD(&hwstats->l3_list);
+
+       hwstats->netdevice_nb.notifier_call = nsim_dev_netdevice_event;
+       err = register_netdevice_notifier_net(net, &hwstats->netdevice_nb);
+       if (err)
+               goto err_mutex_destroy;
+
+       hwstats->ddir = debugfs_create_dir("hwstats", nsim_dev->ddir);
+       if (IS_ERR(hwstats->ddir)) {
+               err = PTR_ERR(hwstats->ddir);
+               goto err_unregister_notifier;
+       }
+
+       hwstats->l3_ddir = debugfs_create_dir("l3", hwstats->ddir);
+       if (IS_ERR(hwstats->l3_ddir)) {
+               err = PTR_ERR(hwstats->l3_ddir);
+               goto err_remove_hwstats_recursive;
+       }
+
+       debugfs_create_file("enable_ifindex", 0600, hwstats->l3_ddir, hwstats,
+                           &nsim_dev_hwstats_l3_enable_fops.fops);
+       debugfs_create_file("disable_ifindex", 0600, hwstats->l3_ddir, hwstats,
+                           &nsim_dev_hwstats_l3_disable_fops.fops);
+       debugfs_create_file("fail_next_enable", 0600, hwstats->l3_ddir, hwstats,
+                           &nsim_dev_hwstats_l3_fail_fops.fops);
+
+       INIT_DELAYED_WORK(&hwstats->traffic_dw,
+                         &nsim_dev_hwstats_traffic_work);
+       schedule_delayed_work(&hwstats->traffic_dw,
+                             msecs_to_jiffies(NSIM_DEV_HWSTATS_TRAFFIC_MS));
+       return 0;
+
+err_remove_hwstats_recursive:
+       debugfs_remove_recursive(hwstats->ddir);
+err_unregister_notifier:
+       unregister_netdevice_notifier_net(net, &hwstats->netdevice_nb);
+err_mutex_destroy:
+       mutex_destroy(&hwstats->hwsdev_list_lock);
+       return err;
+}
+
+static void nsim_dev_hwsdev_list_wipe(struct nsim_dev_hwstats *hwstats,
+                                     enum netdev_offload_xstats_type type)
+{
+       struct nsim_dev_hwstats_netdev *hwsdev, *tmp;
+       struct list_head *hwsdev_list;
+
+       hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, type);
+       if (WARN_ON(!hwsdev_list))
+               return;
+
+       mutex_lock(&hwstats->hwsdev_list_lock);
+       list_for_each_entry_safe(hwsdev, tmp, hwsdev_list, list) {
+               list_del(&hwsdev->list);
+               nsim_dev_hwsdev_fini(hwsdev);
+       }
+       mutex_unlock(&hwstats->hwsdev_list_lock);
+}
+
+void nsim_dev_hwstats_exit(struct nsim_dev *nsim_dev)
+{
+       struct nsim_dev_hwstats *hwstats = &nsim_dev->hwstats;
+       struct net *net = nsim_dev_net(nsim_dev);
+
+       cancel_delayed_work_sync(&hwstats->traffic_dw);
+       debugfs_remove_recursive(hwstats->ddir);
+       unregister_netdevice_notifier_net(net, &hwstats->netdevice_nb);
+       nsim_dev_hwsdev_list_wipe(hwstats, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+       mutex_destroy(&hwstats->hwsdev_list_lock);
+}
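
The three debugfs knobs above share a single write handler: each file embeds its file_operations in a wrapper struct together with an .action/.type pair, and the handler recovers the wrapper by applying container_of() to debugfs_real_fops(). A self-contained userspace model of that dispatch pattern (all names below are illustrative, not kernel API):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ops { int (*write)(const struct ops *op); };

    struct wrapped_ops {
            struct ops ops;  /* what the framework hands back to the handler */
            int action;      /* per-file context, like .action/.type above */
    };

    static int do_write(const struct ops *op)
    {
            /* Recover the wrapper, as nsim_dev_hwstats_do_write() does. */
            const struct wrapped_ops *w =
                    container_of(op, const struct wrapped_ops, ops);

            printf("dispatching action %d\n", w->action);
            return 0;
    }

    int main(void)
    {
            struct wrapped_ops enable = { .ops = { .write = do_write }, .action = 1 };
            struct wrapped_ops fail = { .ops = { .write = do_write }, .action = 2 };

            enable.ops.write(&enable.ops);  /* prints: dispatching action 1 */
            fail.ops.write(&fail.ops);      /* prints: dispatching action 2 */
            return 0;
    }

One handler thus serves enable_ifindex, disable_ifindex and fail_next_enable, differing only in the data stored next to the fops.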
index c49771f..0b12287 100644 (file)
@@ -184,6 +184,28 @@ struct nsim_dev_health {
 int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink);
 void nsim_dev_health_exit(struct nsim_dev *nsim_dev);
 
+struct nsim_dev_hwstats_netdev {
+       struct list_head list;
+       struct net_device *netdev;
+       struct rtnl_hw_stats64 stats;
+       bool enabled;
+       bool fail_enable;
+};
+
+struct nsim_dev_hwstats {
+       struct dentry *ddir;
+       struct dentry *l3_ddir;
+
+       struct mutex hwsdev_list_lock; /* protects hwsdev list(s) */
+       struct list_head l3_list;
+
+       struct notifier_block netdevice_nb;
+       struct delayed_work traffic_dw;
+};
+
+int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev);
+void nsim_dev_hwstats_exit(struct nsim_dev *nsim_dev);
+
 #if IS_ENABLED(CONFIG_PSAMPLE)
 int nsim_dev_psample_init(struct nsim_dev *nsim_dev);
 void nsim_dev_psample_exit(struct nsim_dev *nsim_dev);
@@ -239,7 +261,6 @@ struct nsim_dev {
        struct dentry *take_snapshot;
        struct dentry *nodes_ddir;
 
-       struct mutex vfs_lock;  /* Protects vfconfigs */
        struct nsim_vf_config *vfconfigs;
 
        struct bpf_offload_dev *bpf_dev;
@@ -252,7 +273,6 @@ struct nsim_dev {
        struct list_head bpf_bound_maps;
        struct netdev_phys_item_id switch_id;
        struct list_head port_list;
-       struct mutex port_list_lock; /* protects port list */
        bool fw_update_status;
        u32 fw_update_overwrite_mask;
        u32 max_macs;
@@ -261,6 +281,7 @@ struct nsim_dev {
        bool fail_reload;
        struct devlink_region *dummy_region;
        struct nsim_dev_health health;
+       struct nsim_dev_hwstats hwstats;
        struct flow_action_cookie *fa_cookie;
        spinlock_t fa_cookie_lock; /* protects fa_cookie */
        bool fail_trap_group_set;
index 902495a..ea7571a 100644 (file)
@@ -220,6 +220,7 @@ config MEDIATEK_GE_PHY
 
 config MICREL_PHY
        tristate "Micrel PHYs"
+       depends on PTP_1588_CLOCK_OPTIONAL
        help
          Supports the KSZ9021, VSC8201, KS8001 PHYs.
 
index c2d1a85..ef8b141 100644 (file)
@@ -886,7 +886,7 @@ out:
        spin_unlock_irqrestore(&dp83640->rx_lock, flags);
 
        if (shhwtstamps)
-               netif_rx_ni(skb);
+               netif_rx(skb);
 }
 
 static void decode_txts(struct dp83640_private *dp83640,
@@ -970,17 +970,6 @@ static void decode_status_frame(struct dp83640_private *dp83640,
        }
 }
 
-static int is_sync(struct sk_buff *skb, int type)
-{
-       struct ptp_header *hdr;
-
-       hdr = ptp_parse_header(skb, type);
-       if (!hdr)
-               return 0;
-
-       return ptp_get_msgtype(hdr, type) == PTP_MSGTYPE_SYNC;
-}
-
 static void dp83640_free_clocks(void)
 {
        struct dp83640_clock *clock;
@@ -1329,7 +1318,7 @@ static void rx_timestamp_work(struct work_struct *work)
                        break;
                }
 
-               netif_rx_ni(skb);
+               netif_rx(skb);
        }
 
        if (!skb_queue_empty(&dp83640->rx_queue))
@@ -1380,7 +1369,7 @@ static bool dp83640_rxtstamp(struct mii_timestamper *mii_ts,
                skb_queue_tail(&dp83640->rx_queue, skb);
                schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
        } else {
-               netif_rx_ni(skb);
+               netif_rx(skb);
        }
 
        return true;
@@ -1396,7 +1385,7 @@ static void dp83640_txtstamp(struct mii_timestamper *mii_ts,
        switch (dp83640->hwts_tx_en) {
 
        case HWTSTAMP_TX_ONESTEP_SYNC:
-               if (is_sync(skb, type)) {
+               if (ptp_msg_is_sync(skb, type)) {
                        kfree_skb(skb);
                        return;
                }
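
This driver's netif_rx_ni() calls, and the many below, become plain netif_rx(): netif_rx() now disables bottom halves itself, which makes it safe from process context and leaves the _ni ("no interrupt") variant with nothing to add. A hedged sketch of the new shape, not the verbatim upstream body:

    /* Roughly what netif_rx() does after this series; the real code in
     * net/core/dev.c also handles statistics and tracing. */
    int netif_rx(struct sk_buff *skb)
    {
            int ret;

            local_bh_disable();     /* cheap when already in softirq */
            ret = __netif_rx(skb);  /* the old hardirq-only fast path */
            local_bh_enable();      /* runs pending softirqs, as _ni did */
            return ret;
    }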
index 211b547..ce17b2a 100644 (file)
@@ -274,7 +274,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
                if (err < 0)
                        return err;
 
-               err = phy_write(phydev, MII_DP83822_MISR1, 0);
+               err = phy_write(phydev, MII_DP83822_MISR2, 0);
                if (err < 0)
                        return err;
 
index 2429db6..2702faf 100644 (file)
@@ -1687,8 +1687,8 @@ static int marvell_suspend(struct phy_device *phydev)
        int err;
 
        /* Suspend the fiber mode first */
-       if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
-                              phydev->supported)) {
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+                             phydev->supported)) {
                err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
                if (err < 0)
                        goto error;
@@ -1722,8 +1722,8 @@ static int marvell_resume(struct phy_device *phydev)
        int err;
 
        /* Resume the fiber mode first */
-       if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
-                              phydev->supported)) {
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+                             phydev->supported)) {
                err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
                if (err < 0)
                        goto error;
index 7e7904f..73f7962 100644 (file)
 #define  INTSRC_LINK_DOWN      BIT(4)
 #define  INTSRC_REMOTE_FAULT   BIT(5)
 #define  INTSRC_ANEG_COMPLETE  BIT(6)
+#define  INTSRC_ENERGY_DETECT  BIT(7)
 #define INTSRC_MASK    30
 
+#define INT_SOURCES (INTSRC_LINK_DOWN | INTSRC_ANEG_COMPLETE | \
+                    INTSRC_ENERGY_DETECT)
+
 #define BANK_ANALOG_DSP                0
 #define BANK_WOL               1
 #define BANK_BIST              3
@@ -200,7 +204,6 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev)
 
 static int meson_gxl_config_intr(struct phy_device *phydev)
 {
-       u16 val;
        int ret;
 
        if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
@@ -209,16 +212,9 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
                if (ret)
                        return ret;
 
-               val = INTSRC_ANEG_PR
-                       | INTSRC_PARALLEL_FAULT
-                       | INTSRC_ANEG_LP_ACK
-                       | INTSRC_LINK_DOWN
-                       | INTSRC_REMOTE_FAULT
-                       | INTSRC_ANEG_COMPLETE;
-               ret = phy_write(phydev, INTSRC_MASK, val);
+               ret = phy_write(phydev, INTSRC_MASK, INT_SOURCES);
        } else {
-               val = 0;
-               ret = phy_write(phydev, INTSRC_MASK, val);
+               ret = phy_write(phydev, INTSRC_MASK, 0);
 
                /* Ack any pending IRQ */
                ret = meson_gxl_ack_interrupt(phydev);
@@ -237,10 +233,23 @@ static irqreturn_t meson_gxl_handle_interrupt(struct phy_device *phydev)
                return IRQ_NONE;
        }
 
+       irq_status &= INT_SOURCES;
+
        if (irq_status == 0)
                return IRQ_NONE;
 
-       phy_trigger_machine(phydev);
+       /* Aneg-complete interrupt is used for link-up detection */
+       if (phydev->autoneg == AUTONEG_ENABLE &&
+           irq_status == INTSRC_ENERGY_DETECT)
+               return IRQ_HANDLED;
+
+       /* Give the PHY some time before the MAC starts sending data. This
+        * works around an issue where the network doesn't come up properly.
+        */
+       if (!(irq_status & INTSRC_LINK_DOWN))
+               phy_queue_state_machine(phydev, msecs_to_jiffies(100));
+       else
+               phy_trigger_machine(phydev);
 
        return IRQ_HANDLED;
 }
index 81a7632..19b11e8 100644 (file)
@@ -1976,17 +1976,6 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
 }
 
-static bool is_sync(struct sk_buff *skb, int type)
-{
-       struct ptp_header *hdr;
-
-       hdr = ptp_parse_header(skb, type);
-       if (!hdr)
-               return false;
-
-       return ((ptp_get_msgtype(hdr, type) & 0xf) == 0);
-}
-
 static void lan8814_txtstamp(struct mii_timestamper *mii_ts,
                             struct sk_buff *skb, int type)
 {
@@ -1994,7 +1983,7 @@ static void lan8814_txtstamp(struct mii_timestamper *mii_ts,
 
        switch (ptp_priv->hwts_tx_type) {
        case HWTSTAMP_TX_ONESTEP_SYNC:
-               if (is_sync(skb, type)) {
+               if (ptp_msg_is_sync(skb, type)) {
                        kfree_skb(skb);
                        return;
                }
@@ -2045,8 +2034,6 @@ static bool lan8814_match_rx_ts(struct kszphy_ptp_priv *ptp_priv,
                memset(shhwtstamps, 0, sizeof(*shhwtstamps));
                shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds,
                                                  rx_ts->nsec);
-               netif_rx_ni(skb);
-
                list_del(&rx_ts->list);
                kfree(rx_ts);
 
@@ -2055,6 +2042,8 @@ static bool lan8814_match_rx_ts(struct kszphy_ptp_priv *ptp_priv,
        }
        spin_unlock_irqrestore(&ptp_priv->rx_ts_lock, flags);
 
+       if (ret)
+               netif_rx(skb);
        return ret;
 }
 
@@ -2398,7 +2387,7 @@ static bool lan8814_match_skb(struct kszphy_ptp_priv *ptp_priv,
                shhwtstamps = skb_hwtstamps(skb);
                memset(shhwtstamps, 0, sizeof(*shhwtstamps));
                shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
-               netif_rx_ni(skb);
+               netif_rx(skb);
        }
 
        return ret;
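
dp83640 and lan8814 each carried a private is_sync() helper; the hunks above delete them in favor of a shared ptp_msg_is_sync(). Reconstructed from the removed dp83640 copy, the shared helper is essentially:

    /* Declared in linux/ptp_classify.h; shown here as reconstructed
     * from the driver-local copies this series removes. */
    bool ptp_msg_is_sync(struct sk_buff *skb, unsigned int type)
    {
            struct ptp_header *hdr;

            hdr = ptp_parse_header(skb, type);
            if (!hdr)
                    return false;

            return ptp_get_msgtype(hdr, type) == PTP_MSGTYPE_SYNC;
    }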
index 8292f73..389df3f 100644 (file)
@@ -674,34 +674,6 @@ static int lan87xx_cable_test_get_status(struct phy_device *phydev,
        return 0;
 }
 
-static int lan87xx_read_master_slave(struct phy_device *phydev)
-{
-       int rc = 0;
-
-       phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
-       phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
-
-       rc = phy_read(phydev, MII_CTRL1000);
-       if (rc < 0)
-               return rc;
-
-       if (rc & CTL1000_AS_MASTER)
-               phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE;
-       else
-               phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE;
-
-       rc = phy_read(phydev, MII_STAT1000);
-       if (rc < 0)
-               return rc;
-
-       if (rc & LPA_1000MSRES)
-               phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
-       else
-               phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
-
-       return rc;
-}
-
 static int lan87xx_read_status(struct phy_device *phydev)
 {
        int rc = 0;
@@ -720,7 +692,7 @@ static int lan87xx_read_status(struct phy_device *phydev)
        phydev->pause = 0;
        phydev->asym_pause = 0;
 
-       rc = lan87xx_read_master_slave(phydev);
+       rc = genphy_read_master_slave(phydev);
        if (rc < 0)
                return rc;
 
index ebfeeb3..7e3017e 100644 (file)
@@ -2685,3 +2685,6 @@ MODULE_DEVICE_TABLE(mdio, vsc85xx_tbl);
 MODULE_DESCRIPTION("Microsemi VSC85xx PHY driver");
 MODULE_AUTHOR("Nagaraju Lakkaraju");
 MODULE_LICENSE("Dual MIT/GPL");
+
+MODULE_FIRMWARE(MSCC_VSC8584_REVB_INT8051_FW);
+MODULE_FIRMWARE(MSCC_VSC8574_REVB_INT8051_FW);
index 34f8298..cf728bf 100644 (file)
@@ -1212,7 +1212,7 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
                ts.tv_sec--;
 
        shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
-       netif_rx_ni(skb);
+       netif_rx(skb);
 
        return true;
 }
index 06fdbae..047c581 100644 (file)
@@ -478,7 +478,7 @@ static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
                shhwtstamps_rx = skb_hwtstamps(skb);
                shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
                NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
-               netif_rx_ni(skb);
+               netif_rx(skb);
        }
 
        if (priv->extts) {
index ce0bb59..8406ac7 100644 (file)
@@ -2051,17 +2051,11 @@ static int genphy_setup_master_slave(struct phy_device *phydev)
                                   CTL1000_PREFER_MASTER), ctl);
 }
 
-static int genphy_read_master_slave(struct phy_device *phydev)
+int genphy_read_master_slave(struct phy_device *phydev)
 {
        int cfg, state;
        int val;
 
-       if (!phydev->is_gigabit_capable) {
-               phydev->master_slave_get = MASTER_SLAVE_CFG_UNSUPPORTED;
-               phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED;
-               return 0;
-       }
-
        phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
        phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
 
@@ -2102,6 +2096,7 @@ static int genphy_read_master_slave(struct phy_device *phydev)
 
        return 0;
 }
+EXPORT_SYMBOL(genphy_read_master_slave);
 
 /**
  * genphy_restart_aneg - Enable and Restart Autonegotiation
@@ -2396,14 +2391,18 @@ int genphy_read_status(struct phy_device *phydev)
        if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
                return 0;
 
+       phydev->master_slave_get = MASTER_SLAVE_CFG_UNSUPPORTED;
+       phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED;
        phydev->speed = SPEED_UNKNOWN;
        phydev->duplex = DUPLEX_UNKNOWN;
        phydev->pause = 0;
        phydev->asym_pause = 0;
 
-       err = genphy_read_master_slave(phydev);
-       if (err < 0)
-               return err;
+       if (phydev->is_gigabit_capable) {
+               err = genphy_read_master_slave(phydev);
+               if (err < 0)
+                       return err;
+       }
 
        err = genphy_read_lpa(phydev);
        if (err < 0)
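
With genphy_read_master_slave() exported and the gigabit-capability check moved out to the callers, drivers such as lan87xx above can drop their private copies. A hedged sketch of a caller; example_read_status() is a hypothetical driver callback, not code from this series:

    static int example_read_status(struct phy_device *phydev)
    {
            int rc;

            rc = genphy_update_link(phydev);        /* refresh phydev->link */
            if (rc < 0)
                    return rc;

            /* Fills master_slave_get/master_slave_state; after this change
             * the caller must know the PHY is gigabit-capable. */
            return genphy_read_master_slave(phydev);
    }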
index c1512c9..15aa5ac 100644 (file)
@@ -74,6 +74,12 @@ static const struct sfp_quirk sfp_quirks[] = {
                .vendor = "HUAWEI",
                .part = "MA5671A",
                .modes = sfp_quirk_2500basex,
+       }, {
+               // Lantech 8330-262D-E can operate at 2500base-X, but
+               // incorrectly reports 2500MBd NRZ in its EEPROM
+               .vendor = "Lantech",
+               .part = "8330-262D-E",
+               .modes = sfp_quirk_2500basex,
        }, {
                .vendor = "UBNT",
                .part = "UF-INSTANT",
index 0d491b4..dafd3e9 100644 (file)
@@ -676,7 +676,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl,
        case PLIP_PK_DONE:
                /* Inform the upper layer for the arrival of a packet. */
                rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
-               netif_rx_ni(rcv->skb);
+               netif_rx(rcv->skb);
                dev->stats.rx_bytes += rcv->length.h;
                dev->stats.rx_packets++;
                rcv->skb = NULL;
index 98f586f..88396ff 100644 (file)
@@ -368,7 +368,7 @@ static void sl_bump(struct slip *sl)
        skb_put_data(skb, sl->rbuff, count);
        skb_reset_mac_header(skb);
        skb->protocol = htons(ETH_P_IP);
-       netif_rx_ni(skb);
+       netif_rx(skb);
        dev->stats.rx_packets++;
 }
 
index ba2ef54..c3d4206 100644 (file)
@@ -322,6 +322,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
        struct tap_dev *tap;
        struct tap_queue *q;
        netdev_features_t features = TAP_FEATURES;
+       enum skb_drop_reason drop_reason;
 
        tap = tap_dev_get_rcu(dev);
        if (!tap)
@@ -343,12 +344,16 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
                struct sk_buff *segs = __skb_gso_segment(skb, features, false);
                struct sk_buff *next;
 
-               if (IS_ERR(segs))
+               if (IS_ERR(segs)) {
+                       drop_reason = SKB_DROP_REASON_SKB_GSO_SEG;
                        goto drop;
+               }
 
                if (!segs) {
-                       if (ptr_ring_produce(&q->ring, skb))
+                       if (ptr_ring_produce(&q->ring, skb)) {
+                               drop_reason = SKB_DROP_REASON_FULL_RING;
                                goto drop;
+                       }
                        goto wake_up;
                }
 
@@ -356,8 +361,9 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
                skb_list_walk_safe(segs, skb, next) {
                        skb_mark_not_on_list(skb);
                        if (ptr_ring_produce(&q->ring, skb)) {
-                               kfree_skb(skb);
-                               kfree_skb_list(next);
+                               drop_reason = SKB_DROP_REASON_FULL_RING;
+                               kfree_skb_reason(skb, drop_reason);
+                               kfree_skb_list_reason(next, drop_reason);
                                break;
                        }
                }
@@ -369,10 +375,14 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
                 */
                if (skb->ip_summed == CHECKSUM_PARTIAL &&
                    !(features & NETIF_F_CSUM_MASK) &&
-                   skb_checksum_help(skb))
+                   skb_checksum_help(skb)) {
+                       drop_reason = SKB_DROP_REASON_SKB_CSUM;
                        goto drop;
-               if (ptr_ring_produce(&q->ring, skb))
+               }
+               if (ptr_ring_produce(&q->ring, skb)) {
+                       drop_reason = SKB_DROP_REASON_FULL_RING;
                        goto drop;
+               }
        }
 
 wake_up:
@@ -383,7 +393,7 @@ drop:
        /* Count errors/drops only here, thus don't care about args. */
        if (tap->count_rx_dropped)
                tap->count_rx_dropped(tap);
-       kfree_skb(skb);
+       kfree_skb_reason(skb, drop_reason);
        return RX_HANDLER_CONSUMED;
 }
 EXPORT_SYMBOL_GPL(tap_handle_frame);
@@ -632,6 +642,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
        int depth;
        bool zerocopy = false;
        size_t linear;
+       enum skb_drop_reason drop_reason;
 
        if (q->flags & IFF_VNET_HDR) {
                vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
@@ -696,8 +707,10 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
        else
                err = skb_copy_datagram_from_iter(skb, 0, from, len);
 
-       if (err)
+       if (err) {
+               drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
                goto err_kfree;
+       }
 
        skb_set_network_header(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
@@ -706,8 +719,10 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
        if (vnet_hdr_len) {
                err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
                                            tap_is_little_endian(q));
-               if (err)
+               if (err) {
+                       drop_reason = SKB_DROP_REASON_DEV_HDR;
                        goto err_kfree;
+               }
        }
 
        skb_probe_transport_header(skb);
@@ -738,7 +753,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
        return total_len;
 
 err_kfree:
-       kfree_skb(skb);
+       kfree_skb_reason(skb, drop_reason);
 
 err:
        rcu_read_lock();
index 2a0d8a5..276a0e4 100644 (file)
@@ -1058,6 +1058,7 @@ static unsigned int run_ebpf_filter(struct tun_struct *tun,
 static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct tun_struct *tun = netdev_priv(dev);
+       enum skb_drop_reason drop_reason;
        int txq = skb->queue_mapping;
        struct netdev_queue *queue;
        struct tun_file *tfile;
@@ -1067,8 +1068,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
        tfile = rcu_dereference(tun->tfiles[txq]);
 
        /* Drop packet if interface is not attached */
-       if (!tfile)
+       if (!tfile) {
+               drop_reason = SKB_DROP_REASON_DEV_READY;
                goto drop;
+       }
 
        if (!rcu_dereference(tun->steering_prog))
                tun_automq_xmit(tun, skb);
@@ -1078,19 +1081,32 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Drop if the filter does not like it.
         * This is a noop if the filter is disabled.
         * Filter can be enabled only for the TAP devices. */
-       if (!check_filter(&tun->txflt, skb))
+       if (!check_filter(&tun->txflt, skb)) {
+               drop_reason = SKB_DROP_REASON_TAP_TXFILTER;
                goto drop;
+       }
 
        if (tfile->socket.sk->sk_filter &&
-           sk_filter(tfile->socket.sk, skb))
+           sk_filter(tfile->socket.sk, skb)) {
+               drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
                goto drop;
+       }
 
        len = run_ebpf_filter(tun, skb, len);
-       if (len == 0 || pskb_trim(skb, len))
+       if (len == 0) {
+               drop_reason = SKB_DROP_REASON_TAP_FILTER;
                goto drop;
+       }
 
-       if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
+       if (pskb_trim(skb, len)) {
+               drop_reason = SKB_DROP_REASON_NOMEM;
                goto drop;
+       }
+
+       if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) {
+               drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
+               goto drop;
+       }
 
        skb_tx_timestamp(skb);
 
@@ -1101,8 +1117,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 
        nf_reset_ct(skb);
 
-       if (ptr_ring_produce(&tfile->tx_ring, skb))
+       if (ptr_ring_produce(&tfile->tx_ring, skb)) {
+               drop_reason = SKB_DROP_REASON_FULL_RING;
                goto drop;
+       }
 
        /* NETIF_F_LLTX requires to do our own update of trans_start */
        queue = netdev_get_tx_queue(dev, txq);
@@ -1117,9 +1135,9 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 
 drop:
-       atomic_long_inc(&dev->tx_dropped);
+       dev_core_stats_tx_dropped_inc(dev);
        skb_tx_error(skb);
-       kfree_skb(skb);
+       kfree_skb_reason(skb, drop_reason);
        rcu_read_unlock();
        return NET_XMIT_DROP;
 }
@@ -1273,7 +1291,7 @@ resample:
                void *frame = tun_xdp_to_ptr(xdp);
 
                if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
-                       atomic_long_inc(&dev->tx_dropped);
+                       dev_core_stats_tx_dropped_inc(dev);
                        break;
                }
                nxmit++;
@@ -1608,7 +1626,7 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
                trace_xdp_exception(tun->dev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
-               atomic_long_inc(&tun->dev->rx_dropped);
+               dev_core_stats_rx_dropped_inc(tun->dev);
                break;
        }
 
@@ -1717,6 +1735,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        u32 rxhash = 0;
        int skb_xdp = 1;
        bool frags = tun_napi_frags_enabled(tfile);
+       enum skb_drop_reason drop_reason;
 
        if (!(tun->flags & IFF_NO_PI)) {
                if (len < sizeof(pi))
@@ -1778,7 +1797,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                 */
                skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
                if (IS_ERR(skb)) {
-                       atomic_long_inc(&tun->dev->rx_dropped);
+                       dev_core_stats_rx_dropped_inc(tun->dev);
                        return PTR_ERR(skb);
                }
                if (!skb)
@@ -1807,7 +1826,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 
                if (IS_ERR(skb)) {
                        if (PTR_ERR(skb) != -EAGAIN)
-                               atomic_long_inc(&tun->dev->rx_dropped);
+                               dev_core_stats_rx_dropped_inc(tun->dev);
                        if (frags)
                                mutex_unlock(&tfile->napi_mutex);
                        return PTR_ERR(skb);
@@ -1820,9 +1839,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 
                if (err) {
                        err = -EFAULT;
+                       drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
 drop:
-                       atomic_long_inc(&tun->dev->rx_dropped);
-                       kfree_skb(skb);
+                       dev_core_stats_rx_dropped_inc(tun->dev);
+                       kfree_skb_reason(skb, drop_reason);
                        if (frags) {
                                tfile->napi.skb = NULL;
                                mutex_unlock(&tfile->napi_mutex);
@@ -1856,7 +1876,7 @@ drop:
                                pi.proto = htons(ETH_P_IPV6);
                                break;
                        default:
-                               atomic_long_inc(&tun->dev->rx_dropped);
+                               dev_core_stats_rx_dropped_inc(tun->dev);
                                kfree_skb(skb);
                                return -EINVAL;
                        }
@@ -1869,6 +1889,7 @@ drop:
        case IFF_TAP:
                if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
                        err = -ENOMEM;
+                       drop_reason = SKB_DROP_REASON_HDR_TRUNC;
                        goto drop;
                }
                skb->protocol = eth_type_trans(skb, tun->dev);
@@ -1922,6 +1943,7 @@ drop:
        if (unlikely(!(tun->dev->flags & IFF_UP))) {
                err = -EIO;
                rcu_read_unlock();
+               drop_reason = SKB_DROP_REASON_DEV_READY;
                goto drop;
        }
 
@@ -1934,7 +1956,7 @@ drop:
                                          skb_headlen(skb));
 
                if (unlikely(headlen > skb_headlen(skb))) {
-                       atomic_long_inc(&tun->dev->rx_dropped);
+                       dev_core_stats_rx_dropped_inc(tun->dev);
                        napi_free_frags(&tfile->napi);
                        rcu_read_unlock();
                        mutex_unlock(&tfile->napi_mutex);
@@ -1962,7 +1984,7 @@ drop:
        } else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
                tun_rx_batched(tun, tfile, skb, more);
        } else {
-               netif_rx_ni(skb);
+               netif_rx(skb);
        }
        rcu_read_unlock();
 
index 4334aaf..2c81236 100644 (file)
 #define AX_EEPROM_MAGIC                0xdeadbeef
 #define AX_EEPROM_LEN          0x200
 
+#define AX_EMBD_PHY_ADDR       0x10
+
 /* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
 struct asix_data {
        u8 multi_filter[AX_MCAST_FILTER_SIZE];
@@ -177,14 +179,16 @@ struct asix_rx_fixup_info {
 struct asix_common_private {
        void (*resume)(struct usbnet *dev);
        void (*suspend)(struct usbnet *dev);
+       int (*reset)(struct usbnet *dev, int in_pm);
        u16 presvd_phy_advertise;
        u16 presvd_phy_bmcr;
        struct asix_rx_fixup_info rx_fixup_info;
        struct mii_bus *mdio;
        struct phy_device *phydev;
+       struct phy_device *phydev_int;
        u16 phy_addr;
-       char phy_name[20];
        bool embd_phy;
+       u8 chipcode;
 };
 
 extern const struct driver_info ax88172a_info;
index 6ea44e5..38e47a9 100644 (file)
@@ -450,7 +450,6 @@ static int ax88772a_hw_reset(struct usbnet *dev, int in_pm)
        struct asix_data *data = (struct asix_data *)&dev->data;
        struct asix_common_private *priv = dev->driver_priv;
        u16 rx_ctl, phy14h, phy15h, phy16h;
-       u8 chipcode = 0;
        int ret;
 
        ret = asix_write_gpio(dev, AX_GPIO_RSE, 5, in_pm);
@@ -493,12 +492,7 @@ static int ax88772a_hw_reset(struct usbnet *dev, int in_pm)
                goto out;
        }
 
-       ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0,
-                           0, 1, &chipcode, in_pm);
-       if (ret < 0)
-               goto out;
-
-       if ((chipcode & AX_CHIPCODE_MASK) == AX_AX88772B_CHIPCODE) {
+       if (priv->chipcode == AX_AX88772B_CHIPCODE) {
                ret = asix_write_cmd(dev, AX_QCTCTRL, 0x8000, 0x8001,
                                     0, NULL, in_pm);
                if (ret < 0) {
@@ -506,7 +500,7 @@ static int ax88772a_hw_reset(struct usbnet *dev, int in_pm)
                                   ret);
                        goto out;
                }
-       } else if ((chipcode & AX_CHIPCODE_MASK) == AX_AX88772A_CHIPCODE) {
+       } else if (priv->chipcode == AX_AX88772A_CHIPCODE) {
                /* Check if the PHY registers have default settings */
                phy14h = asix_mdio_read_nopm(dev->net, dev->mii.phy_id,
                                             AX88772A_PHY14H);
@@ -625,27 +619,13 @@ static void ax88772_resume(struct usbnet *dev)
        int i;
 
        for (i = 0; i < 3; i++)
-               if (!ax88772_hw_reset(dev, 1))
+               if (!priv->reset(dev, 1))
                        break;
 
        if (netif_running(dev->net))
                phy_start(priv->phydev);
 }
 
-static void ax88772a_resume(struct usbnet *dev)
-{
-       struct asix_common_private *priv = dev->driver_priv;
-       int i;
-
-       for (i = 0; i < 3; i++) {
-               if (!ax88772a_hw_reset(dev, 1))
-                       break;
-       }
-
-       if (netif_running(dev->net))
-               phy_start(priv->phydev);
-}
-
 static int asix_resume(struct usb_interface *intf)
 {
        struct usbnet *dev = usb_get_intfdata(intf);
@@ -681,15 +661,16 @@ static int ax88772_init_phy(struct usbnet *dev)
        struct asix_common_private *priv = dev->driver_priv;
        int ret;
 
-       snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT,
-                priv->mdio->id, priv->phy_addr);
+       priv->phydev = mdiobus_get_phy(priv->mdio, priv->phy_addr);
+       if (!priv->phydev) {
+               netdev_err(dev->net, "Could not find PHY\n");
+               return -ENODEV;
+       }
 
-       priv->phydev = phy_connect(dev->net, priv->phy_name, &asix_adjust_link,
-                                  PHY_INTERFACE_MODE_INTERNAL);
-       if (IS_ERR(priv->phydev)) {
-               netdev_err(dev->net, "Could not connect to PHY device %s\n",
-                          priv->phy_name);
-               ret = PTR_ERR(priv->phydev);
+       ret = phy_connect_direct(dev->net, priv->phydev, &asix_adjust_link,
+                                PHY_INTERFACE_MODE_INTERNAL);
+       if (ret) {
+               netdev_err(dev->net, "Could not connect PHY\n");
                return ret;
        }
 
@@ -698,13 +679,29 @@ static int ax88772_init_phy(struct usbnet *dev)
 
        phy_attached_info(priv->phydev);
 
+       if (priv->embd_phy)
+               return 0;
+
+       /* In case the main PHY is not the embedded PHY and the MAC is the
+        * RMII clock provider, we need to suspend the embedded PHY while
+        * keeping the PLL enabled (AX_SWRESET_IPPD == 0).
+        */
+       priv->phydev_int = mdiobus_get_phy(priv->mdio, AX_EMBD_PHY_ADDR);
+       if (!priv->phydev_int) {
+               netdev_err(dev->net, "Could not find internal PHY\n");
+               return -ENODEV;
+       }
+
+       priv->phydev_int->mac_managed_pm = 1;
+       phy_suspend(priv->phydev_int);
+
        return 0;
 }
 
 static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
 {
-       u8 buf[ETH_ALEN] = {0}, chipcode = 0;
        struct asix_common_private *priv;
+       u8 buf[ETH_ALEN] = {0};
        int ret, i;
 
        priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL);
@@ -753,19 +750,25 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
                return ret;
 
        priv->phy_addr = ret;
-       priv->embd_phy = ((priv->phy_addr & 0x1f) == 0x10);
+       priv->embd_phy = ((priv->phy_addr & 0x1f) == AX_EMBD_PHY_ADDR);
 
-       ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0);
+       ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1,
+                           &priv->chipcode, 0);
        if (ret < 0) {
                netdev_dbg(dev->net, "Failed to read STATMNGSTS_REG: %d\n", ret);
                return ret;
        }
 
-       chipcode &= AX_CHIPCODE_MASK;
+       priv->chipcode &= AX_CHIPCODE_MASK;
 
-       ret = (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
-                                                 ax88772a_hw_reset(dev, 0);
+       priv->resume = ax88772_resume;
+       priv->suspend = ax88772_suspend;
+       if (priv->chipcode == AX_AX88772_CHIPCODE)
+               priv->reset = ax88772_hw_reset;
+       else
+               priv->reset = ax88772a_hw_reset;
 
+       ret = priv->reset(dev, 0);
        if (ret < 0) {
                netdev_dbg(dev->net, "Failed to reset AX88772: %d\n", ret);
                return ret;
@@ -780,13 +783,6 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
 
        priv->presvd_phy_bmcr = 0;
        priv->presvd_phy_advertise = 0;
-       if (chipcode == AX_AX88772_CHIPCODE) {
-               priv->resume = ax88772_resume;
-               priv->suspend = ax88772_suspend;
-       } else {
-               priv->resume = ax88772a_resume;
-               priv->suspend = ax88772_suspend;
-       }
 
        ret = ax88772_init_mdio(dev);
        if (ret)
index 5567220..4ef61f6 100644 (file)
@@ -86,9 +86,10 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
        ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
                 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                 0, index, &buf, 4);
-       if (unlikely(ret < 0)) {
-               netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
-                           index, ret);
+       if (ret < 0) {
+               if (ret != -ENODEV)
+                       netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
+                                   index, ret);
                return ret;
        }
 
@@ -118,7 +119,7 @@ static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index,
        ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT
                 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                 0, index, &buf, 4);
-       if (unlikely(ret < 0))
+       if (ret < 0 && ret != -ENODEV)
                netdev_warn(dev->net, "Failed to write reg index 0x%08x: %d\n",
                            index, ret);
 
@@ -161,6 +162,9 @@ static int __must_check __smsc95xx_phy_wait_not_busy(struct usbnet *dev,
        do {
                ret = __smsc95xx_read_reg(dev, MII_ADDR, &val, in_pm);
                if (ret < 0) {
+                       /* Ignore -ENODEV error during disconnect() */
+                       if (ret == -ENODEV)
+                               return 0;
                        netdev_warn(dev->net, "Error reading MII_ACCESS\n");
                        return ret;
                }
@@ -196,7 +200,8 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
        addr = mii_address_cmd(phy_id, idx, MII_READ_ | MII_BUSY_);
        ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
        if (ret < 0) {
-               netdev_warn(dev->net, "Error writing MII_ADDR\n");
+               if (ret != -ENODEV)
+                       netdev_warn(dev->net, "Error writing MII_ADDR\n");
                goto done;
        }
 
@@ -208,7 +213,8 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
 
        ret = __smsc95xx_read_reg(dev, MII_DATA, &val, in_pm);
        if (ret < 0) {
-               netdev_warn(dev->net, "Error reading MII_DATA\n");
+               if (ret != -ENODEV)
+                       netdev_warn(dev->net, "Error reading MII_DATA\n");
                goto done;
        }
 
@@ -216,6 +222,10 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
 
 done:
        mutex_unlock(&dev->phy_mutex);
+
+       /* Ignore -ENODEV error during disconnect() */
+       if (ret == -ENODEV)
+               return 0;
        return ret;
 }
 
@@ -237,7 +247,8 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id,
        val = regval;
        ret = __smsc95xx_write_reg(dev, MII_DATA, val, in_pm);
        if (ret < 0) {
-               netdev_warn(dev->net, "Error writing MII_DATA\n");
+               if (ret != -ENODEV)
+                       netdev_warn(dev->net, "Error writing MII_DATA\n");
                goto done;
        }
 
@@ -245,7 +256,8 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id,
        addr = mii_address_cmd(phy_id, idx, MII_WRITE_ | MII_BUSY_);
        ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
        if (ret < 0) {
-               netdev_warn(dev->net, "Error writing MII_ADDR\n");
+               if (ret != -ENODEV)
+                       netdev_warn(dev->net, "Error writing MII_ADDR\n");
                goto done;
        }
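
Every smsc95xx hunk here serves one goal: once the adapter is unplugged, each USB transfer fails with -ENODEV, and warning about every register access during disconnect() only floods the log. The pattern, sketched with read_hw_register() as a hypothetical stand-in for the driver's accessor:

    static int example_read_reg(struct usbnet *dev, u32 index, u32 *val)
    {
            int ret = read_hw_register(dev, index, val);

            if (ret < 0) {
                    /* -ENODEV just means the device is gone; stay quiet. */
                    if (ret != -ENODEV)
                            netdev_warn(dev->net,
                                        "Failed to read reg index 0x%08x: %d\n",
                                        index, ret);
                    return ret;
            }
            return 0;
    }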
 
index 714cafc..85e3624 100644 (file)
@@ -472,14 +472,13 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
 
        memset(&fl6, 0, sizeof(fl6));
        /* needed to match OIF rule */
-       fl6.flowi6_oif = dev->ifindex;
+       fl6.flowi6_l3mdev = dev->ifindex;
        fl6.flowi6_iif = LOOPBACK_IFINDEX;
        fl6.daddr = iph->daddr;
        fl6.saddr = iph->saddr;
        fl6.flowlabel = ip6_flowinfo(iph);
        fl6.flowi6_mark = skb->mark;
        fl6.flowi6_proto = iph->nexthdr;
-       fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
 
        dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL);
        if (IS_ERR(dst) || dst == dst_null)
@@ -551,10 +550,10 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
 
        memset(&fl4, 0, sizeof(fl4));
        /* needed to match OIF rule */
-       fl4.flowi4_oif = vrf_dev->ifindex;
+       fl4.flowi4_l3mdev = vrf_dev->ifindex;
        fl4.flowi4_iif = LOOPBACK_IFINDEX;
        fl4.flowi4_tos = RT_TOS(ip4h->tos);
-       fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
+       fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
        fl4.flowi4_proto = ip4h->protocol;
        fl4.daddr = ip4h->daddr;
        fl4.saddr = ip4h->saddr;
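
The vrf hunks stop overloading flowi*_oif together with FLOWI_FLAG_SKIP_NH_OIF and instead set the new dedicated flowi*_l3mdev field to scope the FIB lookup to the VRF. A hedged sketch of an IPv4 lookup keyed to an L3 master device; vrf_dev and daddr are assumed inputs and error handling is elided:

    static struct rtable *example_vrf_lookup(struct net *net,
                                             struct net_device *vrf_dev,
                                             __be32 daddr)
    {
            struct flowi4 fl4;

            memset(&fl4, 0, sizeof(fl4));
            fl4.flowi4_l3mdev = vrf_dev->ifindex;   /* scope to this VRF */
            fl4.flowi4_iif = LOOPBACK_IFINDEX;
            fl4.daddr = daddr;

            return ip_route_output_key(net, &fl4);
    }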
index 4ab09dd..de97ff9 100644 (file)
@@ -811,37 +811,35 @@ static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
                goto err_inval;
        }
 
-       if (nh) {
-               if (!nexthop_get(nh)) {
-                       NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
-                       nh = NULL;
-                       goto err_inval;
-               }
-               if (!nexthop_is_fdb(nh)) {
-                       NL_SET_ERR_MSG(extack, "Nexthop is not a fdb nexthop");
-                       goto err_inval;
-               }
+       if (!nexthop_get(nh)) {
+               NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
+               nh = NULL;
+               goto err_inval;
+       }
+       if (!nexthop_is_fdb(nh)) {
+               NL_SET_ERR_MSG(extack, "Nexthop is not a fdb nexthop");
+               goto err_inval;
+       }
 
-               if (!nexthop_is_multipath(nh)) {
-                       NL_SET_ERR_MSG(extack, "Nexthop is not a multipath group");
+       if (!nexthop_is_multipath(nh)) {
+               NL_SET_ERR_MSG(extack, "Nexthop is not a multipath group");
+               goto err_inval;
+       }
+
+       /* check nexthop group family */
+       switch (vxlan->default_dst.remote_ip.sa.sa_family) {
+       case AF_INET:
+               if (!nexthop_has_v4(nh)) {
+                       err = -EAFNOSUPPORT;
+                       NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
                        goto err_inval;
                }
-
-               /* check nexthop group family */
-               switch (vxlan->default_dst.remote_ip.sa.sa_family) {
-               case AF_INET:
-                       if (!nexthop_has_v4(nh)) {
-                               err = -EAFNOSUPPORT;
-                               NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
-                               goto err_inval;
-                       }
-                       break;
-               case AF_INET6:
-                       if (nexthop_has_v4(nh)) {
-                               err = -EAFNOSUPPORT;
-                               NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
-                               goto err_inval;
-                       }
+               break;
+       case AF_INET6:
+               if (nexthop_has_v4(nh)) {
+                       err = -EAFNOSUPPORT;
+                       NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
+                       goto err_inval;
                }
        }
 
@@ -1762,7 +1760,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
 
        if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
                rcu_read_unlock();
-               atomic_long_inc(&vxlan->dev->rx_dropped);
+               dev_core_stats_rx_dropped_inc(vxlan->dev);
                vxlan_vnifilter_count(vxlan, vni, vninode,
                                      VXLAN_VNI_STATS_RX_DROPS, 0);
                goto drop;
@@ -1877,7 +1875,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
                reply->ip_summed = CHECKSUM_UNNECESSARY;
                reply->pkt_type = PACKET_HOST;
 
-               if (netif_rx_ni(reply) == NET_RX_DROP) {
+               if (netif_rx(reply) == NET_RX_DROP) {
                        dev->stats.rx_dropped++;
                        vxlan_vnifilter_count(vxlan, vni, NULL,
                                              VXLAN_VNI_STATS_RX_DROPS, 0);
@@ -2036,7 +2034,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
                if (reply == NULL)
                        goto out;
 
-               if (netif_rx_ni(reply) == NET_RX_DROP) {
+               if (netif_rx(reply) == NET_RX_DROP) {
                        dev->stats.rx_dropped++;
                        vxlan_vnifilter_count(vxlan, vni, NULL,
                                              VXLAN_VNI_STATS_RX_DROPS, 0);
@@ -2992,7 +2990,6 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
 static int vxlan_stop(struct net_device *dev)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
-       int ret = 0;
 
        vxlan_multicast_leave(vxlan);
 
@@ -3001,7 +2998,7 @@ static int vxlan_stop(struct net_device *dev)
        vxlan_flush(vxlan, false);
        vxlan_sock_release(vxlan);
 
-       return ret;
+       return 0;
 }
 
 /* Stub, nothing needs to be done. */
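
Like tun above, vxlan now counts drops with dev_core_stats_rx_dropped_inc() instead of atomic_long_inc() on dev->rx_dropped; the helper bumps lazily allocated per-CPU storage, so concurrently dropping CPUs stop bouncing one shared cache line. A generic sketch of the call-site shape:

    if (unlikely(!(dev->flags & IFF_UP))) {
            /* Per-CPU core stats; was atomic_long_inc(&dev->rx_dropped). */
            dev_core_stats_rx_dropped_inc(dev);
            kfree_skb(skb);
            return NET_RX_DROP;
    }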
index 141c1b5..9cabd34 100644 (file)
@@ -104,7 +104,7 @@ static void ar5523_cmd_rx_cb(struct urb *urb)
        }
 
        if (urb->actual_length < sizeof(struct ar5523_cmd_hdr)) {
-               ar5523_err(ar, "RX USB to short.\n");
+               ar5523_err(ar, "RX USB too short.\n");
                goto skip;
        }
 
index 9ad64ca..771252d 100644 (file)
@@ -429,7 +429,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                                RX_MSDU_END_INFO0_LAST_MSDU;
 
                /* FIXME: why are we skipping the first part of the rx_desc? */
-               trace_ath10k_htt_rx_desc(ar, rx_desc + sizeof(u32),
+               trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32),
                                         hw->rx_desc_ops->rx_desc_size - sizeof(u32));
 
                if (last_msdu)
index 681e1ab..8328966 100644 (file)
@@ -1551,11 +1551,11 @@ static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
        node = of_parse_phandle(dev->of_node, "memory-region", 0);
        if (node) {
                ret = of_address_to_resource(node, 0, &r);
+               of_node_put(node);
                if (ret) {
                        dev_err(dev, "failed to resolve msa fixed region\n");
                        return ret;
                }
-               of_node_put(node);
 
                ar->msa.paddr = r.start;
                ar->msa.mem_size = resource_size(&r);
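
The hunk above fixes a device-tree node leak: of_parse_phandle() returns the node with a reference held, and the old code dropped that reference only on the success path. Moving of_node_put() in front of the error check balances it on both outcomes:

    node = of_parse_phandle(dev->of_node, "memory-region", 0);
    if (node) {
            ret = of_address_to_resource(node, 0, &r);
            of_node_put(node);      /* dropped whether or not ret != 0 */
            if (ret)
                    return ret;     /* no longer leaks the node here */
    }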
index 25e0ad3..b4733b5 100644 (file)
@@ -17,7 +17,7 @@ struct ath10k_fw_file;
 struct ath10k_swap_code_seg_tlv {
        __le32 address;
        __le32 length;
-       u8 data[0];
+       u8 data[];
 } __packed;
 
 struct ath10k_swap_code_seg_tail {
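
The [0]-sized trailing array becomes a C99 flexible array member (ath11k's ce.h below gets the same change); zero-length arrays are a GNU extension that defeats compiler and fortify bounds checking, while data[] is standard and carries no declared size. Allocation and use are unchanged, as this small self-contained example shows:

    #include <stdlib.h>
    #include <string.h>

    struct seg {
            unsigned int length;
            unsigned char data[];   /* C99 flexible array member */
    };

    static struct seg *seg_new(const void *src, unsigned int len)
    {
            /* sizeof(*s) excludes data[]; add the payload explicitly. */
            struct seg *s = malloc(sizeof(*s) + len);

            if (!s)
                    return NULL;
            s->length = len;
            memcpy(s->data, src, len);
            return s;
    }

    int main(void)
    {
            struct seg *s = seg_new("abc", 3);

            free(s);
            return 0;
    }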
index 2b78ed8..cd438f7 100644 (file)
@@ -2611,36 +2611,9 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
                ath10k_mac_handle_beacon(ar, skb);
 
        if (ieee80211_is_beacon(hdr->frame_control) ||
-           ieee80211_is_probe_resp(hdr->frame_control)) {
-               struct ieee80211_mgmt *mgmt = (void *)skb->data;
-               enum cfg80211_bss_frame_type ftype;
-               u8 *ies;
-               int ies_ch;
-
+           ieee80211_is_probe_resp(hdr->frame_control))
                status->boottime_ns = ktime_get_boottime_ns();
 
-               if (!ar->scan_channel)
-                       goto drop;
-
-               ies = mgmt->u.beacon.variable;
-
-               if (ieee80211_is_beacon(mgmt->frame_control))
-                       ftype = CFG80211_BSS_FTYPE_BEACON;
-               else
-                       ftype = CFG80211_BSS_FTYPE_PRESP;
-
-               ies_ch = cfg80211_get_ies_channel_number(mgmt->u.beacon.variable,
-                                                        skb_tail_pointer(skb) - ies,
-                                                        sband->band, ftype);
-
-               if (ies_ch > 0 && ies_ch != channel) {
-                       ath10k_dbg(ar, ATH10K_DBG_MGMT,
-                                  "channel mismatched ds channel %d scan channel %d\n",
-                                  ies_ch, channel);
-                       goto drop;
-               }
-       }
-
        ath10k_dbg(ar, ATH10K_DBG_MGMT,
                   "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
                   skb, skb->len,
@@ -2654,10 +2627,6 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
        ieee80211_rx_ni(ar->hw, skb);
 
        return 0;
-
-drop:
-       dev_kfree_skb(skb);
-       return 0;
 }
 
 static int freq_to_idx(struct ath10k *ar, int freq)
index 3fb0aa0..f407d4a 100644 (file)
@@ -391,6 +391,8 @@ static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
 
                for (j = 0; j < irq_grp->num_irq; j++)
                        free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+
+               netif_napi_del(&irq_grp->napi);
        }
 }
 
@@ -466,7 +468,7 @@ static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab)
+static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
 {
        struct ath11k_hw_params *hw = &ab->hw_params;
        int i, j;
@@ -574,7 +576,7 @@ static int ath11k_ahb_config_irq(struct ath11k_base *ab)
        }
 
        /* Configure external interrupts */
-       ret = ath11k_ahb_ext_irq_config(ab);
+       ret = ath11k_ahb_config_ext_irq(ab);
 
        return ret;
 }
index 8255b6c..9644ff9 100644 (file)
@@ -145,7 +145,7 @@ struct ath11k_ce_ring {
        u32 hal_ring_id;
 
        /* keep last */
-       struct sk_buff *skb[0];
+       struct sk_buff *skb[];
 };
 
 struct ath11k_ce_pipe {
index 7c508e9..71eb7d0 100644 (file)
@@ -99,6 +99,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .supports_rssi_stats = false,
                .fw_wmi_diag_event = false,
                .current_cc_support = false,
+               .dbr_debug_support = true,
        },
        {
                .hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -164,6 +165,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .supports_rssi_stats = false,
                .fw_wmi_diag_event = false,
                .current_cc_support = false,
+               .dbr_debug_support = true,
        },
        {
                .name = "qca6390 hw2.0",
@@ -228,6 +230,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .supports_rssi_stats = true,
                .fw_wmi_diag_event = true,
                .current_cc_support = true,
+               .dbr_debug_support = false,
        },
        {
                .name = "qcn9074 hw1.0",
@@ -292,6 +295,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .supports_rssi_stats = false,
                .fw_wmi_diag_event = false,
                .current_cc_support = false,
+               .dbr_debug_support = true,
        },
        {
                .name = "wcn6855 hw2.0",
@@ -356,6 +360,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .supports_rssi_stats = true,
                .fw_wmi_diag_event = true,
                .current_cc_support = true,
+               .dbr_debug_support = false,
        },
        {
                .name = "wcn6855 hw2.1",
@@ -419,6 +424,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .supports_rssi_stats = true,
                .fw_wmi_diag_event = true,
                .current_cc_support = true,
+               .dbr_debug_support = false,
        },
 };
 
@@ -1411,7 +1417,6 @@ EXPORT_SYMBOL(ath11k_core_deinit);
 
 void ath11k_core_free(struct ath11k_base *ab)
 {
-       flush_workqueue(ab->workqueue);
        destroy_workqueue(ab->workqueue);
 
        kfree(ab);
index 4eda15c..c0228e9 100644 (file)
@@ -263,6 +263,9 @@ struct ath11k_vif {
        bool bcca_zero_sent;
        bool do_not_send_tmpl;
        struct ieee80211_chanctx_conf chanctx;
+#ifdef CONFIG_ATH11K_DEBUGFS
+       struct dentry *debugfs_twt;
+#endif /* CONFIG_ATH11K_DEBUGFS */
 };
 
 struct ath11k_vif_iter {
@@ -441,6 +444,8 @@ struct ath11k_dbg_htt_stats {
        spinlock_t lock;
 };
 
+#define MAX_MODULE_ID_BITMAP_WORDS     16
+
 struct ath11k_debug {
        struct dentry *debugfs_pdev;
        struct ath11k_dbg_htt_stats htt_stats;
@@ -454,6 +459,9 @@ struct ath11k_debug {
        u32 pktlog_peer_valid;
        u8 pktlog_peer_addr[ETH_ALEN];
        u32 rx_filter;
+       u32 mem_offset;
+       u32 module_id_bitmap[MAX_MODULE_ID_BITMAP_WORDS];
+       struct ath11k_debug_dbr *dbr_debug[WMI_DIRECT_BUF_MAX];
 };
 
 struct ath11k_per_peer_tx_stats {
@@ -604,6 +612,7 @@ struct ath11k {
        bool pending_11d;
        bool regdom_set_by_user;
        int hw_rate_code;
+       u8 twt_enabled;
 };
 
 struct ath11k_band_cap {
@@ -807,7 +816,7 @@ struct ath11k_base {
        } id;
 
        /* must be last */
-       u8 drv_priv[0] __aligned(sizeof(void *));
+       u8 drv_priv[] __aligned(sizeof(void *));
 };
 
 struct ath11k_fw_stats_pdev {
index eda67eb..2107ec0 100644 (file)
@@ -37,7 +37,8 @@ static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
 
 static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
                                        struct ath11k_dbring *ring,
-                                       struct ath11k_dbring_element *buff)
+                                       struct ath11k_dbring_element *buff,
+                                       enum wmi_direct_buffer_module id)
 {
        struct ath11k_base *ab = ar->ab;
        struct hal_srng *srng;
@@ -84,6 +85,7 @@ static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
 
        ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);
 
+       ath11k_debugfs_add_dbring_entry(ar, id, ATH11K_DBG_DBR_EVENT_REPLENISH, srng);
        ath11k_hal_srng_access_end(ab, srng);
 
        return 0;
@@ -101,7 +103,8 @@ err:
 }
 
 static int ath11k_dbring_fill_bufs(struct ath11k *ar,
-                                  struct ath11k_dbring *ring)
+                                  struct ath11k_dbring *ring,
+                                  enum wmi_direct_buffer_module id)
 {
        struct ath11k_dbring_element *buff;
        struct hal_srng *srng;
@@ -129,7 +132,7 @@ static int ath11k_dbring_fill_bufs(struct ath11k *ar,
                        kfree(buff);
                        break;
                }
-               ret = ath11k_dbring_bufs_replenish(ar, ring, buff);
+               ret = ath11k_dbring_bufs_replenish(ar, ring, buff, id);
                if (ret) {
                        ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
                                    num_remain, req_entries);
@@ -210,7 +213,7 @@ int ath11k_dbring_buf_setup(struct ath11k *ar,
        ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
        ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);
 
-       ret = ath11k_dbring_fill_bufs(ar, ring);
+       ret = ath11k_dbring_fill_bufs(ar, ring, db_cap->id);
 
        return ret;
 }
@@ -270,7 +273,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
        struct ath11k_buffer_addr desc;
        u8 *vaddr_unalign;
        u32 num_entry, num_buff_reaped;
-       u8 pdev_idx, rbm;
+       u8 pdev_idx, rbm, module_id;
        u32 cookie;
        int buf_id;
        int size;
@@ -278,6 +281,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
        int ret = 0;
 
        pdev_idx = ev->fixed.pdev_id;
+       module_id = ev->fixed.module_id;
 
        if (pdev_idx >= ab->num_radios) {
                ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
@@ -346,6 +350,9 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
                dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
                                 DMA_FROM_DEVICE);
 
+               ath11k_debugfs_add_dbring_entry(ar, module_id,
+                                               ATH11K_DBG_DBR_EVENT_RX, srng);
+
                if (ring->handler) {
                        vaddr_unalign = buff->payload;
                        handler_data.data = PTR_ALIGN(vaddr_unalign,
@@ -357,7 +364,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
 
                buff->paddr = 0;
                memset(buff->payload, 0, size);
-               ath11k_dbring_bufs_replenish(ar, ring, buff);
+               ath11k_dbring_bufs_replenish(ar, ring, buff, module_id);
        }
 
        spin_unlock_bh(&srng->lock);
index 215b601..a82266c 100644
@@ -52,6 +52,45 @@ static const char *htt_bp_lmac_ring[HTT_SW_LMAC_RING_IDX_MAX] = {
        "MONITOR_DEST_RING",
 };
 
+void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+                                    enum wmi_direct_buffer_module id,
+                                    enum ath11k_dbg_dbr_event event,
+                                    struct hal_srng *srng)
+{
+       struct ath11k_debug_dbr *dbr_debug;
+       struct ath11k_dbg_dbr_data *dbr_data;
+       struct ath11k_dbg_dbr_entry *entry;
+
+       if (id >= WMI_DIRECT_BUF_MAX || event >= ATH11K_DBG_DBR_EVENT_MAX)
+               return;
+
+       dbr_debug = ar->debug.dbr_debug[id];
+       if (!dbr_debug)
+               return;
+
+       if (!dbr_debug->dbr_debug_enabled)
+               return;
+
+       dbr_data = &dbr_debug->dbr_dbg_data;
+
+       spin_lock_bh(&dbr_data->lock);
+
+       if (dbr_data->entries) {
+               entry = &dbr_data->entries[dbr_data->dbr_debug_idx];
+               entry->hp = srng->u.src_ring.hp;
+               entry->tp = *srng->u.src_ring.tp_addr;
+               entry->timestamp = jiffies;
+               entry->event = event;
+
+               dbr_data->dbr_debug_idx++;
+               if (dbr_data->dbr_debug_idx ==
+                   dbr_data->num_ring_debug_entries)
+                       dbr_data->dbr_debug_idx = 0;
+       }
+
+       spin_unlock_bh(&dbr_data->lock);
+}
+
 static void ath11k_fw_stats_pdevs_free(struct list_head *head)
 {
        struct ath11k_fw_stats_pdev *i, *tmp;
@@ -876,6 +915,69 @@ static const struct file_operations fops_soc_dp_stats = {
        .llseek = default_llseek,
 };
 
+static ssize_t ath11k_write_fw_dbglog(struct file *file,
+                                     const char __user *user_buf,
+                                     size_t count, loff_t *ppos)
+{
+       struct ath11k *ar = file->private_data;
+       char buf[128] = {0};
+       struct ath11k_fw_dbglog dbglog;
+       unsigned int param, mod_id_index, is_end;
+       u64 value;
+       int ret, num;
+
+       ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+                                    user_buf, count);
+       if (ret <= 0)
+               return ret;
+
+       num = sscanf(buf, "%u %llx %u %u", &param, &value, &mod_id_index, &is_end);
+
+       if (num < 2)
+               return -EINVAL;
+
+       mutex_lock(&ar->conf_mutex);
+       if (param == WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP ||
+           param == WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP) {
+               if (num != 4 || mod_id_index > (MAX_MODULE_ID_BITMAP_WORDS - 1)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               ar->debug.module_id_bitmap[mod_id_index] = upper_32_bits(value);
+               if (!is_end) {
+                       ret = count;
+                       goto out;
+               }
+       } else {
+               if (num != 2) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+
+       dbglog.param = param;
+       dbglog.value = lower_32_bits(value);
+       ret = ath11k_wmi_fw_dbglog_cfg(ar, ar->debug.module_id_bitmap, &dbglog);
+       if (ret) {
+               ath11k_warn(ar->ab, "fw dbglog config failed from debugfs: %d\n",
+                           ret);
+               goto out;
+       }
+
+       ret = count;
+
+out:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+static const struct file_operations fops_fw_dbglog = {
+       .write = ath11k_write_fw_dbglog,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
 int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
 {
        if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
@@ -1113,6 +1215,169 @@ static const struct file_operations fops_simulate_radar = {
        .open = simple_open
 };
 
+static ssize_t ath11k_debug_dump_dbr_entries(struct file *file,
+                                            char __user *user_buf,
+                                            size_t count, loff_t *ppos)
+{
+       struct ath11k_dbg_dbr_data *dbr_dbg_data = file->private_data;
+       static const char * const event_id_to_string[] = {"empty", "Rx", "Replenish"};
+       int size = ATH11K_DEBUG_DBR_ENTRIES_MAX * 100;
+       char *buf;
+       int i, ret;
+       int len = 0;
+
+       buf = kzalloc(size, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       len += scnprintf(buf + len, size - len,
+                        "-----------------------------------------\n");
+       len += scnprintf(buf + len, size - len,
+                        "| idx |  hp  |  tp  | timestamp |  event |\n");
+       len += scnprintf(buf + len, size - len,
+                        "-----------------------------------------\n");
+
+       spin_lock_bh(&dbr_dbg_data->lock);
+
+       for (i = 0; i < dbr_dbg_data->num_ring_debug_entries; i++) {
+               len += scnprintf(buf + len, size - len,
+                                "|%4u|%8u|%8u|%11llu|%8s|\n", i,
+                                dbr_dbg_data->entries[i].hp,
+                                dbr_dbg_data->entries[i].tp,
+                                dbr_dbg_data->entries[i].timestamp,
+                                event_id_to_string[dbr_dbg_data->entries[i].event]);
+       }
+
+       spin_unlock_bh(&dbr_dbg_data->lock);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       kfree(buf);
+
+       return ret;
+}
+
+static const struct file_operations fops_debug_dump_dbr_entries = {
+       .read = ath11k_debug_dump_dbr_entries,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+static void ath11k_debugfs_dbr_dbg_destroy(struct ath11k *ar, int dbr_id)
+{
+       struct ath11k_debug_dbr *dbr_debug;
+       struct ath11k_dbg_dbr_data *dbr_dbg_data;
+
+       if (!ar->debug.dbr_debug[dbr_id])
+               return;
+
+       dbr_debug = ar->debug.dbr_debug[dbr_id];
+       dbr_dbg_data = &dbr_debug->dbr_dbg_data;
+
+       debugfs_remove_recursive(dbr_debug->dbr_debugfs);
+       kfree(dbr_dbg_data->entries);
+       kfree(dbr_debug);
+       ar->debug.dbr_debug[dbr_id] = NULL;
+}
+
+static int ath11k_debugfs_dbr_dbg_init(struct ath11k *ar, int dbr_id)
+{
+       struct ath11k_debug_dbr *dbr_debug;
+       struct ath11k_dbg_dbr_data *dbr_dbg_data;
+       static const char * const dbr_id_to_str[] = {"spectral", "CFR"};
+
+       if (ar->debug.dbr_debug[dbr_id])
+               return 0;
+
+       ar->debug.dbr_debug[dbr_id] = kzalloc(sizeof(*dbr_debug),
+                                             GFP_KERNEL);
+
+       if (!ar->debug.dbr_debug[dbr_id])
+               return -ENOMEM;
+
+       dbr_debug = ar->debug.dbr_debug[dbr_id];
+       dbr_dbg_data = &dbr_debug->dbr_dbg_data;
+
+       if (dbr_debug->dbr_debugfs)
+               return 0;
+
+       dbr_debug->dbr_debugfs = debugfs_create_dir(dbr_id_to_str[dbr_id],
+                                                   ar->debug.debugfs_pdev);
+       if (IS_ERR_OR_NULL(dbr_debug->dbr_debugfs)) {
+               if (IS_ERR(dbr_debug->dbr_debugfs))
+                       return PTR_ERR(dbr_debug->dbr_debugfs);
+               return -ENOMEM;
+       }
+
+       dbr_debug->dbr_debug_enabled = true;
+       dbr_dbg_data->num_ring_debug_entries = ATH11K_DEBUG_DBR_ENTRIES_MAX;
+       dbr_dbg_data->dbr_debug_idx = 0;
+       dbr_dbg_data->entries = kcalloc(ATH11K_DEBUG_DBR_ENTRIES_MAX,
+                                       sizeof(struct ath11k_dbg_dbr_entry),
+                                       GFP_KERNEL);
+       if (!dbr_dbg_data->entries)
+               return -ENOMEM;
+
+       spin_lock_init(&dbr_dbg_data->lock);
+
+       debugfs_create_file("dump_dbr_debug", 0444, dbr_debug->dbr_debugfs,
+                           dbr_dbg_data, &fops_debug_dump_dbr_entries);
+
+       return 0;
+}
+
+static ssize_t ath11k_debugfs_write_enable_dbr_dbg(struct file *file,
+                                                  const char __user *ubuf,
+                                                  size_t count, loff_t *ppos)
+{
+       struct ath11k *ar = file->private_data;
+       char buf[32] = {0};
+       u32 dbr_id, enable;
+       int ret;
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (ar->state != ATH11K_STATE_ON) {
+               ret = -ENETDOWN;
+               goto out;
+       }
+
+       ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+       if (ret < 0)
+               goto out;
+
+       buf[ret] = '\0';
+       ret = sscanf(buf, "%u %u", &dbr_id, &enable);
+       if (ret != 2 || dbr_id > 1 || enable > 1) {
+               ret = -EINVAL;
+               ath11k_warn(ar->ab, "usage: echo <dbr_id> <val> dbr_id:0-Spectral 1-CFR val:0-disable 1-enable\n");
+               goto out;
+       }
+
+       if (enable) {
+               ret = ath11k_debugfs_dbr_dbg_init(ar, dbr_id);
+               if (ret) {
+                       ath11k_warn(ar->ab, "db ring module debugfs init failed: %d\n",
+                                   ret);
+                       goto out;
+               }
+       } else {
+               ath11k_debugfs_dbr_dbg_destroy(ar, dbr_id);
+       }
+
+       ret = count;
+out:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+static const struct file_operations fops_dbr_debug = {
+       .write = ath11k_debugfs_write_enable_dbr_dbg,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
 int ath11k_debugfs_register(struct ath11k *ar)
 {
        struct ath11k_base *ab = ar->ab;
@@ -1142,6 +1407,9 @@ int ath11k_debugfs_register(struct ath11k *ar)
        debugfs_create_file("pktlog_filter", 0644,
                            ar->debug.debugfs_pdev, ar,
                            &fops_pktlog_filter);
+       debugfs_create_file("fw_dbglog_config", 0600,
+                           ar->debug.debugfs_pdev, ar,
+                           &fops_fw_dbglog);
 
        if (ar->hw->wiphy->bands[NL80211_BAND_5GHZ]) {
                debugfs_create_file("dfs_simulate_radar", 0200,
@@ -1152,9 +1420,250 @@ int ath11k_debugfs_register(struct ath11k *ar)
                                    &ar->dfs_block_radar_events);
        }
 
+       if (ab->hw_params.dbr_debug_support)
+               debugfs_create_file("enable_dbr_debug", 0200, ar->debug.debugfs_pdev,
+                                   ar, &fops_dbr_debug);
+
        return 0;
 }
 
 void ath11k_debugfs_unregister(struct ath11k *ar)
 {
+       struct ath11k_debug_dbr *dbr_debug;
+       struct ath11k_dbg_dbr_data *dbr_dbg_data;
+       int i;
+
+       for (i = 0; i < WMI_DIRECT_BUF_MAX; i++) {
+               dbr_debug = ar->debug.dbr_debug[i];
+               if (!dbr_debug)
+                       continue;
+
+               dbr_dbg_data = &dbr_debug->dbr_dbg_data;
+               kfree(dbr_dbg_data->entries);
+               debugfs_remove_recursive(dbr_debug->dbr_debugfs);
+               kfree(dbr_debug);
+               ar->debug.dbr_debug[i] = NULL;
+       }
+}
+
+static ssize_t ath11k_write_twt_add_dialog(struct file *file,
+                                          const char __user *ubuf,
+                                          size_t count, loff_t *ppos)
+{
+       struct ath11k_vif *arvif = file->private_data;
+       struct wmi_twt_add_dialog_params params = { 0 };
+       u8 buf[128] = {0};
+       int ret;
+
+       if (arvif->ar->twt_enabled == 0) {
+               ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+               return -EOPNOTSUPP;
+       }
+
+       ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+       if (ret < 0)
+               return ret;
+
+       buf[ret] = '\0';
+       ret = sscanf(buf,
+                    "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u %u %u %hhu %hhu %hhu %hhu %hhu",
+                    &params.peer_macaddr[0],
+                    &params.peer_macaddr[1],
+                    &params.peer_macaddr[2],
+                    &params.peer_macaddr[3],
+                    &params.peer_macaddr[4],
+                    &params.peer_macaddr[5],
+                    &params.dialog_id,
+                    &params.wake_intvl_us,
+                    &params.wake_intvl_mantis,
+                    &params.wake_dura_us,
+                    &params.sp_offset_us,
+                    &params.twt_cmd,
+                    &params.flag_bcast,
+                    &params.flag_trigger,
+                    &params.flag_flow_type,
+                    &params.flag_protection);
+       if (ret != 16)
+               return -EINVAL;
+
+       params.vdev_id = arvif->vdev_id;
+
+       ret = ath11k_wmi_send_twt_add_dialog_cmd(arvif->ar, &params);
+       if (ret)
+               return ret;
+
+       return count;
+}
+
+static ssize_t ath11k_write_twt_del_dialog(struct file *file,
+                                          const char __user *ubuf,
+                                          size_t count, loff_t *ppos)
+{
+       struct ath11k_vif *arvif = file->private_data;
+       struct wmi_twt_del_dialog_params params = { 0 };
+       u8 buf[64] = {0};
+       int ret;
+
+       if (arvif->ar->twt_enabled == 0) {
+               ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+               return -EOPNOTSUPP;
+       }
+
+       ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+       if (ret < 0)
+               return ret;
+
+       buf[ret] = '\0';
+       ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u",
+                    &params.peer_macaddr[0],
+                    &params.peer_macaddr[1],
+                    &params.peer_macaddr[2],
+                    &params.peer_macaddr[3],
+                    &params.peer_macaddr[4],
+                    &params.peer_macaddr[5],
+                    &params.dialog_id);
+       if (ret != 7)
+               return -EINVAL;
+
+       params.vdev_id = arvif->vdev_id;
+
+       ret = ath11k_wmi_send_twt_del_dialog_cmd(arvif->ar, &params);
+       if (ret)
+               return ret;
+
+       return count;
+}
+
+static ssize_t ath11k_write_twt_pause_dialog(struct file *file,
+                                            const char __user *ubuf,
+                                            size_t count, loff_t *ppos)
+{
+       struct ath11k_vif *arvif = file->private_data;
+       struct wmi_twt_pause_dialog_params params = { 0 };
+       u8 buf[64] = {0};
+       int ret;
+
+       if (arvif->ar->twt_enabled == 0) {
+               ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+               return -EOPNOTSUPP;
+       }
+
+       ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+       if (ret < 0)
+               return ret;
+
+       buf[ret] = '\0';
+       ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u",
+                    &params.peer_macaddr[0],
+                    &params.peer_macaddr[1],
+                    &params.peer_macaddr[2],
+                    &params.peer_macaddr[3],
+                    &params.peer_macaddr[4],
+                    &params.peer_macaddr[5],
+                    &params.dialog_id);
+       if (ret != 7)
+               return -EINVAL;
+
+       params.vdev_id = arvif->vdev_id;
+
+       ret = ath11k_wmi_send_twt_pause_dialog_cmd(arvif->ar, &params);
+       if (ret)
+               return ret;
+
+       return count;
+}
+
+static ssize_t ath11k_write_twt_resume_dialog(struct file *file,
+                                             const char __user *ubuf,
+                                             size_t count, loff_t *ppos)
+{
+       struct ath11k_vif *arvif = file->private_data;
+       struct wmi_twt_resume_dialog_params params = { 0 };
+       u8 buf[64] = {0};
+       int ret;
+
+       if (arvif->ar->twt_enabled == 0) {
+               ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+               return -EOPNOTSUPP;
+       }
+
+       ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+       if (ret < 0)
+               return ret;
+
+       buf[ret] = '\0';
+       ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u",
+                    &params.peer_macaddr[0],
+                    &params.peer_macaddr[1],
+                    &params.peer_macaddr[2],
+                    &params.peer_macaddr[3],
+                    &params.peer_macaddr[4],
+                    &params.peer_macaddr[5],
+                    &params.dialog_id,
+                    &params.sp_offset_us,
+                    &params.next_twt_size);
+       if (ret != 9)
+               return -EINVAL;
+
+       params.vdev_id = arvif->vdev_id;
+
+       ret = ath11k_wmi_send_twt_resume_dialog_cmd(arvif->ar, &params);
+       if (ret)
+               return ret;
+
+       return count;
+}
+
+static const struct file_operations ath11k_fops_twt_add_dialog = {
+       .write = ath11k_write_twt_add_dialog,
+       .open = simple_open
+};
+
+static const struct file_operations ath11k_fops_twt_del_dialog = {
+       .write = ath11k_write_twt_del_dialog,
+       .open = simple_open
+};
+
+static const struct file_operations ath11k_fops_twt_pause_dialog = {
+       .write = ath11k_write_twt_pause_dialog,
+       .open = simple_open
+};
+
+static const struct file_operations ath11k_fops_twt_resume_dialog = {
+       .write = ath11k_write_twt_resume_dialog,
+       .open = simple_open
+};
+
+int ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+{
+       if (arvif->vif->type == NL80211_IFTYPE_AP && !arvif->debugfs_twt) {
+               arvif->debugfs_twt = debugfs_create_dir("twt",
+                                                       arvif->vif->debugfs_dir);
+               if (!arvif->debugfs_twt || IS_ERR(arvif->debugfs_twt)) {
+                       ath11k_warn(arvif->ar->ab,
+                                   "failed to create directory %p\n",
+                                   arvif->debugfs_twt);
+                       arvif->debugfs_twt = NULL;
+                       return -1;
+               }
+
+               debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
+                                   arvif, &ath11k_fops_twt_add_dialog);
+
+               debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
+                                   arvif, &ath11k_fops_twt_del_dialog);
+
+               debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
+                                   arvif, &ath11k_fops_twt_pause_dialog);
+
+               debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
+                                   arvif, &ath11k_fops_twt_resume_dialog);
+       }
+       return 0;
+}
+
+void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
+{
+       debugfs_remove_recursive(arvif->debugfs_twt);
+       arvif->debugfs_twt = NULL;
 }
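
A minimal userspace sketch of driving the new fw_dbglog_config file added
above, assuming a typical /sys/kernel/debug/ath11k/<hw>/mac0 layout; the path
and the numeric param are illustrative only. Params follow enum
wmi_debug_log_param, and non-bitmap params must pass exactly the two fields
that ath11k_write_fw_dbglog()'s sscanf() format requires:

	#include <stdio.h>

	int main(void)
	{
		/* Path is an assumption; adjust to the local debugfs layout. */
		FILE *f = fopen("/sys/kernel/debug/ath11k/<hw>/mac0/fw_dbglog_config",
				"w");

		if (!f)
			return 1;
		/* "<param> <value>": two fields for a non-bitmap param; the
		 * bitmap params take "<param> <value> <mod_id_index> <is_end>".
		 */
		fprintf(f, "1 0x2\n");
		return fclose(f) ? 1 : 0;
	}
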
index 4c07403..30c00cb 100644
@@ -47,6 +47,36 @@ enum ath11k_dbg_htt_ext_stats_type {
        ATH11K_DBG_HTT_NUM_EXT_STATS,
 };
 
+#define ATH11K_DEBUG_DBR_ENTRIES_MAX 512
+
+enum ath11k_dbg_dbr_event {
+       ATH11K_DBG_DBR_EVENT_INVALID,
+       ATH11K_DBG_DBR_EVENT_RX,
+       ATH11K_DBG_DBR_EVENT_REPLENISH,
+       ATH11K_DBG_DBR_EVENT_MAX,
+};
+
+struct ath11k_dbg_dbr_entry {
+       u32 hp;
+       u32 tp;
+       u64 timestamp;
+       enum ath11k_dbg_dbr_event event;
+};
+
+struct ath11k_dbg_dbr_data {
+       /* protects ath11k_db_ring_debug data */
+       spinlock_t lock;
+       struct ath11k_dbg_dbr_entry *entries;
+       u32 dbr_debug_idx;
+       u32 num_ring_debug_entries;
+};
+
+struct ath11k_debug_dbr {
+       struct ath11k_dbg_dbr_data dbr_dbg_data;
+       struct dentry *dbr_debugfs;
+       bool dbr_debug_enabled;
+};
+
 struct debug_htt_stats_req {
        bool done;
        u8 pdev_id;
@@ -88,6 +118,7 @@ enum ath11k_pktlog_mode {
 };
 
 enum ath11k_pktlog_enum {
+       ATH11K_PKTLOG_TYPE_INVALID      = 0,
        ATH11K_PKTLOG_TYPE_TX_CTRL      = 1,
        ATH11K_PKTLOG_TYPE_TX_STAT      = 2,
        ATH11K_PKTLOG_TYPE_TX_MSDU_ID   = 3,
@@ -107,6 +138,130 @@ enum ath11k_dbg_aggr_mode {
        ATH11K_DBG_AGGR_MODE_MAX,
 };
 
+enum fw_dbglog_wlan_module_id {
+       WLAN_MODULE_ID_MIN = 0,
+       WLAN_MODULE_INF = WLAN_MODULE_ID_MIN,
+       WLAN_MODULE_WMI,
+       WLAN_MODULE_STA_PWRSAVE,
+       WLAN_MODULE_WHAL,
+       WLAN_MODULE_COEX,
+       WLAN_MODULE_ROAM,
+       WLAN_MODULE_RESMGR_CHAN_MANAGER,
+       WLAN_MODULE_RESMGR,
+       WLAN_MODULE_VDEV_MGR,
+       WLAN_MODULE_SCAN,
+       WLAN_MODULE_RATECTRL,
+       WLAN_MODULE_AP_PWRSAVE,
+       WLAN_MODULE_BLOCKACK,
+       WLAN_MODULE_MGMT_TXRX,
+       WLAN_MODULE_DATA_TXRX,
+       WLAN_MODULE_HTT,
+       WLAN_MODULE_HOST,
+       WLAN_MODULE_BEACON,
+       WLAN_MODULE_OFFLOAD,
+       WLAN_MODULE_WAL,
+       WLAN_WAL_MODULE_DE,
+       WLAN_MODULE_PCIELP,
+       WLAN_MODULE_RTT,
+       WLAN_MODULE_RESOURCE,
+       WLAN_MODULE_DCS,
+       WLAN_MODULE_CACHEMGR,
+       WLAN_MODULE_ANI,
+       WLAN_MODULE_P2P,
+       WLAN_MODULE_CSA,
+       WLAN_MODULE_NLO,
+       WLAN_MODULE_CHATTER,
+       WLAN_MODULE_WOW,
+       WLAN_MODULE_WAL_VDEV,
+       WLAN_MODULE_WAL_PDEV,
+       WLAN_MODULE_TEST,
+       WLAN_MODULE_STA_SMPS,
+       WLAN_MODULE_SWBMISS,
+       WLAN_MODULE_WMMAC,
+       WLAN_MODULE_TDLS,
+       WLAN_MODULE_HB,
+       WLAN_MODULE_TXBF,
+       WLAN_MODULE_BATCH_SCAN,
+       WLAN_MODULE_THERMAL_MGR,
+       WLAN_MODULE_PHYERR_DFS,
+       WLAN_MODULE_RMC,
+       WLAN_MODULE_STATS,
+       WLAN_MODULE_NAN,
+       WLAN_MODULE_IBSS_PWRSAVE,
+       WLAN_MODULE_HIF_UART,
+       WLAN_MODULE_LPI,
+       WLAN_MODULE_EXTSCAN,
+       WLAN_MODULE_UNIT_TEST,
+       WLAN_MODULE_MLME,
+       WLAN_MODULE_SUPPL,
+       WLAN_MODULE_ERE,
+       WLAN_MODULE_OCB,
+       WLAN_MODULE_RSSI_MONITOR,
+       WLAN_MODULE_WPM,
+       WLAN_MODULE_CSS,
+       WLAN_MODULE_PPS,
+       WLAN_MODULE_SCAN_CH_PREDICT,
+       WLAN_MODULE_MAWC,
+       WLAN_MODULE_CMC_QMIC,
+       WLAN_MODULE_EGAP,
+       WLAN_MODULE_NAN20,
+       WLAN_MODULE_QBOOST,
+       WLAN_MODULE_P2P_LISTEN_OFFLOAD,
+       WLAN_MODULE_HALPHY,
+       WLAN_WAL_MODULE_ENQ,
+       WLAN_MODULE_GNSS,
+       WLAN_MODULE_WAL_MEM,
+       WLAN_MODULE_SCHED_ALGO,
+       WLAN_MODULE_TX,
+       WLAN_MODULE_RX,
+       WLAN_MODULE_WLM,
+       WLAN_MODULE_RU_ALLOCATOR,
+       WLAN_MODULE_11K_OFFLOAD,
+       WLAN_MODULE_STA_TWT,
+       WLAN_MODULE_AP_TWT,
+       WLAN_MODULE_UL_OFDMA,
+       WLAN_MODULE_HPCS_PULSE,
+       WLAN_MODULE_DTF,
+       WLAN_MODULE_QUIET_IE,
+       WLAN_MODULE_SHMEM_MGR,
+       WLAN_MODULE_CFIR,
+       WLAN_MODULE_CODE_COVER,
+       WLAN_MODULE_SHO,
+       WLAN_MODULE_MLO_MGR,
+       WLAN_MODULE_PEER_INIT,
+       WLAN_MODULE_STA_MLO_PS,
+
+       WLAN_MODULE_ID_MAX,
+       WLAN_MODULE_ID_INVALID = WLAN_MODULE_ID_MAX,
+};
+
+enum fw_dbglog_log_level {
+       ATH11K_FW_DBGLOG_ML = 0,
+       ATH11K_FW_DBGLOG_VERBOSE = 0,
+       ATH11K_FW_DBGLOG_INFO,
+       ATH11K_FW_DBGLOG_INFO_LVL_1,
+       ATH11K_FW_DBGLOG_INFO_LVL_2,
+       ATH11K_FW_DBGLOG_WARN,
+       ATH11K_FW_DBGLOG_ERR,
+       ATH11K_FW_DBGLOG_LVL_MAX
+};
+
+struct ath11k_fw_dbglog {
+       enum wmi_debug_log_param param;
+       union {
+               struct {
+                       /* log_level values are given in enum fw_dbglog_log_level */
+                       u16 log_level;
+                       /* module_id values are given in enum fw_dbglog_wlan_module_id */
+                       u16 module_id;
+               };
+               /* value is either log_level&module_id/vdev_id/vdev_id_bitmap/log_level
+                * according to param
+                */
+               u32 value;
+       };
+};
+
 #ifdef CONFIG_ATH11K_DEBUGFS
 int ath11k_debugfs_soc_create(struct ath11k_base *ab);
 void ath11k_debugfs_soc_destroy(struct ath11k_base *ab);
@@ -151,6 +306,13 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
        return ar->debug.rx_filter;
 }
 
+int ath11k_debugfs_add_interface(struct ath11k_vif *arvif);
+void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif);
+void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+                                    enum wmi_direct_buffer_module id,
+                                    enum ath11k_dbg_dbr_event event,
+                                    struct hal_srng *srng);
+
 #else
 static inline int ath11k_debugfs_soc_create(struct ath11k_base *ab)
 {
@@ -224,6 +386,22 @@ static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
        return 0;
 }
 
-#endif /* CONFIG_MAC80211_DEBUGFS*/
+static inline int ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+{
+       return 0;
+}
+
+static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
+{
+}
+
+static inline void
+ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+                               enum wmi_direct_buffer_module id,
+                               enum ath11k_dbg_dbr_event event,
+                               struct hal_srng *srng)
+{
+}
+#endif /* CONFIG_ATH11K_DEBUGFS*/
 
 #endif /* _ATH11K_DEBUGFS_H_ */
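
The anonymous union in struct ath11k_fw_dbglog above can be exercised either
through the (log_level, module_id) pair or through the raw 32-bit value; on a
little-endian host the two views alias as in this standalone sketch (demo
types only, not driver code):

	#include <stdio.h>
	#include <stdint.h>

	struct dbglog_demo {
		union {
			struct {
				uint16_t log_level;	/* low 16 bits of value */
				uint16_t module_id;	/* high 16 bits of value */
			};
			uint32_t value;
		};
	};

	int main(void)
	{
		struct dbglog_demo d = { .value = 0 };

		d.log_level = 5;	/* ATH11K_FW_DBGLOG_ERR */
		d.module_id = 1;	/* WLAN_MODULE_WMI */
		printf("value = 0x%08x\n", d.value);	/* 0x00010005 on LE */
		return 0;
	}
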
index 409d6cc..e9dfa20 100644
@@ -115,6 +115,8 @@ struct ath11k_pdev_mon_stats {
        u32 dest_mpdu_drop;
        u32 dup_mon_linkdesc_cnt;
        u32 dup_mon_buf_cnt;
+       u32 dest_mon_stuck;
+       u32 dest_mon_not_reaped;
 };
 
 struct dp_full_mon_mpdu {
@@ -167,6 +169,7 @@ struct ath11k_mon_data {
 
 struct ath11k_pdev_dp {
        u32 mac_id;
+       u32 mon_dest_ring_stuck_cnt;
        atomic_t num_tx_pending;
        wait_queue_head_t tx_empty_waitq;
        struct dp_rxdma_ring rx_refill_buf_ring;
@@ -1170,12 +1173,12 @@ struct ath11k_htt_ppdu_stats_msg {
        u32 ppdu_id;
        u32 timestamp;
        u32 rsvd;
-       u8 data[0];
+       u8 data[];
 } __packed;
 
 struct htt_tlv {
        u32 header;
-       u8 value[0];
+       u8 value[];
 } __packed;
 
 #define HTT_TLV_TAG                    GENMASK(11, 0)
@@ -1362,7 +1365,7 @@ struct htt_ppdu_stats_usr_cmn_array {
         * tx_ppdu_stats_info is variable length, with length =
         *     number_of_ppdu_stats * sizeof (struct htt_tx_ppdu_stats_info)
         */
-       struct htt_tx_ppdu_stats_info tx_ppdu_info[0];
+       struct htt_tx_ppdu_stats_info tx_ppdu_info[];
 } __packed;
 
 struct htt_ppdu_user_stats {
@@ -1424,7 +1427,7 @@ struct htt_ppdu_stats_info {
  */
 struct htt_pktlog_msg {
        u32 hdr;
-       u8 payload[0];
+       u8 payload[];
 };
 
 /**
@@ -1645,7 +1648,7 @@ struct ath11k_htt_extd_stats_msg {
        u32 info0;
        u64 cookie;
        u32 info1;
-       u8 data[0];
+       u8 data[];
 } __packed;
 
 #define        HTT_MAC_ADDR_L32_0      GENMASK(7, 0)
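
The [0]-to-[] conversions in the dp.h hunks above swap the legacy GNU
zero-length-array idiom for C99 flexible array members; the allocation
pattern is unchanged, as this standalone sketch shows:

	#include <stdlib.h>
	#include <string.h>

	struct demo_msg {
		unsigned int hdr;
		unsigned char data[];	/* was: unsigned char data[0]; */
	};

	int main(void)
	{
		size_t payload_len = 64;
		/* sizeof(*msg) covers only the fixed header. */
		struct demo_msg *msg = malloc(sizeof(*msg) + payload_len);

		if (!msg)
			return 1;
		memset(msg->data, 0, payload_len);
		free(msg);
		return 0;
	}
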
index 89b77b9..049774c 100644
@@ -2652,9 +2652,9 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
 
        spin_lock_bh(&srng->lock);
 
+try_again:
        ath11k_hal_srng_access_begin(ab, srng);
 
-try_again:
        while (likely(desc =
              (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
                                                                             srng))) {
@@ -4807,7 +4807,6 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
 {
        struct ath11k_base *ab = ar->ab;
        struct sk_buff *msdu, *prev_buf;
-       u32 wifi_hdr_len;
        struct hal_rx_desc *rx_desc;
        char *hdr_desc;
        u8 *dest, decap_format;
@@ -4849,38 +4848,27 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
 
                skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
        } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
-               __le16 qos_field;
                u8 qos_pkt = 0;
 
                rx_desc = (struct hal_rx_desc *)head_msdu->data;
                hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
 
                /* Base size */
-               wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
                wh = (struct ieee80211_hdr_3addr *)hdr_desc;
 
-               if (ieee80211_is_data_qos(wh->frame_control)) {
-                       struct ieee80211_qos_hdr *qwh =
-                                       (struct ieee80211_qos_hdr *)hdr_desc;
-
-                       qos_field = qwh->qos_ctrl;
+               if (ieee80211_is_data_qos(wh->frame_control))
                        qos_pkt = 1;
-               }
+
                msdu = head_msdu;
 
                while (msdu) {
-                       rx_desc = (struct hal_rx_desc *)msdu->data;
-                       hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
-
+                       ath11k_dp_rx_msdus_set_payload(ar, msdu);
                        if (qos_pkt) {
                                dest = skb_push(msdu, sizeof(__le16));
                                if (!dest)
                                        goto err_merge_fail;
-                               memcpy(dest, hdr_desc, wifi_hdr_len);
-                               memcpy(dest + wifi_hdr_len,
-                                      (u8 *)&qos_field, sizeof(__le16));
+                               memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
                        }
-                       ath11k_dp_rx_msdus_set_payload(ar, msdu);
                        prev_buf = msdu;
                        msdu = msdu->next;
                }
@@ -4904,8 +4892,98 @@ err_merge_fail:
        return NULL;
 }
 
+static void
+ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
+                               u8 *rtap_buf)
+{
+       u32 rtap_len = 0;
+
+       put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
+       rtap_len += 2;
+
+       put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
+       rtap_len += 2;
+
+       put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
+       rtap_len += 2;
+
+       put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
+       rtap_len += 2;
+
+       put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
+       rtap_len += 2;
+
+       put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
+}
+
+static void
+ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
+                                  u8 *rtap_buf)
+{
+       u32 rtap_len = 0;
+
+       put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
+       rtap_len += 2;
+
+       put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
+       rtap_len += 2;
+
+       rtap_buf[rtap_len] = rx_status->he_RU[0];
+       rtap_len += 1;
+
+       rtap_buf[rtap_len] = rx_status->he_RU[1];
+       rtap_len += 1;
+
+       rtap_buf[rtap_len] = rx_status->he_RU[2];
+       rtap_len += 1;
+
+       rtap_buf[rtap_len] = rx_status->he_RU[3];
+}
+
+static void ath11k_update_radiotap(struct ath11k *ar,
+                                  struct hal_rx_mon_ppdu_info *ppduinfo,
+                                  struct sk_buff *mon_skb,
+                                  struct ieee80211_rx_status *rxs)
+{
+       struct ieee80211_supported_band *sband;
+       u8 *ptr = NULL;
+
+       rxs->flag |= RX_FLAG_MACTIME_START;
+       rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
+
+       if (ppduinfo->nss)
+               rxs->nss = ppduinfo->nss;
+
+       if (ppduinfo->he_mu_flags) {
+               rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
+               rxs->encoding = RX_ENC_HE;
+               ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
+               ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
+       } else if (ppduinfo->he_flags) {
+               rxs->flag |= RX_FLAG_RADIOTAP_HE;
+               rxs->encoding = RX_ENC_HE;
+               ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
+               ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
+               rxs->rate_idx = ppduinfo->rate;
+       } else if (ppduinfo->vht_flags) {
+               rxs->encoding = RX_ENC_VHT;
+               rxs->rate_idx = ppduinfo->rate;
+       } else if (ppduinfo->ht_flags) {
+               rxs->encoding = RX_ENC_HT;
+               rxs->rate_idx = ppduinfo->rate;
+       } else {
+               rxs->encoding = RX_ENC_LEGACY;
+               sband = &ar->mac.sbands[rxs->band];
+               rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
+                                                         ppduinfo->cck_flag);
+       }
+
+       rxs->mactime = ppduinfo->tsft;
+}
+
 static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
                                    struct sk_buff *head_msdu,
+                                   struct hal_rx_mon_ppdu_info *ppduinfo,
                                    struct sk_buff *tail_msdu,
                                    struct napi_struct *napi)
 {
@@ -4940,7 +5018,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
                } else {
                        rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
                }
-               rxs->flag |= RX_FLAG_ONLY_MONITOR;
+               ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
 
                ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
                mon_skb = skb_next;
@@ -4959,6 +5037,12 @@ mon_deliver_fail:
        return -EINVAL;
 }
 
+/* The destination ring processing is stuck if the destination is not
+ * moving while status ring moves 16 PPDU. The destination ring processing
+ * skips this destination ring PPDU as a workaround.
+ */
+#define MON_DEST_RING_STUCK_MAX_CNT 16
+
 static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
                                          u32 quota, struct napi_struct *napi)
 {
@@ -4972,6 +5056,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
        u32 ring_id;
        struct ath11k_pdev_mon_stats *rx_mon_stats;
        u32      npackets = 0;
+       u32 mpdu_rx_bufs_used;
 
        if (ar->ab->hw_params.rxdma1_enable)
                ring_id = dp->rxdma_mon_dst_ring.ring_id;
@@ -5001,20 +5086,44 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
                head_msdu = NULL;
                tail_msdu = NULL;
 
-               rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
-                                                         &head_msdu,
-                                                         &tail_msdu,
-                                                         &npackets, &ppdu_id);
+               mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
+                                                             &head_msdu,
+                                                             &tail_msdu,
+                                                             &npackets, &ppdu_id);
+
+               rx_bufs_used += mpdu_rx_bufs_used;
+
+               if (mpdu_rx_bufs_used) {
+                       dp->mon_dest_ring_stuck_cnt = 0;
+               } else {
+                       dp->mon_dest_ring_stuck_cnt++;
+                       rx_mon_stats->dest_mon_not_reaped++;
+               }
+
+               if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
+                       rx_mon_stats->dest_mon_stuck++;
+                       ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+                                  "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
+                                  pmon->mon_ppdu_info.ppdu_id, ppdu_id,
+                                  dp->mon_dest_ring_stuck_cnt,
+                                  rx_mon_stats->dest_mon_not_reaped,
+                                  rx_mon_stats->dest_mon_stuck);
+                       pmon->mon_ppdu_info.ppdu_id = ppdu_id;
+                       continue;
+               }
 
                if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
                        pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
                        ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
-                                  "dest_rx: new ppdu_id %x != status ppdu_id %x",
-                                  ppdu_id, pmon->mon_ppdu_info.ppdu_id);
+                                  "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
+                                  ppdu_id, pmon->mon_ppdu_info.ppdu_id,
+                                  rx_mon_stats->dest_mon_not_reaped,
+                                  rx_mon_stats->dest_mon_stuck);
                        break;
                }
                if (head_msdu && tail_msdu) {
                        ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
+                                                &pmon->mon_ppdu_info,
                                                 tail_msdu, napi);
                        rx_mon_stats->dest_mpdu_done++;
                }
@@ -5054,7 +5163,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
        struct ath11k_sta *arsta;
        int num_buffs_reaped = 0;
        u32 rx_buf_sz;
-       u16 log_type = 0;
+       u16 log_type;
        struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
        struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
        struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
@@ -5076,11 +5185,15 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
                } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
                        log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
                        rx_buf_sz = DP_RX_BUFFER_SIZE;
+               } else {
+                       log_type = ATH11K_PKTLOG_TYPE_INVALID;
+                       rx_buf_sz = 0;
                }
 
-               if (log_type)
+               if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
                        trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
 
+               memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info));
                hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
 
                if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
@@ -5341,6 +5454,7 @@ static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
                tail_msdu = mon_mpdu->tail;
                if (head_msdu && tail_msdu) {
                        ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
+                                                      &pmon->mon_ppdu_info,
                                                       tail_msdu, napi);
                        rx_mon_stats->dest_mpdu_done++;
                        ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
index 6d19547..00a4581 100644
@@ -427,7 +427,7 @@ void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts)
        struct ath11k_sta *arsta;
        struct ieee80211_sta *sta;
        u16 rate, ru_tones;
-       u8 mcs, rate_idx, ofdma;
+       u8 mcs, rate_idx = 0, ofdma;
        int ret;
 
        spin_lock_bh(&ab->base_lock);
@@ -519,9 +519,13 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
                                       struct sk_buff *msdu,
                                       struct hal_tx_status *ts)
 {
+       struct ieee80211_tx_status status = { 0 };
        struct ath11k_base *ab = ar->ab;
        struct ieee80211_tx_info *info;
        struct ath11k_skb_cb *skb_cb;
+       struct ath11k_peer *peer;
+       struct ath11k_sta *arsta;
+       struct rate_info rate;
 
        if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
                /* Must not happen */
@@ -584,12 +588,26 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
                ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
        }
 
-       /* NOTE: Tx rate status reporting. Tx completion status does not have
-        * necessary information (for example nss) to build the tx rate.
-        * Might end up reporting it out-of-band from HTT stats.
-        */
+       spin_lock_bh(&ab->base_lock);
+       peer = ath11k_peer_find_by_id(ab, ts->peer_id);
+       if (!peer || !peer->sta) {
+               ath11k_dbg(ab, ATH11K_DBG_DATA,
+                          "dp_tx: failed to find the peer with peer_id %d\n",
+                           ts->peer_id);
+               spin_unlock_bh(&ab->base_lock);
+               dev_kfree_skb_any(msdu);
+               return;
+       }
+       arsta = (struct ath11k_sta *)peer->sta->drv_priv;
+       status.sta = peer->sta;
+       status.skb = msdu;
+       status.info = info;
+       rate = arsta->last_txrate;
+       status.rate = &rate;
 
-       ieee80211_tx_status(ar->hw, msdu);
+       spin_unlock_bh(&ab->base_lock);
+
+       ieee80211_tx_status_ext(ar->hw, &status);
 }
 
 static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
index 4067676..24e72e7 100644
@@ -474,6 +474,7 @@ enum hal_tlv_tag {
 
 #define HAL_TLV_HDR_TAG                GENMASK(9, 1)
 #define HAL_TLV_HDR_LEN                GENMASK(25, 10)
+#define HAL_TLV_USR_ID         GENMASK(31, 26)
 
 #define HAL_TLV_ALIGN  4
 
index a3b353a..4bb1fba 100644
@@ -453,10 +453,12 @@ void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc,
                             desc->info0));
        ath11k_dbg(ab, ATH11k_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
                   desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
-       ath11k_dbg(ab, ATH11k_DBG_HAL, "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
+       ath11k_dbg(ab, ATH11k_DBG_HAL,
+                  "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
                   desc->last_rx_enqueue_timestamp,
                   desc->last_rx_dequeue_timestamp);
-       ath11k_dbg(ab, ATH11k_DBG_HAL, "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
+       ath11k_dbg(ab, ATH11k_DBG_HAL,
+                  "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
                   desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
                   desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
                   desc->rx_bitmap[6], desc->rx_bitmap[7]);
@@ -802,12 +804,75 @@ void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
        }
 }
 
+#define HAL_MAX_UL_MU_USERS    37
+static inline void
+ath11k_hal_rx_handle_ofdma_info(void *rx_tlv,
+                               struct hal_rx_user_status *rx_user_status)
+{
+       struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
+               (struct hal_rx_ppdu_end_user_stats *)rx_tlv;
+
+       rx_user_status->ul_ofdma_user_v0_word0 = __le32_to_cpu(ppdu_end_user->info6);
+
+       rx_user_status->ul_ofdma_user_v0_word1 = __le32_to_cpu(ppdu_end_user->rsvd2[10]);
+}
+
+static inline void
+ath11k_hal_rx_populate_byte_count(void *rx_tlv, void *ppduinfo,
+                                 struct hal_rx_user_status *rx_user_status)
+{
+       struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
+               (struct hal_rx_ppdu_end_user_stats *)rx_tlv;
+
+       rx_user_status->mpdu_ok_byte_count =
+               FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_6_MPDU_OK_BYTE_COUNT,
+                         __le32_to_cpu(ppdu_end_user->rsvd2[6]));
+       rx_user_status->mpdu_err_byte_count =
+               FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_8_MPDU_ERR_BYTE_COUNT,
+                         __le32_to_cpu(ppdu_end_user->rsvd2[8]));
+}
+
+static inline void
+ath11k_hal_rx_populate_mu_user_info(void *rx_tlv, struct hal_rx_mon_ppdu_info *ppdu_info,
+                                   struct hal_rx_user_status *rx_user_status)
+{
+       rx_user_status->ast_index = ppdu_info->ast_index;
+       rx_user_status->tid = ppdu_info->tid;
+       rx_user_status->tcp_msdu_count =
+               ppdu_info->tcp_msdu_count;
+       rx_user_status->udp_msdu_count =
+               ppdu_info->udp_msdu_count;
+       rx_user_status->other_msdu_count =
+               ppdu_info->other_msdu_count;
+       rx_user_status->frame_control = ppdu_info->frame_control;
+       rx_user_status->frame_control_info_valid =
+               ppdu_info->frame_control_info_valid;
+       rx_user_status->data_sequence_control_info_valid =
+               ppdu_info->data_sequence_control_info_valid;
+       rx_user_status->first_data_seq_ctrl =
+               ppdu_info->first_data_seq_ctrl;
+       rx_user_status->preamble_type = ppdu_info->preamble_type;
+       rx_user_status->ht_flags = ppdu_info->ht_flags;
+       rx_user_status->vht_flags = ppdu_info->vht_flags;
+       rx_user_status->he_flags = ppdu_info->he_flags;
+       rx_user_status->rs_flags = ppdu_info->rs_flags;
+
+       rx_user_status->mpdu_cnt_fcs_ok =
+               ppdu_info->num_mpdu_fcs_ok;
+       rx_user_status->mpdu_cnt_fcs_err =
+               ppdu_info->num_mpdu_fcs_err;
+
+       ath11k_hal_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status);
+}
+
 static enum hal_rx_mon_status
 ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
                                   struct hal_rx_mon_ppdu_info *ppdu_info,
-                                  u32 tlv_tag, u8 *tlv_data)
+                                  u32 tlv_tag, u8 *tlv_data, u32 userid)
 {
-       u32 info0, info1;
+       u32 info0, info1, value;
+       u8 he_dcm = 0, he_stbc = 0;
+       u16 he_gi = 0, he_ltf = 0;
 
        switch (tlv_tag) {
        case HAL_RX_PPDU_START: {
@@ -828,6 +893,9 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
                info0 = __le32_to_cpu(eu_stats->info0);
                info1 = __le32_to_cpu(eu_stats->info1);
 
+               ppdu_info->ast_index =
+                       FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX,
+                                 __le32_to_cpu(eu_stats->info2));
                ppdu_info->tid =
                        ffs(FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP,
                                      __le32_to_cpu(eu_stats->info6))) - 1;
@@ -851,6 +919,44 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
                ppdu_info->num_mpdu_fcs_err =
                        FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR,
                                  info0);
+               switch (ppdu_info->preamble_type) {
+               case HAL_RX_PREAMBLE_11N:
+                       ppdu_info->ht_flags = 1;
+                       break;
+               case HAL_RX_PREAMBLE_11AC:
+                       ppdu_info->vht_flags = 1;
+                       break;
+               case HAL_RX_PREAMBLE_11AX:
+                       ppdu_info->he_flags = 1;
+                       break;
+               default:
+                       break;
+               }
+
+               if (userid < HAL_MAX_UL_MU_USERS) {
+                       struct hal_rx_user_status *rxuser_stats =
+                               &ppdu_info->userstats;
+
+                       ath11k_hal_rx_handle_ofdma_info(tlv_data, rxuser_stats);
+                       ath11k_hal_rx_populate_mu_user_info(tlv_data, ppdu_info,
+                                                           rxuser_stats);
+               }
+               ppdu_info->userstats.mpdu_fcs_ok_bitmap[0] =
+                                       __le32_to_cpu(eu_stats->rsvd1[0]);
+               ppdu_info->userstats.mpdu_fcs_ok_bitmap[1] =
+                                       __le32_to_cpu(eu_stats->rsvd1[1]);
+
+               break;
+       }
+       case HAL_RX_PPDU_END_USER_STATS_EXT: {
+               struct hal_rx_ppdu_end_user_stats_ext *eu_stats =
+                       (struct hal_rx_ppdu_end_user_stats_ext *)tlv_data;
+               ppdu_info->userstats.mpdu_fcs_ok_bitmap[2] = eu_stats->info1;
+               ppdu_info->userstats.mpdu_fcs_ok_bitmap[3] = eu_stats->info2;
+               ppdu_info->userstats.mpdu_fcs_ok_bitmap[4] = eu_stats->info3;
+               ppdu_info->userstats.mpdu_fcs_ok_bitmap[5] = eu_stats->info4;
+               ppdu_info->userstats.mpdu_fcs_ok_bitmap[6] = eu_stats->info5;
+               ppdu_info->userstats.mpdu_fcs_ok_bitmap[7] = eu_stats->info6;
                break;
        }
        case HAL_PHYRX_HT_SIG: {
@@ -949,50 +1055,151 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
                else
                        ppdu_info->reception_type =
                                HAL_RX_RECEPTION_TYPE_MU_MIMO;
+               ppdu_info->vht_flag_values5 = group_id;
+               ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) |
+                                                  ppdu_info->nss);
+               ppdu_info->vht_flag_values2 = ppdu_info->bw;
+               ppdu_info->vht_flag_values4 =
+                       FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING, info1);
                break;
        }
        case HAL_PHYRX_HE_SIG_A_SU: {
                struct hal_rx_he_sig_a_su_info *he_sig_a =
                        (struct hal_rx_he_sig_a_su_info *)tlv_data;
-               u32 nsts, cp_ltf, dcm;
 
+               ppdu_info->he_flags = 1;
                info0 = __le32_to_cpu(he_sig_a->info0);
                info1 = __le32_to_cpu(he_sig_a->info1);
 
-               ppdu_info->mcs =
-                       FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS,
-                                 info0);
-               ppdu_info->bw =
-                       FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW,
-                                 info0);
-               ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING, info0);
-               ppdu_info->is_stbc = info1 &
-                                    HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC;
-               ppdu_info->beamformed = info1 &
-                                       HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF;
-               dcm = info0 & HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM;
-               cp_ltf = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE,
-                                  info0);
-               nsts = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
+               value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND, info0);
 
-               switch (cp_ltf) {
+               if (value == 0)
+                       ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG;
+               else
+                       ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU;
+
+               ppdu_info->he_data1 |=
+                       IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;
+
+               ppdu_info->he_data2 |=
+                       IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;
+
+               value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR, info0);
+               ppdu_info->he_data3 =
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
+               value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE, info0);
+               ppdu_info->he_data3 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE, value);
+               value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG, info0);
+               ppdu_info->he_data3 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);
+               value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS, info0);
+               ppdu_info->mcs = value;
+               ppdu_info->he_data3 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, value);
+
+               he_dcm = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM, info0);
+               ppdu_info->dcm = he_dcm;
+               ppdu_info->he_data3 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, he_dcm);
+               value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING, info1);
+               ppdu_info->ldpc = (value == HAL_RX_SU_MU_CODING_LDPC) ? 1 : 0;
+               ppdu_info->he_data3 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
+               value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA, info1);
+               ppdu_info->he_data3 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
+               he_stbc = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC, info1);
+               ppdu_info->is_stbc = he_stbc;
+               ppdu_info->he_data3 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, he_stbc);
+
+               /* data4 */
+               value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE, info0);
+               ppdu_info->he_data4 =
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);
+
+               /* data5 */
+               value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW, info0);
+               ppdu_info->bw = value;
+               ppdu_info->he_data5 =
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
+               value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE, info0);
+               switch (value) {
                case 0:
+                       he_gi = HE_GI_0_8;
+                       he_ltf = HE_LTF_1_X;
+                       break;
                case 1:
-                       ppdu_info->gi = HAL_RX_GI_0_8_US;
-                       break;
+                       he_gi = HE_GI_0_8;
+                       he_ltf = HE_LTF_2_X;
+                       break;
                case 2:
-                       ppdu_info->gi = HAL_RX_GI_1_6_US;
-                       break;
+                       he_gi = HE_GI_1_6;
+                       he_ltf = HE_LTF_2_X;
+                       break;
                case 3:
-                       if (dcm && ppdu_info->is_stbc)
-                               ppdu_info->gi = HAL_RX_GI_0_8_US;
-                       else
-                               ppdu_info->gi = HAL_RX_GI_3_2_US;
-                       break;
+                       if (he_dcm && he_stbc) {
+                               he_gi = HE_GI_0_8;
+                               he_ltf = HE_LTF_4_X;
+                       } else {
+                               he_gi = HE_GI_3_2;
+                               he_ltf = HE_LTF_4_X;
+                       }
+                       break;
                }
+               ppdu_info->gi = he_gi;
+               he_gi = (he_gi != 0) ? he_gi - 1 : 0;
+               ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
+               ppdu_info->ltf_size = he_ltf;
+               ppdu_info->he_data5 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
+                                  (he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
+
+               value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
+               ppdu_info->he_data5 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
+
+               value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR, info1);
+               ppdu_info->he_data5 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
+
+               value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF, info1);
+               ppdu_info->beamformed = value;
+               ppdu_info->he_data5 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_TXBF, value);
+               value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM, info1);
+               ppdu_info->he_data5 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);
+
+               /* data6 */
+               value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
+               value++;
+               ppdu_info->nss = value;
+               ppdu_info->he_data6 =
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_NSTS, value);
+               value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND, info1);
+               ppdu_info->he_data6 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
+               value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION, info1);
+               ppdu_info->he_data6 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);
 
-               ppdu_info->nss = nsts + 1;
-               ppdu_info->dcm = dcm;
                ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
                break;
        }
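
The switch above is the whole of the SU-PPDU guard-interval/LTF decode: the 2-bit HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE field selects a (GI, LTF) pair, and the cp_ltf == 3 case is disambiguated by the DCM and STBC bits. A minimal stand-alone sketch of the same mapping (hypothetical helper, reusing only the HE_GI_*/HE_LTF_* values this patch introduces):

    #include <stdint.h>

    enum { HE_GI_0_8, HE_GI_0_4, HE_GI_1_6, HE_GI_3_2 };
    enum { HE_LTF_1_X, HE_LTF_2_X, HE_LTF_4_X, HE_LTF_UNKNOWN };

    /* Mirrors the SU-PPDU switch: cp_ltf is the raw 2-bit HE-SIG-A field;
     * dcm and stbc disambiguate the cp_ltf == 3 case.
     */
    static void he_su_cp_ltf_decode(uint8_t cp_ltf, int dcm, int stbc,
                                    int *gi, int *ltf)
    {
            switch (cp_ltf & 0x3) {
            case 0: *gi = HE_GI_0_8; *ltf = HE_LTF_1_X; break;
            case 1: *gi = HE_GI_0_8; *ltf = HE_LTF_2_X; break;
            case 2: *gi = HE_GI_1_6; *ltf = HE_LTF_2_X; break;
            case 3:
                    *gi = (dcm && stbc) ? HE_GI_0_8 : HE_GI_3_2;
                    *ltf = HE_LTF_4_X;
                    break;
            }
    }
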
@@ -1000,29 +1207,142 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
                struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
                        (struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;
 
-               u32 cp_ltf;
-
                info0 = __le32_to_cpu(he_sig_a_mu_dl->info0);
                info1 = __le32_to_cpu(he_sig_a_mu_dl->info1);
 
-               ppdu_info->bw =
-                       FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW,
-                                 info0);
-               cp_ltf = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE,
-                                  info0);
-
-               switch (cp_ltf) {
+               ppdu_info->he_mu_flags = 1;
+
+               ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU;
+               ppdu_info->he_data1 |=
+                       IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;
+
+               ppdu_info->he_data2 =
+                       IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;
+
+               /* data3 */
+               value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_BSS_COLOR, info0);
+               ppdu_info->he_data3 =
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
+
+               value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_UL_FLAG, info0);
+               ppdu_info->he_data3 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);
+
+               value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_LDPC_EXTRA, info1);
+               ppdu_info->he_data3 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
+
+               value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC, info1);
+               he_stbc = value;
+               ppdu_info->he_data3 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, value);
+
+               /* data4 */
+               value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_SPATIAL_REUSE, info0);
+               ppdu_info->he_data4 =
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);
+
+               /* data5 */
+               value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
+               ppdu_info->bw = value;
+               ppdu_info->he_data5 =
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
+
+               value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE, info0);
+               switch (value) {
                case 0:
+                       he_gi = HE_GI_0_8;
+                       he_ltf = HE_LTF_4_X;
+                       break;
                case 1:
-                       ppdu_info->gi = HAL_RX_GI_0_8_US;
+                       he_gi = HE_GI_0_8;
+                       he_ltf = HE_LTF_2_X;
                        break;
                case 2:
-                       ppdu_info->gi = HAL_RX_GI_1_6_US;
+                       he_gi = HE_GI_1_6;
+                       he_ltf = HE_LTF_2_X;
                        break;
                case 3:
-                       ppdu_info->gi = HAL_RX_GI_3_2_US;
+                       he_gi = HE_GI_3_2;
+                       he_ltf = HE_LTF_4_X;
                        break;
                }
+               ppdu_info->gi = he_gi;
+               he_gi = (he_gi != 0) ? he_gi - 1 : 0;
+               ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
+               ppdu_info->ltf_size = he_ltf;
+               ppdu_info->he_data5 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
+                                  (he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
+
+               value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_NUM_LTF_SYMB, info1);
+               ppdu_info->he_data5 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
+
+               value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_FACTOR,
+                                 info1);
+               ppdu_info->he_data5 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
+
+               value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_PE_DISAM,
+                                 info1);
+               ppdu_info->he_data5 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);
+
+               /* data6 */
+               value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DOPPLER_INDICATION,
+                                 info0);
+               ppdu_info->he_data6 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
+
+               value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXOP_DURATION, info1);
+               ppdu_info->he_data6 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);
+
+               /* HE-MU Flags */
+               /* HE-MU-flags1 */
+               ppdu_info->he_flags1 =
+                       IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
+                       IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
+                       IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN |
+                       IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
+                       IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN;
+
+               value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_MCS_OF_SIGB, info0);
+               ppdu_info->he_flags1 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN,
+                                  value);
+               value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DCM_OF_SIGB, info0);
+               ppdu_info->he_flags1 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN,
+                                  value);
+
+               /* HE-MU-flags2 */
+               ppdu_info->he_flags2 =
+                       IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN;
+
+               value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
+               ppdu_info->he_flags2 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW,
+                                  value);
+               value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_COMP_MODE_SIGB, info0);
+               ppdu_info->he_flags2 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP, value);
+               value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_NUM_SIGB_SYMB, info0);
+               value = value - 1;
+               ppdu_info->he_flags2 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS,
+                                  value);
 
                ppdu_info->is_stbc = info1 &
                                     HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC;
@@ -1040,7 +1360,7 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
                                     info0);
                ppdu_info->ru_alloc =
                        ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(ru_tones);
-
+               ppdu_info->he_RU[0] = ru_tones;
                ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
                break;
        }
@@ -1050,14 +1370,25 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
 
                info0 = __le32_to_cpu(he_sig_b2_mu->info0);
 
+               ppdu_info->he_data1 |= IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+                                      IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;
+
                ppdu_info->mcs =
-                       FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS,
-                                 info0);
+                       FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS, info0);
+               ppdu_info->he_data3 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);
+
+               value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING, info0);
+               ppdu_info->ldpc = value;
+               ppdu_info->he_data3 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
+
+               value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID, info0);
+               ppdu_info->he_data4 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);
+
                ppdu_info->nss =
-                       FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS,
-                                 info0) + 1;
-               ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING,
-                                           info0);
+                       FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS, info0) + 1;
                break;
        }
        case HAL_PHYRX_HE_SIG_B2_OFDMA: {
@@ -1066,17 +1397,40 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
 
                info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);
 
+               ppdu_info->he_data1 |=
+                       IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+                       IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;
+
+               /* HE-data2 */
+               ppdu_info->he_data2 |= IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN;
+
                ppdu_info->mcs =
                        FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS,
                                  info0);
+               ppdu_info->he_data3 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);
+
+               value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM, info0);
+               he_dcm = value;
+               ppdu_info->he_data3 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, value);
+
+               value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING, info0);
+               ppdu_info->ldpc = value;
+               ppdu_info->he_data3 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
+
+               /* HE-data4 */
+               value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID, info0);
+               ppdu_info->he_data4 |=
+                       FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);
+
                ppdu_info->nss =
                        FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS,
                                  info0) + 1;
                ppdu_info->beamformed =
-                       info0 &
-                       HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF;
-               ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING,
-                                           info0);
+                       info0 & HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF;
                ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
                break;
        }
@@ -1118,6 +1472,9 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
                ppdu_info->rx_duration =
                        FIELD_GET(HAL_RX_PPDU_END_DURATION,
                                  __le32_to_cpu(ppdu_rx_duration->info0));
+               ppdu_info->tsft = __le32_to_cpu(ppdu_rx_duration->rsvd0[1]);
+               ppdu_info->tsft = (ppdu_info->tsft << 32) |
+                                       __le32_to_cpu(ppdu_rx_duration->rsvd0[0]);
                break;
        }
        case HAL_DUMMY:
@@ -1141,12 +1498,14 @@ ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
        enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
        u16 tlv_tag;
        u16 tlv_len;
+       u32 tlv_userid = 0;
        u8 *ptr = skb->data;
 
        do {
                tlv = (struct hal_tlv_hdr *)ptr;
                tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl);
                tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
+               tlv_userid = FIELD_GET(HAL_TLV_USR_ID, tlv->tl);
                ptr += sizeof(*tlv);
 
                /* The actual length of PPDU_END is the combined length of many PHY
@@ -1158,7 +1517,7 @@ ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
                        tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
 
                hal_status = ath11k_hal_rx_parse_mon_status_tlv(ab, ppdu_info,
-                                                               tlv_tag, ptr);
+                                                               tlv_tag, ptr, tlv_userid);
                ptr += tlv_len;
                ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
 
index 7bba4f0..f6bae07 100644 (file)
@@ -73,6 +73,36 @@ enum hal_rx_mon_status {
        HAL_RX_MON_STATUS_BUF_DONE,
 };
 
+struct hal_rx_user_status {
+       u32 mcs:4,
+           nss:3,
+           ofdma_info_valid:1,
+           dl_ofdma_ru_start_index:7,
+           dl_ofdma_ru_width:7,
+           dl_ofdma_ru_size:8;
+       u32 ul_ofdma_user_v0_word0;
+       u32 ul_ofdma_user_v0_word1;
+       u32 ast_index;
+       u32 tid;
+       u16 tcp_msdu_count;
+       u16 udp_msdu_count;
+       u16 other_msdu_count;
+       u16 frame_control;
+       u8 frame_control_info_valid;
+       u8 data_sequence_control_info_valid;
+       u16 first_data_seq_ctrl;
+       u32 preamble_type;
+       u16 ht_flags;
+       u16 vht_flags;
+       u16 he_flags;
+       u8 rs_flags;
+       u32 mpdu_cnt_fcs_ok;
+       u32 mpdu_cnt_fcs_err;
+       u32 mpdu_fcs_ok_bitmap[8];
+       u32 mpdu_ok_byte_count;
+       u32 mpdu_err_byte_count;
+};
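
The first word of hal_rx_user_status packs six subfields into 30 of its 32 bits (4+3+1+7+7+8). A quick host-side sanity check of that packing (hypothetical test code, not part of the driver; assumes the usual ABI rule that adjacent bit-fields share one 32-bit unit):

    #include <assert.h>
    #include <stdint.h>

    struct user_status_word0 {
            uint32_t mcs:4,
                     nss:3,
                     ofdma_info_valid:1,
                     dl_ofdma_ru_start_index:7,
                     dl_ofdma_ru_width:7,
                     dl_ofdma_ru_size:8;  /* 30 bits used, 2 bits padding */
    };

    int main(void)
    {
            assert(sizeof(struct user_status_word0) == 4);
            return 0;
    }
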
+
 #define HAL_TLV_STATUS_PPDU_NOT_DONE    HAL_RX_MON_STATUS_PPDU_NOT_DONE
 #define HAL_TLV_STATUS_PPDU_DONE        HAL_RX_MON_STATUS_PPDU_DONE
 #define HAL_TLV_STATUS_BUF_DONE         HAL_RX_MON_STATUS_BUF_DONE
@@ -107,6 +137,12 @@ struct hal_rx_mon_ppdu_info {
        u8 mcs;
        u8 nss;
        u8 bw;
+       u8 vht_flag_values1;
+       u8 vht_flag_values2;
+       u8 vht_flag_values3[4];
+       u8 vht_flag_values4;
+       u8 vht_flag_values5;
+       u16 vht_flag_values6;
        u8 is_stbc;
        u8 gi;
        u8 ldpc;
@@ -114,10 +150,46 @@ struct hal_rx_mon_ppdu_info {
        u8 rssi_comb;
        u8 rssi_chain_pri20[HAL_RX_MAX_NSS];
        u8 tid;
+       u16 ht_flags;
+       u16 vht_flags;
+       u16 he_flags;
+       u16 he_mu_flags;
        u8 dcm;
        u8 ru_alloc;
        u8 reception_type;
+       u64 tsft;
        u64 rx_duration;
+       u16 frame_control;
+       u32 ast_index;
+       u8 rs_fcs_err;
+       u8 rs_flags;
+       u8 cck_flag;
+       u8 ofdm_flag;
+       u8 ulofdma_flag;
+       u8 frame_control_info_valid;
+       u16 he_per_user_1;
+       u16 he_per_user_2;
+       u8 he_per_user_position;
+       u8 he_per_user_known;
+       u16 he_flags1;
+       u16 he_flags2;
+       u8 he_RU[4];
+       u16 he_data1;
+       u16 he_data2;
+       u16 he_data3;
+       u16 he_data4;
+       u16 he_data5;
+       u16 he_data6;
+       u32 ppdu_len;
+       u32 prev_ppdu_id;
+       u32 device_id;
+       u16 first_data_seq_ctrl;
+       u8 monitor_direct_used;
+       u8 data_sequence_control_info_valid;
+       u8 ltf_size;
+       u8 rxpcu_filter_pass;
+       char rssi_chain[8][8];
+       struct hal_rx_user_status userstats;
 };
 
 #define HAL_RX_PPDU_START_INFO0_PPDU_ID                GENMASK(15, 0)
@@ -150,6 +222,9 @@ struct hal_rx_ppdu_start {
 #define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP            GENMASK(15, 0)
 #define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP       GENMASK(31, 16)
 
+#define HAL_RX_PPDU_END_USER_STATS_RSVD2_6_MPDU_OK_BYTE_COUNT  GENMASK(24, 0)
+#define HAL_RX_PPDU_END_USER_STATS_RSVD2_8_MPDU_ERR_BYTE_COUNT GENMASK(24, 0)
+
 struct hal_rx_ppdu_end_user_stats {
        __le32 rsvd0[2];
        __le32 info0;
@@ -164,6 +239,16 @@ struct hal_rx_ppdu_end_user_stats {
        __le32 rsvd2[11];
 } __packed;
 
+struct hal_rx_ppdu_end_user_stats_ext {
+       u32 info0;
+       u32 info1;
+       u32 info2;
+       u32 info3;
+       u32 info4;
+       u32 info5;
+       u32 info6;
+} __packed;
+
 #define HAL_RX_HT_SIG_INFO_INFO0_MCS           GENMASK(6, 0)
 #define HAL_RX_HT_SIG_INFO_INFO0_BW            BIT(7)
 
@@ -212,25 +297,62 @@ enum hal_rx_vht_sig_a_gi_setting {
        HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY = 3,
 };
 
+#define HAL_RX_SU_MU_CODING_LDPC 0x01
+
+#define HE_GI_0_8 0
+#define HE_GI_0_4 1
+#define HE_GI_1_6 2
+#define HE_GI_3_2 3
+
+#define HE_LTF_1_X 0
+#define HE_LTF_2_X 1
+#define HE_LTF_4_X 2
+#define HE_LTF_UNKNOWN 3
+
 #define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS     GENMASK(6, 3)
 #define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM              BIT(7)
 #define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW      GENMASK(20, 19)
 #define HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE      GENMASK(22, 21)
 #define HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS             GENMASK(25, 23)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR                GENMASK(13, 8)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE    GENMASK(18, 15)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND       BIT(0)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE      BIT(1)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG       BIT(2)
 
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION    GENMASK(6, 0)
 #define HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING           BIT(7)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA       BIT(8)
 #define HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC             BIT(9)
 #define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF             BIT(10)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR   GENMASK(12, 11)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM BIT(13)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND      BIT(15)
 
 struct hal_rx_he_sig_a_su_info {
        __le32 info0;
        __le32 info1;
 } __packed;
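
Every decode step above is a GENMASK()/FIELD_GET() pair from <linux/bitfield.h>: GENMASK(h, l) builds a contiguous mask over bits h..l and FIELD_GET() masks the word and shifts the value down. A worked example for the TRANSMIT_MCS field (bits 6:3) with a made-up sample word:

    #include <stdint.h>
    #include <stdio.h>

    /* user-space stand-in for the kernel's GENMASK() */
    #define GENMASK32(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))
    #define MCS_MASK GENMASK32(6, 3)            /* == 0x78 */

    int main(void)
    {
            uint32_t info0 = 0x4a;              /* sample HE-SIG-A info0 */

            /* open-coded FIELD_GET(): mask, then shift by the low bit */
            printf("mcs = %u\n", (info0 & MCS_MASK) >> 3);  /* prints 9 */
            return 0;
    }
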
 
-#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW   GENMASK(17, 15)
-#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE   GENMASK(24, 23)
-
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_UL_FLAG               BIT(1)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_MCS_OF_SIGB           GENMASK(3, 1)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DCM_OF_SIGB           BIT(4)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_BSS_COLOR             GENMASK(10, 5)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_SPATIAL_REUSE GENMASK(14, 11)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW           GENMASK(17, 15)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_NUM_SIGB_SYMB GENMASK(21, 18)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_COMP_MODE_SIGB        BIT(22)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE           GENMASK(24, 23)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DOPPLER_INDICATION    BIT(25)
+
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXOP_DURATION GENMASK(6, 0)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_CODING                BIT(7)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_NUM_LTF_SYMB  GENMASK(10, 8)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_LDPC_EXTRA            BIT(11)
 #define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC          BIT(12)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXBF          BIT(10)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_FACTOR        GENMASK(14, 13)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_PE_DISAM      BIT(15)
 
 struct hal_rx_he_sig_a_mu_dl_info {
        __le32 info0;
@@ -243,6 +365,7 @@ struct hal_rx_he_sig_b1_mu_info {
        __le32 info0;
 } __packed;
 
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID          GENMASK(10, 0)
 #define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS         GENMASK(18, 15)
 #define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING      BIT(20)
 #define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS                GENMASK(31, 29)
@@ -251,6 +374,7 @@ struct hal_rx_he_sig_b2_mu_info {
        __le32 info0;
 } __packed;
 
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID       GENMASK(10, 0)
 #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS     GENMASK(13, 11)
 #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF     BIT(19)
 #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS      GENMASK(18, 15)
@@ -279,11 +403,14 @@ struct hal_rx_phyrx_rssi_legacy_info {
 
 #define HAL_RX_MPDU_INFO_INFO0_PEERID  GENMASK(31, 16)
 #define HAL_RX_MPDU_INFO_INFO0_PEERID_WCN6855  GENMASK(15, 0)
+#define HAL_RX_MPDU_INFO_INFO1_MPDU_LEN                GENMASK(13, 0)
 
 struct hal_rx_mpdu_info {
        __le32 rsvd0;
        __le32 info0;
-       __le32 rsvd1[21];
+       __le32 rsvd1[11];
+       __le32 info1;
+       __le32 rsvd2[9];
 } __packed;
 
 struct hal_rx_mpdu_info_wcn6855 {
index b7e3b66..d1b0e76 100644 (file)
@@ -813,6 +813,12 @@ static u16 ath11k_hw_wcn6855_mpdu_info_get_peerid(u8 *tlv_data)
        return peer_id;
 }
 
+static bool ath11k_hw_wcn6855_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+       return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+                        __le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
+}
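
The new getter slots into the per-chip ath11k_hw_ops table (wired up for wcn6855 below), so the rx path never hard-codes a descriptor layout. The call site reduces to one indirect call, roughly as follows (kernel-context sketch, not the literal call site):

    static bool rx_desc_ldpc(struct ath11k_base *ab, struct hal_rx_desc *desc)
    {
            /* the ops table is chosen per chip at probe time, so the
             * IPQ8074, QCN9074 and WCN6855 descriptor layouts stay
             * hidden behind this one indirection
             */
            return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
    }
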
+
 const struct ath11k_hw_ops ipq8074_ops = {
        .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
        .wmi_init_config = ath11k_init_wmi_config_ipq8074,
@@ -983,6 +989,7 @@ const struct ath11k_hw_ops wcn6855_ops = {
        .rx_desc_get_encrypt_type = ath11k_hw_wcn6855_rx_desc_get_encrypt_type,
        .rx_desc_get_decap_type = ath11k_hw_wcn6855_rx_desc_get_decap_type,
        .rx_desc_get_mesh_ctl = ath11k_hw_wcn6855_rx_desc_get_mesh_ctl,
+       .rx_desc_get_ldpc_support = ath11k_hw_wcn6855_rx_desc_get_ldpc_support,
        .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld,
        .rx_desc_get_mpdu_fc_valid = ath11k_hw_wcn6855_rx_desc_get_mpdu_fc_valid,
        .rx_desc_get_mpdu_start_seq_no = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no,
index c10e1a0..27ca4a9 100644 (file)
@@ -193,6 +193,7 @@ struct ath11k_hw_params {
        bool supports_rssi_stats;
        bool fw_wmi_diag_event;
        bool current_cc_support;
+       bool dbr_debug_support;
 };
 
 struct ath11k_hw_ops {
index 90fcd6a..d5b83f9 100644 (file)
@@ -5579,7 +5579,7 @@ static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb,
 
        skb_queue_tail(q, skb);
        atomic_inc(&ar->num_pending_mgmt_tx);
-       ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+       queue_work(ar->ab->workqueue, &ar->wmi_mgmt_tx_work);
 
        return 0;
 }
@@ -6354,6 +6354,10 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
                }
        }
 
+       ret = ath11k_debugfs_add_interface(arvif);
+       if (ret)
+               goto err_peer_del;
+
        mutex_unlock(&ar->conf_mutex);
 
        return 0;
@@ -6388,6 +6392,7 @@ err_vdev_del:
        spin_unlock_bh(&ar->data_lock);
 
 err:
+       ath11k_debugfs_remove_interface(arvif);
        mutex_unlock(&ar->conf_mutex);
 
        return ret;
@@ -6486,6 +6491,8 @@ err_vdev_del:
        /* Recalc txpower for remaining vdev */
        ath11k_mac_txpower_recalc(ar);
 
+       ath11k_debugfs_remove_interface(arvif);
+
        /* TODO: recalc traffic pause state based on the available vdevs */
 
        mutex_unlock(&ar->conf_mutex);
@@ -6623,12 +6630,13 @@ static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
 
 static int
 ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
-                             const struct cfg80211_chan_def *chandef,
+                             struct ieee80211_chanctx_conf *ctx,
                              bool restart)
 {
        struct ath11k *ar = arvif->ar;
        struct ath11k_base *ab = ar->ab;
        struct wmi_vdev_start_req_arg arg = {};
+       const struct cfg80211_chan_def *chandef = &ctx->def;
        int he_support = arvif->vif->bss_conf.he_support;
        int ret = 0;
 
@@ -6663,8 +6671,7 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
                arg.channel.chan_radar =
                        !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
 
-               arg.channel.freq2_radar =
-                       !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+               arg.channel.freq2_radar = ctx->radar_enabled;
 
                arg.channel.passive = arg.channel.chan_radar;
 
@@ -6774,15 +6781,15 @@ err:
 }
 
 static int ath11k_mac_vdev_start(struct ath11k_vif *arvif,
-                                const struct cfg80211_chan_def *chandef)
+                                struct ieee80211_chanctx_conf *ctx)
 {
-       return ath11k_mac_vdev_start_restart(arvif, chandef, false);
+       return ath11k_mac_vdev_start_restart(arvif, ctx, false);
 }
 
 static int ath11k_mac_vdev_restart(struct ath11k_vif *arvif,
-                                  const struct cfg80211_chan_def *chandef)
+                                  struct ieee80211_chanctx_conf *ctx)
 {
-       return ath11k_mac_vdev_start_restart(arvif, chandef, true);
+       return ath11k_mac_vdev_start_restart(arvif, ctx, true);
 }
 
 struct ath11k_mac_change_chanctx_arg {
@@ -6849,13 +6856,33 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
                if (WARN_ON(!arvif->is_started))
                        continue;
 
-               if (WARN_ON(!arvif->is_up))
-                       continue;
+               /* change_chanctx can be called even before vdev_up from
+                * ieee80211_start_ap->ieee80211_vif_use_channel->
+                * ieee80211_recalc_radar_chanctx.
+                *
+                * Firmware expects vdev_restart only if the vdev is up.
+                * If the vdev is down then it expects vdev_stop->vdev_start.
+                */
+               if (arvif->is_up) {
+                       ret = ath11k_mac_vdev_restart(arvif, vifs[i].new_ctx);
+                       if (ret) {
+                               ath11k_warn(ab, "failed to restart vdev %d: %d\n",
+                                           arvif->vdev_id, ret);
+                               continue;
+                       }
+               } else {
+                       ret = ath11k_mac_vdev_stop(arvif);
+                       if (ret) {
+                               ath11k_warn(ab, "failed to stop vdev %d: %d\n",
+                                           arvif->vdev_id, ret);
+                               continue;
+                       }
+
+                       ret = ath11k_mac_vdev_start(arvif, vifs[i].new_ctx);
+                       if (ret)
+                               ath11k_warn(ab, "failed to start vdev %d: %d\n",
+                                           arvif->vdev_id, ret);
 
-               ret = ath11k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def);
-               if (ret) {
-                       ath11k_warn(ab, "failed to restart vdev %d: %d\n",
-                                   arvif->vdev_id, ret);
                        continue;
                }
 
@@ -6940,7 +6967,8 @@ static void ath11k_mac_op_change_chanctx(struct ieee80211_hw *hw,
        if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
                goto unlock;
 
-       if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH)
+       if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH ||
+           changed & IEEE80211_CHANCTX_CHANGE_RADAR)
                ath11k_mac_update_active_vif_chan(ar, ctx);
 
        /* TODO: Recalc radar detection */
@@ -6960,7 +6988,7 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
        if (WARN_ON(arvif->is_started))
                return -EBUSY;
 
-       ret = ath11k_mac_vdev_start(arvif, &arvif->chanctx.def);
+       ret = ath11k_mac_vdev_start(arvif, &arvif->chanctx);
        if (ret) {
                ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
                            arvif->vdev_id, vif->addr,
@@ -7054,7 +7082,7 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
                goto out;
        }
 
-       ret = ath11k_mac_vdev_start(arvif, &ctx->def);
+       ret = ath11k_mac_vdev_start(arvif, ctx);
        if (ret) {
                ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
                            arvif->vdev_id, vif->addr,
@@ -8448,7 +8476,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
        ar->hw->queues = ATH11K_HW_MAX_QUEUES;
        ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN;
        ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1;
-       ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+       ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
 
        ar->hw->vif_data_size = sizeof(struct ath11k_vif);
        ar->hw->sta_data_size = sizeof(struct ath11k_sta);
index cccaa34..fc3524e 100644 (file)
@@ -13,6 +13,7 @@
 #include "pci.h"
 
 #define MHI_TIMEOUT_DEFAULT_MS 90000
+#define RDDM_DUMP_SIZE 0x420000
 
 static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
        {
@@ -382,6 +383,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
                mhi_ctrl->iova_stop = 0xFFFFFFFF;
        }
 
+       mhi_ctrl->rddm_size = RDDM_DUMP_SIZE;
        mhi_ctrl->sbl_size = SZ_512K;
        mhi_ctrl->seg_len = SZ_512K;
        mhi_ctrl->fbc_download = true;
@@ -561,7 +563,7 @@ static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci,
                ret = 0;
                break;
        case ATH11K_MHI_POWER_ON:
-               ret = mhi_async_power_up(ab_pci->mhi_ctrl);
+               ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
                break;
        case ATH11K_MHI_POWER_OFF:
                mhi_power_down(ab_pci->mhi_ctrl, true);
index de71ad5..9037587 100644 (file)
@@ -1571,6 +1571,11 @@ static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
        struct ath11k_base *ab = dev_get_drvdata(dev);
        int ret;
 
+       if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+               ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci suspend as qmi is not initialised\n");
+               return 0;
+       }
+
        ret = ath11k_core_suspend(ab);
        if (ret)
                ath11k_warn(ab, "failed to suspend core: %d\n", ret);
@@ -1583,6 +1588,11 @@ static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
        struct ath11k_base *ab = dev_get_drvdata(dev);
        int ret;
 
+       if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+               ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci resume as qmi is not initialised\n");
+               return 0;
+       }
+
        ret = ath11k_core_resume(ab);
        if (ret)
                ath11k_warn(ab, "failed to resume core: %d\n", ret);
index d0701e8..04e9668 100644 (file)
@@ -2342,6 +2342,7 @@ static void ath11k_qmi_m3_free(struct ath11k_base *ab)
        dma_free_coherent(ab->dev, m3_mem->size,
                          m3_mem->vaddr, m3_mem->paddr);
        m3_mem->vaddr = NULL;
+       m3_mem->size = 0;
 }
 
 static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
@@ -2959,7 +2960,11 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
                                clear_bit(ATH11K_FLAG_CRASH_FLUSH,
                                          &ab->dev_flags);
                                clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
-                               ath11k_core_qmi_firmware_ready(ab);
+                               ret = ath11k_core_qmi_firmware_ready(ab);
+                               if (ret) {
+                                       set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+                                       break;
+                               }
                                set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags);
                        }
 
@@ -3025,3 +3030,8 @@ void ath11k_qmi_deinit_service(struct ath11k_base *ab)
 }
 EXPORT_SYMBOL(ath11k_qmi_deinit_service);
 
+void ath11k_qmi_free_resource(struct ath11k_base *ab)
+{
+       ath11k_qmi_free_target_mem_chunk(ab);
+       ath11k_qmi_m3_free(ab);
+}
index ba2eff4..61678de 100644 (file)
@@ -492,5 +492,6 @@ void ath11k_qmi_event_work(struct work_struct *work);
 void ath11k_qmi_msg_recv_work(struct work_struct *work);
 void ath11k_qmi_deinit_service(struct ath11k_base *ab);
 int ath11k_qmi_init_service(struct ath11k_base *ab);
+void ath11k_qmi_free_resource(struct ath11k_base *ab);
 
 #endif
index 79c5080..26ecc1b 100644 (file)
@@ -1445,7 +1445,7 @@ struct hal_rx_desc_ipq8074 {
        __le32 hdr_status_tag;
        __le32 phy_ppdu_id;
        u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
-       u8 msdu_payload[0];
+       u8 msdu_payload[];
 } __packed;
 
 struct hal_rx_desc_qcn9074 {
@@ -1464,7 +1464,7 @@ struct hal_rx_desc_qcn9074 {
        __le32 hdr_status_tag;
        __le32 phy_ppdu_id;
        u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
-       u8 msdu_payload[0];
+       u8 msdu_payload[];
 } __packed;
 
 struct hal_rx_desc_wcn6855 {
@@ -1483,7 +1483,7 @@ struct hal_rx_desc_wcn6855 {
        __le32 hdr_status_tag;
        __le32 phy_ppdu_id;
        u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
-       u8 msdu_payload[0];
+       u8 msdu_payload[];
 } __packed;
 
 struct hal_rx_desc {
index 4100cc1..2b18871 100644 (file)
@@ -107,7 +107,7 @@ struct spectral_search_fft_report {
        __le32 info1;
        __le32 info2;
        __le32 reserve0;
-       u8 bins[0];
+       u8 bins[];
 } __packed;
 
 struct ath11k_spectral_search_report {
index 6b68ccf..b4f86c4 100644 (file)
@@ -144,6 +144,8 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
                .min_len = sizeof(struct wmi_11d_new_cc_ev) },
        [WMI_TAG_PER_CHAIN_RSSI_STATS] = {
                .min_len = sizeof(struct wmi_per_chain_rssi_stats) },
+       [WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = {
+               .min_len = sizeof(struct wmi_twt_add_dialog_event) },
 };
 
 #define PRIMAP(_hw_mode_) \
@@ -3085,11 +3087,12 @@ ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id)
        /* TODO add MBSSID support */
        cmd->mbss_support = 0;
 
-       ret = ath11k_wmi_cmd_send(wmi, skb,
-                                 WMI_TWT_ENABLE_CMDID);
+       ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ENABLE_CMDID);
        if (ret) {
                ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
                dev_kfree_skb(skb);
+       } else {
+               ar->twt_enabled = 1;
        }
        return ret;
 }
@@ -3114,11 +3117,181 @@ ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id)
                          FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
        cmd->pdev_id = pdev_id;
 
-       ret = ath11k_wmi_cmd_send(wmi, skb,
-                                 WMI_TWT_DISABLE_CMDID);
+       ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DISABLE_CMDID);
        if (ret) {
                ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
                dev_kfree_skb(skb);
+       } else {
+               ar->twt_enabled = 0;
+       }
+       return ret;
+}
+
+int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
+                                      struct wmi_twt_add_dialog_params *params)
+{
+       struct ath11k_pdev_wmi *wmi = ar->wmi;
+       struct ath11k_base *ab = wmi->wmi_ab->ab;
+       struct wmi_twt_add_dialog_params_cmd *cmd;
+       struct sk_buff *skb;
+       int ret, len;
+
+       len = sizeof(*cmd);
+
+       skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_twt_add_dialog_params_cmd *)skb->data;
+       cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ADD_DIALOG_CMD) |
+                         FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+       cmd->vdev_id = params->vdev_id;
+       ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+       cmd->dialog_id = params->dialog_id;
+       cmd->wake_intvl_us = params->wake_intvl_us;
+       cmd->wake_intvl_mantis = params->wake_intvl_mantis;
+       cmd->wake_dura_us = params->wake_dura_us;
+       cmd->sp_offset_us = params->sp_offset_us;
+       cmd->flags = params->twt_cmd;
+       if (params->flag_bcast)
+               cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_BCAST;
+       if (params->flag_trigger)
+               cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_TRIGGER;
+       if (params->flag_flow_type)
+               cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE;
+       if (params->flag_protection)
+               cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_PROTECTION;
+
+       ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+                  "wmi add twt dialog vdev %u dialog id %u wake interval %u mantissa %u wake duration %u service period offset %u flags 0x%x\n",
+                  cmd->vdev_id, cmd->dialog_id, cmd->wake_intvl_us,
+                  cmd->wake_intvl_mantis, cmd->wake_dura_us, cmd->sp_offset_us,
+                  cmd->flags);
+
+       ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ADD_DIALOG_CMDID);
+
+       if (ret) {
+               ath11k_warn(ab,
+                           "failed to send wmi command to add twt dialog: %d",
+                           ret);
+               dev_kfree_skb(skb);
+       }
+       return ret;
+}
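
A hypothetical caller of the new add-dialog command; the wmi_twt_add_dialog_params host struct is defined later in wmi.h, and the field values below are placeholders only:

    /* kernel-context sketch, not a real call site */
    static int twt_setup_demo(struct ath11k *ar, const u8 *peer_mac)
    {
            struct wmi_twt_add_dialog_params params = {
                    .vdev_id           = 0,
                    .dialog_id         = 1,
                    .wake_intvl_us     = 65536,
                    .wake_intvl_mantis = 64,
                    .wake_dura_us      = 8192,
                    .sp_offset_us      = 0,
                    .twt_cmd           = WMI_HOST_TWT_COMMAND_REQUEST_TWT,
                    .flag_trigger      = 1,
            };

            ether_addr_copy(params.peer_macaddr, peer_mac);
            return ath11k_wmi_send_twt_add_dialog_cmd(ar, &params);
    }
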
+
+int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar,
+                                      struct wmi_twt_del_dialog_params *params)
+{
+       struct ath11k_pdev_wmi *wmi = ar->wmi;
+       struct ath11k_base *ab = wmi->wmi_ab->ab;
+       struct wmi_twt_del_dialog_params_cmd *cmd;
+       struct sk_buff *skb;
+       int ret, len;
+
+       len = sizeof(*cmd);
+
+       skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_twt_del_dialog_params_cmd *)skb->data;
+       cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DEL_DIALOG_CMD) |
+                         FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+       cmd->vdev_id = params->vdev_id;
+       ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+       cmd->dialog_id = params->dialog_id;
+
+       ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+                  "wmi delete twt dialog vdev %u dialog id %u\n",
+                  cmd->vdev_id, cmd->dialog_id);
+
+       ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DEL_DIALOG_CMDID);
+       if (ret) {
+               ath11k_warn(ab,
+                           "failed to send wmi command to delete twt dialog: %d",
+                           ret);
+               dev_kfree_skb(skb);
+       }
+       return ret;
+}
+
+int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar,
+                                        struct wmi_twt_pause_dialog_params *params)
+{
+       struct ath11k_pdev_wmi *wmi = ar->wmi;
+       struct ath11k_base *ab = wmi->wmi_ab->ab;
+       struct wmi_twt_pause_dialog_params_cmd *cmd;
+       struct sk_buff *skb;
+       int ret, len;
+
+       len = sizeof(*cmd);
+
+       skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_twt_pause_dialog_params_cmd *)skb->data;
+       cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+                                    WMI_TAG_TWT_PAUSE_DIALOG_CMD) |
+                         FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+       cmd->vdev_id = params->vdev_id;
+       ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+       cmd->dialog_id = params->dialog_id;
+
+       ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+                  "wmi pause twt dialog vdev %u dialog id %u\n",
+                  cmd->vdev_id, cmd->dialog_id);
+
+       ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_PAUSE_DIALOG_CMDID);
+       if (ret) {
+               ath11k_warn(ab,
+                           "failed to send wmi command to pause twt dialog: %d",
+                           ret);
+               dev_kfree_skb(skb);
+       }
+       return ret;
+}
+
+int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar,
+                                         struct wmi_twt_resume_dialog_params *params)
+{
+       struct ath11k_pdev_wmi *wmi = ar->wmi;
+       struct ath11k_base *ab = wmi->wmi_ab->ab;
+       struct wmi_twt_resume_dialog_params_cmd *cmd;
+       struct sk_buff *skb;
+       int ret, len;
+
+       len = sizeof(*cmd);
+
+       skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_twt_resume_dialog_params_cmd *)skb->data;
+       cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+                                    WMI_TAG_TWT_RESUME_DIALOG_CMD) |
+                         FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+       cmd->vdev_id = params->vdev_id;
+       ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+       cmd->dialog_id = params->dialog_id;
+       cmd->sp_offset_us = params->sp_offset_us;
+       cmd->next_twt_size = params->next_twt_size;
+
+       ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+                  "wmi resume twt dialog vdev %u dialog id %u service period offset %u next twt subfield size %u\n",
+                  cmd->vdev_id, cmd->dialog_id, cmd->sp_offset_us,
+                  cmd->next_twt_size);
+
+       ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_RESUME_DIALOG_CMDID);
+       if (ret) {
+               ath11k_warn(ab,
+                           "failed to send wmi command to resume twt dialog: %d",
+                           ret);
+               dev_kfree_skb(skb);
        }
        return ret;
 }
@@ -7532,6 +7705,66 @@ ath11k_wmi_diag_event(struct ath11k_base *ab,
        trace_ath11k_wmi_diag(ab, skb->data, skb->len);
 }
 
+static const char *ath11k_wmi_twt_add_dialog_event_status(u32 status)
+{
+       switch (status) {
+       case WMI_ADD_TWT_STATUS_OK:
+               return "ok";
+       case WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED:
+               return "twt disabled";
+       case WMI_ADD_TWT_STATUS_USED_DIALOG_ID:
+               return "dialog id in use";
+       case WMI_ADD_TWT_STATUS_INVALID_PARAM:
+               return "invalid parameters";
+       case WMI_ADD_TWT_STATUS_NOT_READY:
+               return "not ready";
+       case WMI_ADD_TWT_STATUS_NO_RESOURCE:
+               return "resource unavailable";
+       case WMI_ADD_TWT_STATUS_NO_ACK:
+               return "no ack";
+       case WMI_ADD_TWT_STATUS_NO_RESPONSE:
+               return "no response";
+       case WMI_ADD_TWT_STATUS_DENIED:
+               return "denied";
+       case WMI_ADD_TWT_STATUS_UNKNOWN_ERROR:
+               fallthrough;
+       default:
+               return "unknown error";
+       }
+}
+
+static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab,
+                                           struct sk_buff *skb)
+{
+       const void **tb;
+       const struct wmi_twt_add_dialog_event *ev;
+       int ret;
+
+       tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+       if (IS_ERR(tb)) {
+               ret = PTR_ERR(tb);
+               ath11k_warn(ab,
+                           "failed to parse wmi twt add dialog status event tlv: %d\n",
+                           ret);
+               return;
+       }
+
+       ev = tb[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT];
+       if (!ev) {
+               ath11k_warn(ab, "failed to fetch twt add dialog wmi event\n");
+               goto exit;
+       }
+
+       if (ev->status)
+               ath11k_warn(ab,
+                           "wmi add twt dialog event vdev %d dialog id %d status %s\n",
+                           ev->vdev_id, ev->dialog_id,
+                           ath11k_wmi_twt_add_dialog_event_status(ev->status));
+
+exit:
+       kfree(tb);
+}
+
 static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
 {
        struct wmi_cmd_hdr *cmd_hdr;
@@ -7629,11 +7862,17 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
        case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
                ath11k_wmi_obss_color_collision_event(ab, skb);
                break;
+       case WMI_TWT_ADD_DIALOG_EVENTID:
+               ath11k_wmi_twt_add_dialog_event(ab, skb);
+               break;
        /* add Unsupported events here */
        case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
        case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
        case WMI_TWT_ENABLE_EVENTID:
        case WMI_TWT_DISABLE_EVENTID:
+       case WMI_TWT_DEL_DIALOG_EVENTID:
+       case WMI_TWT_PAUSE_DIALOG_EVENTID:
+       case WMI_TWT_RESUME_DIALOG_EVENTID:
        case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
        case WMI_PEER_CREATE_CONF_EVENTID:
                ath11k_dbg(ab, ATH11K_DBG_WMI,
@@ -7798,6 +8037,59 @@ int ath11k_wmi_simulate_radar(struct ath11k *ar)
        return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
 }
 
+int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
+                            struct ath11k_fw_dbglog *dbglog)
+{
+       struct ath11k_pdev_wmi *wmi = ar->wmi;
+       struct wmi_debug_log_config_cmd_fixed_param *cmd;
+       struct sk_buff *skb;
+       struct wmi_tlv *tlv;
+       int ret, len;
+
+       len = sizeof(*cmd) + TLV_HDR_SIZE + (MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+       skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_debug_log_config_cmd_fixed_param *)skb->data;
+       cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DEBUG_LOG_CONFIG_CMD) |
+                         FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+       cmd->dbg_log_param = dbglog->param;
+
+       tlv = (struct wmi_tlv *)((u8 *)cmd + sizeof(*cmd));
+       tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+                     FIELD_PREP(WMI_TLV_LEN, MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+
+       switch (dbglog->param) {
+       case WMI_DEBUG_LOG_PARAM_LOG_LEVEL:
+       case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE:
+       case WMI_DEBUG_LOG_PARAM_VDEV_DISABLE:
+       case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP:
+               cmd->value = dbglog->value;
+               break;
+       case WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP:
+       case WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP:
+               cmd->value = dbglog->value;
+               memcpy(tlv->value, module_id_bitmap,
+                      MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+               /* clear current config to be used for next user config */
+               memset(module_id_bitmap, 0,
+                      MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+               break;
+       default:
+               dev_kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DBGLOG_CFG_CMDID);
+       if (ret) {
+               ath11k_warn(ar->ab,
+                           "failed to send WMI_DBGLOG_CFG_CMDID\n");
+               dev_kfree_skb(skb);
+       }
+       return ret;
+}
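
The command is laid out as a fixed-param TLV followed by a WMI_TAG_ARRAY_UINT32 TLV carrying the module-id bitmap; the bitmap is only meaningful for the two *_MOD_ENABLE_BITMAP params and is cleared after sending. A hypothetical caller setting a plain log level (kernel-context sketch):

    static int dbglog_set_level_demo(struct ath11k *ar, u32 level)
    {
            struct ath11k_fw_dbglog dbglog = {
                    .param = WMI_DEBUG_LOG_PARAM_LOG_LEVEL,
                    .value = level,
            };
            u32 module_id_bitmap[MAX_MODULE_ID_BITMAP_WORDS] = {};

            /* the bitmap is ignored for LOG_LEVEL but the API takes it */
            return ath11k_wmi_fw_dbglog_cfg(ar, module_id_bitmap, &dbglog);
    }
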
+
 int ath11k_wmi_connect(struct ath11k_base *ab)
 {
        u32 i;
index 2f26ec1..587f423 100644 (file)
@@ -12,6 +12,7 @@
 struct ath11k_base;
 struct ath11k;
 struct ath11k_fw_stats;
+struct ath11k_fw_dbglog;
 
 #define PSOC_HOST_MAX_NUM_SS (8)
 
@@ -4952,6 +4953,112 @@ struct wmi_twt_disable_params_cmd {
        u32 pdev_id;
 } __packed;
 
+enum WMI_HOST_TWT_COMMAND {
+       WMI_HOST_TWT_COMMAND_REQUEST_TWT = 0,
+       WMI_HOST_TWT_COMMAND_SUGGEST_TWT,
+       WMI_HOST_TWT_COMMAND_DEMAND_TWT,
+       WMI_HOST_TWT_COMMAND_TWT_GROUPING,
+       WMI_HOST_TWT_COMMAND_ACCEPT_TWT,
+       WMI_HOST_TWT_COMMAND_ALTERNATE_TWT,
+       WMI_HOST_TWT_COMMAND_DICTATE_TWT,
+       WMI_HOST_TWT_COMMAND_REJECT_TWT,
+};
+
+#define WMI_TWT_ADD_DIALOG_FLAG_BCAST           BIT(8)
+#define WMI_TWT_ADD_DIALOG_FLAG_TRIGGER         BIT(9)
+#define WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE       BIT(10)
+#define WMI_TWT_ADD_DIALOG_FLAG_PROTECTION      BIT(11)
+
+struct wmi_twt_add_dialog_params_cmd {
+       u32 tlv_header;
+       u32 vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       u32 dialog_id;
+       u32 wake_intvl_us;
+       u32 wake_intvl_mantis;
+       u32 wake_dura_us;
+       u32 sp_offset_us;
+       u32 flags;
+} __packed;
+
+struct wmi_twt_add_dialog_params {
+       u32 vdev_id;
+       u8 peer_macaddr[ETH_ALEN];
+       u32 dialog_id;
+       u32 wake_intvl_us;
+       u32 wake_intvl_mantis;
+       u32 wake_dura_us;
+       u32 sp_offset_us;
+       u8 twt_cmd;
+       u8 flag_bcast;
+       u8 flag_trigger;
+       u8 flag_flow_type;
+       u8 flag_protection;
+} __packed;
+
+enum wmi_twt_add_dialog_status {
+       WMI_ADD_TWT_STATUS_OK,
+       WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED,
+       WMI_ADD_TWT_STATUS_USED_DIALOG_ID,
+       WMI_ADD_TWT_STATUS_INVALID_PARAM,
+       WMI_ADD_TWT_STATUS_NOT_READY,
+       WMI_ADD_TWT_STATUS_NO_RESOURCE,
+       WMI_ADD_TWT_STATUS_NO_ACK,
+       WMI_ADD_TWT_STATUS_NO_RESPONSE,
+       WMI_ADD_TWT_STATUS_DENIED,
+       WMI_ADD_TWT_STATUS_UNKNOWN_ERROR,
+};
+
+struct wmi_twt_add_dialog_event {
+       u32 vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       u32 dialog_id;
+       u32 status;
+} __packed;
+
+struct wmi_twt_del_dialog_params {
+       u32 vdev_id;
+       u8 peer_macaddr[ETH_ALEN];
+       u32 dialog_id;
+} __packed;
+
+struct wmi_twt_del_dialog_params_cmd {
+       u32 tlv_header;
+       u32 vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       u32 dialog_id;
+} __packed;
+
+struct wmi_twt_pause_dialog_params {
+       u32 vdev_id;
+       u8 peer_macaddr[ETH_ALEN];
+       u32 dialog_id;
+} __packed;
+
+struct wmi_twt_pause_dialog_params_cmd {
+       u32 tlv_header;
+       u32 vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       u32 dialog_id;
+} __packed;
+
+struct wmi_twt_resume_dialog_params {
+       u32 vdev_id;
+       u8 peer_macaddr[ETH_ALEN];
+       u32 dialog_id;
+       u32 sp_offset_us;
+       u32 next_twt_size;
+} __packed;
+
+struct wmi_twt_resume_dialog_params_cmd {
+       u32 tlv_header;
+       u32 vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       u32 dialog_id;
+       u32 sp_offset_us;
+       u32 next_twt_size;
+} __packed;
+
 struct wmi_obss_spatial_reuse_params_cmd {
        u32 tlv_header;
        u32 pdev_id;
@@ -5240,6 +5347,21 @@ struct wmi_rfkill_state_change_ev {
        u32 radio_state;
 } __packed;
 
+enum wmi_debug_log_param {
+       WMI_DEBUG_LOG_PARAM_LOG_LEVEL = 0x1,
+       WMI_DEBUG_LOG_PARAM_VDEV_ENABLE,
+       WMI_DEBUG_LOG_PARAM_VDEV_DISABLE,
+       WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP,
+       WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP,
+       WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP,
+};
+
+struct wmi_debug_log_config_cmd_fixed_param {
+       u32 tlv_header;
+       u32 dbg_log_param;
+       u32 value;
+} __packed;
+
 #define WMI_MAX_MEM_REQS 32
 
 #define MAX_RADIOS 3
@@ -5546,6 +5668,14 @@ void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
 int ath11k_wmi_simulate_radar(struct ath11k *ar);
 int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id);
 int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id);
+int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
+                                      struct wmi_twt_add_dialog_params *params);
+int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar,
+                                      struct wmi_twt_del_dialog_params *params);
+int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar,
+                                        struct wmi_twt_pause_dialog_params *params);
+int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar,
+                                         struct wmi_twt_resume_dialog_params *params);
 int ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
                                 struct ieee80211_he_obss_pd *he_obss_pd);
 int ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap);
@@ -5582,4 +5712,6 @@ int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar);
 int ath11k_wmi_wow_enable(struct ath11k *ar);
 int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
                                 const u8 mac_addr[ETH_ALEN]);
+int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
+                            struct ath11k_fw_dbglog *dbglog);
 #endif
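
The four TWT dialog entry points above pair with the TLV structs added earlier in this header. A minimal sketch of how mac80211 glue might drive the add-dialog path; the wrapper name, vdev id and timing values are illustrative assumptions, not part of this patch:

	/* illustrative caller; values are assumptions, not from the patch */
	static int example_twt_add(struct ath11k *ar, const u8 *peer, u32 dialog_id)
	{
		struct wmi_twt_add_dialog_params params = {};

		params.vdev_id = 0;			/* assumed vdev */
		ether_addr_copy(params.peer_macaddr, peer);
		params.dialog_id = dialog_id;
		params.wake_intvl_us = 64 * 1024;	/* example wake interval */
		params.wake_dura_us = 8 * 1024;		/* example wake duration */
		params.flag_flow_type = 1;		/* e.g. unannounced TWT */

		return ath11k_wmi_send_twt_add_dialog_cmd(ar, &params);
	}

Tearing the session down again goes through ath11k_wmi_send_twt_del_dialog_cmd() with the same vdev/peer/dialog_id triple.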
index b22ed49..a56fab6 100644 (file)
@@ -839,7 +839,7 @@ static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
 
        skb->protocol = eth_type_trans(skb, skb->dev);
 
-       netif_rx_ni(skb);
+       netif_rx(skb);
 }
 
 static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
index aba70f3..65e683e 100644 (file)
@@ -1217,6 +1217,7 @@ static int ath6kl_usb_pm_resume(struct usb_interface *interface)
 static const struct usb_device_id ath6kl_usb_ids[] = {
        {USB_DEVICE(0x0cf3, 0x9375)},
        {USB_DEVICE(0x0cf3, 0x9374)},
+       {USB_DEVICE(0x04da, 0x390d)},
        { /* Terminating entry */ },
 };
 
index bd1ef63..3787b9f 100644 (file)
@@ -1750,7 +1750,6 @@ static int ath6kl_wmi_snr_threshold_event_rx(struct wmi *wmi, u8 *datap,
 
 static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
 {
-       u16 ap_info_entry_size;
        struct wmi_aplist_event *ev = (struct wmi_aplist_event *) datap;
        struct wmi_ap_info_v1 *ap_info_v1;
        u8 index;
@@ -1759,14 +1758,12 @@ static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
            ev->ap_list_ver != APLIST_VER1)
                return -EINVAL;
 
-       ap_info_entry_size = sizeof(struct wmi_ap_info_v1);
        ap_info_v1 = (struct wmi_ap_info_v1 *) ev->ap_list;
 
        ath6kl_dbg(ATH6KL_DBG_WMI,
                   "number of APs in aplist event: %d\n", ev->num_ap);
 
-       if (len < (int) (sizeof(struct wmi_aplist_event) +
-                        (ev->num_ap - 1) * ap_info_entry_size))
+       if (len < struct_size(ev, ap_list, ev->num_ap))
                return -EINVAL;
 
        /* AP list version 1 contents */
@@ -1959,21 +1956,15 @@ static int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
 {
        struct sk_buff *skb;
        struct wmi_start_scan_cmd *sc;
-       s8 size;
        int i, ret;
 
-       size = sizeof(struct wmi_start_scan_cmd);
-
        if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
                return -EINVAL;
 
        if (num_chan > WMI_MAX_CHANNELS)
                return -EINVAL;
 
-       if (num_chan)
-               size += sizeof(u16) * (num_chan - 1);
-
-       skb = ath6kl_wmi_get_new_buf(size);
+       skb = ath6kl_wmi_get_new_buf(struct_size(sc, ch_list, num_chan));
        if (!skb)
                return -ENOMEM;
 
@@ -2008,7 +1999,7 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
        struct ieee80211_supported_band *sband;
        struct sk_buff *skb;
        struct wmi_begin_scan_cmd *sc;
-       s8 size, *supp_rates;
+       s8 *supp_rates;
        int i, band, ret;
        struct ath6kl *ar = wmi->parent_dev;
        int num_rates;
@@ -2023,18 +2014,13 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
                                                num_chan, ch_list);
        }
 
-       size = sizeof(struct wmi_begin_scan_cmd);
-
        if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
                return -EINVAL;
 
        if (num_chan > WMI_MAX_CHANNELS)
                return -EINVAL;
 
-       if (num_chan)
-               size += sizeof(u16) * (num_chan - 1);
-
-       skb = ath6kl_wmi_get_new_buf(size);
+       skb = ath6kl_wmi_get_new_buf(struct_size(sc, ch_list, num_chan));
        if (!skb)
                return -ENOMEM;
 
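
Both scan-command builders now size their buffer with struct_size() instead of open-coded byte arithmetic; the matching ch_list[1] to ch_list[] conversion follows in the header below. A sketch of the pattern with demo types, assuming <linux/overflow.h>:

	#include <linux/overflow.h>

	struct demo_cmd {
		u8 num_ch;
		__le16 ch_list[];	/* flexible array member */
	};

	static size_t demo_len(struct demo_cmd *sc, u8 num_chan)
	{
		/* header plus num_chan trailing elements; saturates at
		 * SIZE_MAX on overflow instead of wrapping */
		return struct_size(sc, ch_list, num_chan);
	}

This also retires the `size += sizeof(u16) * (num_chan - 1)` correction that the old one-element array forced on every caller.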
index 784940b..6720149 100644 (file)
@@ -863,7 +863,7 @@ struct wmi_begin_scan_cmd {
        u8 num_ch;
 
        /* channels in Mhz */
-       __le16 ch_list[1];
+       __le16 ch_list[];
 } __packed;
 
 /* wmi_start_scan_cmd is to be deprecated. Use
@@ -889,7 +889,7 @@ struct wmi_start_scan_cmd {
        u8 num_ch;
 
        /* channels in Mhz */
-       __le16 ch_list[1];
+       __le16 ch_list[];
 } __packed;
 
 /*
@@ -1373,7 +1373,7 @@ struct wmi_channel_list_reply {
        u8 num_ch;
 
        /* channel in Mhz */
-       __le16 ch_list[1];
+       __le16 ch_list[];
 } __packed;
 
 /* List of Events (target to host) */
@@ -1545,7 +1545,7 @@ struct wmi_connect_event {
        u8 beacon_ie_len;
        u8 assoc_req_len;
        u8 assoc_resp_len;
-       u8 assoc_info[1];
+       u8 assoc_info[];
 } __packed;
 
 /* Disconnect Event */
@@ -1596,7 +1596,7 @@ struct wmi_disconnect_event {
        u8 disconn_reason;
 
        u8 assoc_resp_len;
-       u8 assoc_info[1];
+       u8 assoc_info[];
 } __packed;
 
 /*
@@ -1637,7 +1637,7 @@ struct bss_bias {
 
 struct bss_bias_info {
        u8 num_bss;
-       struct bss_bias bss_bias[0];
+       struct bss_bias bss_bias[];
 } __packed;
 
 struct low_rssi_scan_params {
@@ -1720,7 +1720,7 @@ struct wmi_neighbor_info {
 
 struct wmi_neighbor_report_event {
        u8 num_neighbors;
-       struct wmi_neighbor_info neighbor[0];
+       struct wmi_neighbor_info neighbor[];
 } __packed;
 
 /* TKIP MIC Error Event */
@@ -1957,7 +1957,7 @@ union wmi_ap_info {
 struct wmi_aplist_event {
        u8 ap_list_ver;
        u8 num_ap;
-       union wmi_ap_info ap_list[1];
+       union wmi_ap_info ap_list[];
 } __packed;
 
 /* Developer Commands */
@@ -2051,7 +2051,7 @@ struct wmi_get_keepalive_cmd {
 struct wmi_set_appie_cmd {
        u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */
        u8 ie_len;
-       u8 ie_info[0];
+       u8 ie_info[];
 } __packed;
 
 struct wmi_set_ie_cmd {
@@ -2059,7 +2059,7 @@ struct wmi_set_ie_cmd {
        u8 ie_field;    /* enum wmi_ie_field_type */
        u8 ie_len;
        u8 reserved;
-       u8 ie_info[0];
+       u8 ie_info[];
 } __packed;
 
 /* Notify the WSC registration status to the target */
@@ -2127,7 +2127,7 @@ struct wmi_add_wow_pattern_cmd {
        u8 filter_list_id;
        u8 filter_size;
        u8 filter_offset;
-       u8 filter[0];
+       u8 filter[];
 } __packed;
 
 struct wmi_del_wow_pattern_cmd {
@@ -2360,7 +2360,7 @@ struct wmi_send_action_cmd {
        __le32 freq;
        __le32 wait;
        __le16 len;
-       u8 data[0];
+       u8 data[];
 } __packed;
 
 struct wmi_send_mgmt_cmd {
@@ -2369,7 +2369,7 @@ struct wmi_send_mgmt_cmd {
        __le32 wait;
        __le32 no_cck;
        __le16 len;
-       u8 data[0];
+       u8 data[];
 } __packed;
 
 struct wmi_tx_status_event {
@@ -2389,7 +2389,7 @@ struct wmi_set_appie_extended_cmd {
        u8 role_id;
        u8 mgmt_frm_type;
        u8 ie_len;
-       u8 ie_info[0];
+       u8 ie_info[];
 } __packed;
 
 struct wmi_remain_on_chnl_event {
@@ -2406,18 +2406,18 @@ struct wmi_cancel_remain_on_chnl_event {
 struct wmi_rx_action_event {
        __le32 freq;
        __le16 len;
-       u8 data[0];
+       u8 data[];
 } __packed;
 
 struct wmi_p2p_capabilities_event {
        __le16 len;
-       u8 data[0];
+       u8 data[];
 } __packed;
 
 struct wmi_p2p_rx_probe_req_event {
        __le32 freq;
        __le16 len;
-       u8 data[0];
+       u8 data[];
 } __packed;
 
 #define P2P_FLAG_CAPABILITIES_REQ   (0x00000001)
@@ -2431,7 +2431,7 @@ struct wmi_get_p2p_info {
 struct wmi_p2p_info_event {
        __le32 info_req_flags;
        __le16 len;
-       u8 data[0];
+       u8 data[];
 } __packed;
 
 struct wmi_p2p_capabilities {
@@ -2450,7 +2450,7 @@ struct wmi_p2p_probe_response_cmd {
        __le32 freq;
        u8 destination_addr[ETH_ALEN];
        __le16 len;
-       u8 data[0];
+       u8 data[];
 } __packed;
 
 /* Extended WMI (WMIX)
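
Every hunk in this header makes the same change: zero- and one-length trailing arrays become C99 flexible array members. The wire format is untouched; what changes is that sizeof() stops counting a phantom element and the compiler and fortify checks can see where the real array begins. With demo types (mine, not from the file):

	struct old_ev {
		u8 num_ap;
		u8 ap_list[1];	/* sizeof(struct old_ev) == 2: one entry baked in */
	};

	struct new_ev {
		u8 num_ap;
		u8 ap_list[];	/* sizeof(struct new_ev) == 1: no hidden entry */
	};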
index ef6f5ea..3ccf8cf 100644 (file)
@@ -1071,8 +1071,9 @@ struct ath_softc {
 #endif
 
 #ifdef CONFIG_ATH9K_HWRNG
+       struct hwrng rng_ops;
        u32 rng_last;
-       struct task_struct *rng_task;
+       char rng_name[sizeof("ath9k_65535")];
 #endif
 };
 
index 39d46c2..039bf0c 100644 (file)
@@ -43,7 +43,7 @@ static bool ath_mci_add_profile(struct ath_common *common,
                                struct ath_mci_profile_info *info)
 {
        struct ath_mci_profile_info *entry;
-       u8 voice_priority[] = { 110, 110, 110, 112, 110, 110, 114, 116, 118 };
+       static const u8 voice_priority[] = { 110, 110, 110, 112, 110, 110, 114, 116, 118 };
 
        if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
            (info->type == MCI_GPM_COEX_PROFILE_VOICE))
index f9d3d6e..cb54142 100644 (file)
 #include "hw.h"
 #include "ar9003_phy.h"
 
-#define ATH9K_RNG_BUF_SIZE     320
-#define ATH9K_RNG_ENTROPY(x)   (((x) * 8 * 10) >> 5) /* quality: 10/32 */
-
-static DECLARE_WAIT_QUEUE_HEAD(rng_queue);
-
 static int ath9k_rng_data_read(struct ath_softc *sc, u32 *buf, u32 buf_size)
 {
        int i, j;
@@ -71,61 +66,56 @@ static u32 ath9k_rng_delay_get(u32 fail_stats)
        return delay;
 }
 
-static int ath9k_rng_kthread(void *data)
+static int ath9k_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
 {
-       int bytes_read;
-       struct ath_softc *sc = data;
-       u32 *rng_buf;
-       u32 delay, fail_stats = 0;
-
-       rng_buf = kmalloc_array(ATH9K_RNG_BUF_SIZE, sizeof(u32), GFP_KERNEL);
-       if (!rng_buf)
-               goto out;
-
-       while (!kthread_should_stop()) {
-               bytes_read = ath9k_rng_data_read(sc, rng_buf,
-                                                ATH9K_RNG_BUF_SIZE);
-               if (unlikely(!bytes_read)) {
-                       delay = ath9k_rng_delay_get(++fail_stats);
-                       wait_event_interruptible_timeout(rng_queue,
-                                                        kthread_should_stop(),
-                                                        msecs_to_jiffies(delay));
-                       continue;
+       struct ath_softc *sc = container_of(rng, struct ath_softc, rng_ops);
+       u32 fail_stats = 0, word;
+       int bytes_read = 0;
+
+       for (;;) {
+               if (max & ~3UL)
+                       bytes_read = ath9k_rng_data_read(sc, buf, max >> 2);
+               if ((max & 3UL) && ath9k_rng_data_read(sc, &word, 1)) {
+                       memcpy(buf + bytes_read, &word, max & 3UL);
+                       bytes_read += max & 3UL;
+                       memzero_explicit(&word, sizeof(word));
                }
+               if (!wait || !max || likely(bytes_read) || fail_stats > 110)
+                       break;
 
-               fail_stats = 0;
-
-               /* sleep until entropy bits under write_wakeup_threshold */
-               add_hwgenerator_randomness((void *)rng_buf, bytes_read,
-                                          ATH9K_RNG_ENTROPY(bytes_read));
+               msleep_interruptible(ath9k_rng_delay_get(++fail_stats));
        }
 
-       kfree(rng_buf);
-out:
-       sc->rng_task = NULL;
-
-       return 0;
+       if (wait && !bytes_read && max)
+               bytes_read = -EIO;
+       return bytes_read;
 }
 
 void ath9k_rng_start(struct ath_softc *sc)
 {
+       static atomic_t serial = ATOMIC_INIT(0);
        struct ath_hw *ah = sc->sc_ah;
 
-       if (sc->rng_task)
+       if (sc->rng_ops.read)
                return;
 
        if (!AR_SREV_9300_20_OR_LATER(ah))
                return;
 
-       sc->rng_task = kthread_run(ath9k_rng_kthread, sc, "ath9k-hwrng");
-       if (IS_ERR(sc->rng_task))
-               sc->rng_task = NULL;
+       snprintf(sc->rng_name, sizeof(sc->rng_name), "ath9k_%u",
+                (atomic_inc_return(&serial) - 1) & U16_MAX);
+       sc->rng_ops.name = sc->rng_name;
+       sc->rng_ops.read = ath9k_rng_read;
+       sc->rng_ops.quality = 320;
+
+       if (devm_hwrng_register(sc->dev, &sc->rng_ops))
+               sc->rng_ops.read = NULL;
 }
 
 void ath9k_rng_stop(struct ath_softc *sc)
 {
-       if (sc->rng_task) {
-               kthread_stop(sc->rng_task);
-               sc->rng_task = NULL;
+       if (sc->rng_ops.read) {
+               devm_hwrng_unregister(sc->dev, &sc->rng_ops);
+               sc->rng_ops.read = NULL;
        }
 }
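
The conversion above replaces a driver-owned kthread that pushed entropy into the pool with a struct hwrng ->read() callback that the hw_random core invokes on demand; devm_hwrng_register() additionally ties unregistration to device teardown. The general shape, with illustrative names:

	#include <linux/hw_random.h>

	static int demo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
	{
		/* fill buf with up to max bytes of hardware entropy;
		 * return the byte count, or a negative errno */
		return 0;
	}

	static int demo_rng_register(struct device *dev, struct hwrng *ops)
	{
		ops->name = "demo_rng";
		ops->read = demo_rng_read;
		ops->quality = 320;	/* estimated entropy, bits per 1024 bits */

		/* unregistered automatically when dev goes away */
		return devm_hwrng_register(dev, ops);
	}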
index 84a8ce0..ba29b4a 100644 (file)
@@ -458,7 +458,6 @@ struct ar9170 {
 # define CARL9170_HWRNG_CACHE_SIZE     CARL9170_MAX_CMD_PAYLOAD_LEN
        struct {
                struct hwrng rng;
-               bool initialized;
                char name[30 + 1];
                u16 cache[CARL9170_HWRNG_CACHE_SIZE / sizeof(u16)];
                unsigned int cache_idx;
index 503b21a..10acb6a 100644 (file)
@@ -149,7 +149,7 @@ struct carl9170fw_fix_entry {
 
 struct carl9170fw_fix_desc {
        struct carl9170fw_desc_head head;
-       struct carl9170fw_fix_entry data[0];
+       struct carl9170fw_fix_entry data[];
 } __packed;
 #define CARL9170FW_FIX_DESC_SIZE                       \
        (sizeof(struct carl9170fw_fix_desc))
index 49f7ee1..76e84ad 100644 (file)
@@ -1412,7 +1412,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
                        return -EOPNOTSUPP;
 
                tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
-                                  GFP_ATOMIC);
+                                  GFP_KERNEL);
                if (!tid_info)
                        return -ENOMEM;
 
@@ -1494,7 +1494,7 @@ static int carl9170_register_wps_button(struct ar9170 *ar)
        if (!(ar->features & CARL9170_WPS_BUTTON))
                return 0;
 
-       input = input_allocate_device();
+       input = devm_input_allocate_device(&ar->udev->dev);
        if (!input)
                return -ENOMEM;
 
@@ -1512,10 +1512,8 @@ static int carl9170_register_wps_button(struct ar9170 *ar)
        input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
 
        err = input_register_device(input);
-       if (err) {
-               input_free_device(input);
+       if (err)
                return err;
-       }
 
        ar->wps.pbc = input;
        return 0;
@@ -1539,7 +1537,7 @@ static int carl9170_rng_get(struct ar9170 *ar)
 
        BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);
 
-       if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
+       if (!IS_ACCEPTING_CMD(ar))
                return -EAGAIN;
 
        count = ARRAY_SIZE(ar->rng.cache);
@@ -1585,14 +1583,6 @@ static int carl9170_rng_read(struct hwrng *rng, u32 *data)
        return sizeof(u16);
 }
 
-static void carl9170_unregister_hwrng(struct ar9170 *ar)
-{
-       if (ar->rng.initialized) {
-               hwrng_unregister(&ar->rng.rng);
-               ar->rng.initialized = false;
-       }
-}
-
 static int carl9170_register_hwrng(struct ar9170 *ar)
 {
        int err;
@@ -1603,25 +1593,14 @@ static int carl9170_register_hwrng(struct ar9170 *ar)
        ar->rng.rng.data_read = carl9170_rng_read;
        ar->rng.rng.priv = (unsigned long)ar;
 
-       if (WARN_ON(ar->rng.initialized))
-               return -EALREADY;
-
-       err = hwrng_register(&ar->rng.rng);
+       err = devm_hwrng_register(&ar->udev->dev, &ar->rng.rng);
        if (err) {
                dev_err(&ar->udev->dev, "Failed to register the random "
                        "number generator (%d)\n", err);
                return err;
        }
 
-       ar->rng.initialized = true;
-
-       err = carl9170_rng_get(ar);
-       if (err) {
-               carl9170_unregister_hwrng(ar);
-               return err;
-       }
-
-       return 0;
+       return carl9170_rng_get(ar);
 }
 #endif /* CONFIG_CARL9170_HWRNG */
 
@@ -1914,7 +1893,7 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
                WARN_ON(!(tx_streams >= 1 && tx_streams <=
                        IEEE80211_HT_MCS_TX_MAX_STREAMS));
 
-               tx_params = (tx_streams - 1) <<
+               tx_params |= (tx_streams - 1) <<
                            IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
 
                carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
@@ -1937,7 +1916,8 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
        if (!bands)
                return -EINVAL;
 
-       ar->survey = kcalloc(chans, sizeof(struct survey_info), GFP_KERNEL);
+       ar->survey = devm_kcalloc(&ar->udev->dev, chans,
+                                 sizeof(struct survey_info), GFP_KERNEL);
        if (!ar->survey)
                return -ENOMEM;
        ar->num_channels = chans;
@@ -1964,11 +1944,7 @@ int carl9170_register(struct ar9170 *ar)
        struct ath_regulatory *regulatory = &ar->common.regulatory;
        int err = 0, i;
 
-       if (WARN_ON(ar->mem_bitmap))
-               return -EINVAL;
-
-       ar->mem_bitmap = bitmap_zalloc(ar->fw.mem_blocks, GFP_KERNEL);
-
+       ar->mem_bitmap = devm_bitmap_zalloc(&ar->udev->dev, ar->fw.mem_blocks, GFP_KERNEL);
        if (!ar->mem_bitmap)
                return -ENOMEM;
 
@@ -2057,17 +2033,6 @@ void carl9170_unregister(struct ar9170 *ar)
        carl9170_debugfs_unregister(ar);
 #endif /* CONFIG_CARL9170_DEBUGFS */
 
-#ifdef CONFIG_CARL9170_WPC
-       if (ar->wps.pbc) {
-               input_unregister_device(ar->wps.pbc);
-               ar->wps.pbc = NULL;
-       }
-#endif /* CONFIG_CARL9170_WPC */
-
-#ifdef CONFIG_CARL9170_HWRNG
-       carl9170_unregister_hwrng(ar);
-#endif /* CONFIG_CARL9170_HWRNG */
-
        carl9170_cancel_worker(ar);
        cancel_work_sync(&ar->restart_work);
 
@@ -2082,12 +2047,6 @@ void carl9170_free(struct ar9170 *ar)
        kfree_skb(ar->rx_failover);
        ar->rx_failover = NULL;
 
-       bitmap_free(ar->mem_bitmap);
-       ar->mem_bitmap = NULL;
-
-       kfree(ar->survey);
-       ar->survey = NULL;
-
        mutex_destroy(&ar->mutex);
 
        ieee80211_free_hw(ar->hw);
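
The carl9170 hunks are a wholesale move to device-managed resources: devm_input_allocate_device(), devm_hwrng_register(), devm_kcalloc() and devm_bitmap_zalloc() all release their resource when the underlying struct device is unbound, which is why the matching input_unregister_device(), hwrng_unregister(), kfree() and bitmap_free() calls vanish from the unregister and free paths. A generic sketch of the pattern:

	#include <linux/device.h>
	#include <linux/slab.h>

	static int demo_probe(struct device *dev)
	{
		u32 *buf;

		/* freed automatically on unbind; no remove-path kfree() */
		buf = devm_kcalloc(dev, 16, sizeof(*buf), GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		return 0;
	}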
index bb73553..0a4e42e 100644 (file)
@@ -327,7 +327,7 @@ struct _carl9170_tx_superdesc {
 struct _carl9170_tx_superframe {
        struct _carl9170_tx_superdesc s;
        struct _ar9170_tx_hwdesc f;
-       u8 frame_data[0];
+       u8 frame_data[];
 } __packed __aligned(4);
 
 #define        CARL9170_TX_SUPERDESC_LEN               24
index b2400e2..f15e7bd 100644 (file)
@@ -667,14 +667,14 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
 
 /*
  * Some users have reported their EEPROM programmed with
- * 0x8000 or 0x0 set, this is not a supported regulatory
- * domain but since we have more than one user with it we
- * need a solution for them. We default to 0x64, which is
- * the default Atheros world regulatory domain.
+ * 0x8000 set, this is not a supported regulatory domain
+ * but since we have more than one user with it we need
+ * a solution for them. We default to 0x64, which is the
+ * default Atheros world regulatory domain.
  */
 static void ath_regd_sanitize(struct ath_regulatory *reg)
 {
-       if (reg->current_rd != COUNTRY_ERD_FLAG && reg->current_rd != 0)
+       if (reg->current_rd != COUNTRY_ERD_FLAG)
                return;
        printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n");
        reg->current_rd = 0x64;
index e14f374..fe187c1 100644 (file)
@@ -108,7 +108,7 @@ struct fft_sample_ath10k {
        u8 avgpwr_db;
        u8 max_exp;
 
-       u8 data[0];
+       u8 data[];
 } __packed;
 
 struct fft_sample_ath11k {
@@ -123,7 +123,7 @@ struct fft_sample_ath11k {
        __be32 tsf;
        __be32 noise;
 
-       u8 data[0];
+       u8 data[];
 } __packed;
 
 #endif /* SPECTRAL_COMMON_H */
index 75661d4..95ea7d0 100644 (file)
@@ -394,7 +394,7 @@ static void wcn36xx_change_opchannel(struct wcn36xx *wcn, int ch)
        struct ieee80211_vif *vif = NULL;
        struct wcn36xx_vif *tmp;
        struct ieee80211_supported_band *band;
-       struct ieee80211_channel *channel;
+       struct ieee80211_channel *channel = NULL;
        unsigned long flags;
        int i, j;
 
@@ -1391,11 +1391,11 @@ static int wcn36xx_get_survey(struct ieee80211_hw *hw, int idx,
 
        spin_unlock_irqrestore(&wcn->survey_lock, flags);
 
-        wcn36xx_dbg(WCN36XX_DBG_MAC,
-                    "ch %d rssi %d snr %d noise %d filled %x freq %d\n",
-                    HW_VALUE_CHANNEL(survey->channel->hw_value),
-                    chan_survey->rssi, chan_survey->snr, survey->noise,
-                    survey->filled, survey->channel->center_freq);
+       wcn36xx_dbg(WCN36XX_DBG_MAC,
+                   "ch %d rssi %d snr %d noise %d filled %x freq %d\n",
+                   HW_VALUE_CHANNEL(survey->channel->hw_value),
+                   chan_survey->rssi, chan_survey->snr, survey->noise,
+                   survey->filled, survey->channel->center_freq);
 
        return 0;
 }
@@ -1583,6 +1583,9 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
        if (iris_node) {
                if (of_device_is_compatible(iris_node, "qcom,wcn3620"))
                        wcn->rf_id = RF_IRIS_WCN3620;
+               if (of_device_is_compatible(iris_node, "qcom,wcn3660") ||
+                   of_device_is_compatible(iris_node, "qcom,wcn3660b"))
+                       wcn->rf_id = RF_IRIS_WCN3660;
                if (of_device_is_compatible(iris_node, "qcom,wcn3680"))
                        wcn->rf_id = RF_IRIS_WCN3680;
                of_node_put(iris_node);
index caeb689..59ad332 100644 (file)
@@ -3347,7 +3347,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
        case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
        case WCN36XX_HAL_PRINT_REG_INFO_IND:
        case WCN36XX_HAL_SCAN_OFFLOAD_IND:
-               msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_ATOMIC);
+               msg_ind = kmalloc(struct_size(msg_ind, msg, len), GFP_ATOMIC);
                if (!msg_ind) {
                        wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
                                    msg_header->msg_type);
index c049837..df749b1 100644 (file)
@@ -376,8 +376,8 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
                status.freq = WCN36XX_CENTER_FREQ(wcn);
        }
 
-        wcn36xx_update_survey(wcn, status.signal, get_snr(bd),
-                              status.band, status.freq);
+       wcn36xx_update_survey(wcn, status.signal, get_snr(bd),
+                             status.band, status.freq);
 
        if (bd->rate_id < ARRAY_SIZE(wcn36xx_rate_table)) {
                rate = &wcn36xx_rate_table[bd->rate_id];
index 81eaa74..9aa08b6 100644 (file)
@@ -97,6 +97,7 @@ enum wcn36xx_ampdu_state {
 
 #define RF_UNKNOWN     0x0000
 #define RF_IRIS_WCN3620        0x3620
+#define RF_IRIS_WCN3660        0x3660
 #define RF_IRIS_WCN3680        0x3680
 
 static inline void buff_to_be(u32 *buf, size_t len)
index cc830c7..5704def 100644 (file)
@@ -958,7 +958,7 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
                if (gro)
                        napi_gro_receive(&wil->napi_rx, skb);
                else
-                       netif_rx_ni(skb);
+                       netif_rx(skb);
        }
        ndev->stats.rx_packets++;
        stats->rx_packets++;
index dd8abbb..98b4c18 100644 (file)
@@ -1199,7 +1199,7 @@ static void wmi_evt_eapol_rx(struct wil6210_vif *vif, int id, void *d, int len)
        eth->h_proto = cpu_to_be16(ETH_P_PAE);
        skb_put_data(skb, evt->eapol, eapol_len);
        skb->protocol = eth_type_trans(skb, ndev);
-       if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
+       if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
                ndev->stats.rx_packets++;
                ndev->stats.rx_bytes += sz;
                if (stats) {
index 3984fd7..2c95a08 100644 (file)
@@ -397,9 +397,9 @@ brcmf_proto_bcdc_add_tdls_peer(struct brcmf_pub *drvr, int ifidx,
 }
 
 static void brcmf_proto_bcdc_rxreorder(struct brcmf_if *ifp,
-                                      struct sk_buff *skb, bool inirq)
+                                      struct sk_buff *skb)
 {
-       brcmf_fws_rxreorder(ifp, skb, inirq);
+       brcmf_fws_rxreorder(ifp, skb);
 }
 
 static void
index b2fb9fc..f0ad1e2 100644 (file)
@@ -4623,7 +4623,7 @@ exit:
 
 s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif)
 {
-       s32 pktflags[] = {
+       static const s32 pktflags[] = {
                BRCMF_VNDR_IE_PRBREQ_FLAG,
                BRCMF_VNDR_IE_PRBRSP_FLAG,
                BRCMF_VNDR_IE_BEACON_FLAG
index 1ee49f9..4ec7773 100644 (file)
@@ -704,6 +704,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
 {
        switch (ci->pub.chip) {
        case BRCM_CC_4345_CHIP_ID:
+       case BRCM_CC_43454_CHIP_ID:
                return 0x198000;
        case BRCM_CC_4335_CHIP_ID:
        case BRCM_CC_4339_CHIP_ID:
@@ -1401,6 +1402,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
        case BRCM_CC_4354_CHIP_ID:
        case BRCM_CC_4356_CHIP_ID:
        case BRCM_CC_4345_CHIP_ID:
+       case BRCM_CC_43454_CHIP_ID:
                /* explicitly check SR engine enable bit */
                pmu_cc3_mask = BIT(2);
                fallthrough;
index fed9cd5..26fab4b 100644 (file)
@@ -400,7 +400,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
        spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
 }
 
-void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
 {
        /* Most of Broadcom's firmwares send 802.11f ADD frame every time a new
         * STA connects to the AP interface. This is an obsoleted standard most
@@ -423,15 +423,7 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
        ifp->ndev->stats.rx_packets++;
 
        brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
-       if (inirq) {
-               netif_rx(skb);
-       } else {
-               /* If the receive is not processed inside an ISR,
-                * the softirqd must be woken explicitly to service
-                * the NET_RX_SOFTIRQ.  This is handled by netif_rx_ni().
-                */
-               netif_rx_ni(skb);
-       }
+       netif_rx(skb);
 }
 
 void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
@@ -480,7 +472,7 @@ void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
        skb->pkt_type = PACKET_OTHERHOST;
        skb->protocol = htons(ETH_P_802_2);
 
-       brcmf_netif_rx(ifp, skb, false);
+       brcmf_netif_rx(ifp, skb);
 }
 
 static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
@@ -515,7 +507,7 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event,
                return;
 
        if (brcmf_proto_is_reorder_skb(skb)) {
-               brcmf_proto_rxreorder(ifp, skb, inirq);
+               brcmf_proto_rxreorder(ifp, skb);
        } else {
                /* Process special event packets */
                if (handle_event) {
@@ -524,7 +516,7 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event,
                        brcmf_fweh_process_skb(ifp->drvr, skb,
                                               BCMILCP_SUBTYPE_VENDOR_LONG, gfp);
                }
-               brcmf_netif_rx(ifp, skb, inirq);
+               brcmf_netif_rx(ifp, skb);
        }
 }
 
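
These conversions ride on a core change from this cycle: netif_rx() now disables bottom halves itself when called from process context, making it safe from any context and leaving netif_rx_ni() with no remaining purpose. That is also why the inirq flag threaded through brcmf_netif_rx() and the rxreorder paths below can be dropped outright. The receive fast path reduces to:

	/* valid from hardirq, softirq or process context alike */
	skb->protocol = eth_type_trans(skb, ndev);
	netif_rx(skb);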
index 8212c9d..340346c 100644 (file)
@@ -208,7 +208,7 @@ void brcmf_remove_interface(struct brcmf_if *ifp, bool locked);
 void brcmf_txflowblock_if(struct brcmf_if *ifp,
                          enum brcmf_netif_stop_reason reason, bool state);
 void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
-void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
 void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb);
 void brcmf_net_detach(struct net_device *ndev, bool locked);
 int brcmf_net_mon_attach(struct brcmf_if *ifp);
index 7c68d98..d2ac844 100644 (file)
@@ -248,7 +248,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
        brcmf_feat_firmware_capabilities(ifp);
        memset(&gscan_cfg, 0, sizeof(gscan_cfg));
        if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID &&
-           drvr->bus_if->chip != BRCM_CC_4345_CHIP_ID)
+           drvr->bus_if->chip != BRCM_CC_4345_CHIP_ID &&
+           drvr->bus_if->chip != BRCM_CC_43454_CHIP_ID)
                brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN,
                                          "pfn_gscan_cfg",
                                          &gscan_cfg, sizeof(gscan_cfg));
index e69d1e5..c87b829 100644 (file)
@@ -1068,7 +1068,7 @@ struct brcmf_mkeep_alive_pkt_le {
        __le32  period_msec;
        __le16  len_bytes;
        u8   keep_alive_id;
-       u8   data[0];
+       u8   data[];
 } __packed;
 
 #endif /* FWIL_TYPES_H_ */
index 19b0f31..d58525e 100644 (file)
@@ -1664,7 +1664,7 @@ static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
        rfi->pend_pkts -= skb_queue_len(skb_list);
 }
 
-void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
 {
        struct brcmf_pub *drvr = ifp->drvr;
        u8 *reorder_data;
@@ -1682,7 +1682,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
        /* validate flags and flow id */
        if (flags == 0xFF) {
                bphy_err(drvr, "invalid flags...so ignore this packet\n");
-               brcmf_netif_rx(ifp, pkt, inirq);
+               brcmf_netif_rx(ifp, pkt);
                return;
        }
 
@@ -1694,7 +1694,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
                if (rfi == NULL) {
                        brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
                                  flow_id);
-                       brcmf_netif_rx(ifp, pkt, inirq);
+                       brcmf_netif_rx(ifp, pkt);
                        return;
                }
 
@@ -1719,7 +1719,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
                rfi = kzalloc(buf_size, GFP_ATOMIC);
                if (rfi == NULL) {
                        bphy_err(drvr, "failed to alloc buffer\n");
-                       brcmf_netif_rx(ifp, pkt, inirq);
+                       brcmf_netif_rx(ifp, pkt);
                        return;
                }
 
@@ -1833,7 +1833,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
 netif_rx:
        skb_queue_walk_safe(&reorder_list, pkt, pnext) {
                __skb_unlink(pkt, &reorder_list);
-               brcmf_netif_rx(ifp, pkt, inirq);
+               brcmf_netif_rx(ifp, pkt);
        }
 }
 
index 50e424b..b16a9d1 100644 (file)
@@ -42,6 +42,6 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp);
 void brcmf_fws_del_interface(struct brcmf_if *ifp);
 void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
 void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
-void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb);
 
 #endif /* FWSIGNAL_H_ */
index 7c8e08e..b2d0f75 100644 (file)
@@ -536,8 +536,7 @@ static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
        return -ENODEV;
 }
 
-static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb,
-                                  bool inirq)
+static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
 {
 }
 
@@ -1191,7 +1190,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
        }
 
        skb->protocol = eth_type_trans(skb, ifp->ndev);
-       brcmf_netif_rx(ifp, skb, false);
+       brcmf_netif_rx(ifp, skb);
 }
 
 static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf,
index d3f08d4..479041f 100644 (file)
@@ -90,8 +90,8 @@
 #define P2PSD_ACTION_CATEGORY          0x04    /* Public action frame */
 #define P2PSD_ACTION_ID_GAS_IREQ       0x0a    /* GAS Initial Request AF */
 #define P2PSD_ACTION_ID_GAS_IRESP      0x0b    /* GAS Initial Response AF */
-#define P2PSD_ACTION_ID_GAS_CREQ       0x0c    /* GAS Comback Request AF */
-#define P2PSD_ACTION_ID_GAS_CRESP      0x0d    /* GAS Comback Response AF */
+#define P2PSD_ACTION_ID_GAS_CREQ       0x0c    /* GAS Comeback Request AF */
+#define P2PSD_ACTION_ID_GAS_CRESP      0x0d    /* GAS Comeback Response AF */
 
 #define BRCMF_P2P_DISABLE_TIMEOUT      msecs_to_jiffies(500)
 
@@ -396,11 +396,11 @@ static void brcmf_p2p_print_actframe(bool tx, void *frame, u32 frame_len)
                                  (tx) ? "TX" : "RX");
                        break;
                case P2PSD_ACTION_ID_GAS_CREQ:
-                       brcmf_dbg(TRACE, "%s P2P GAS Comback Request\n",
+                       brcmf_dbg(TRACE, "%s P2P GAS Comeback Request\n",
                                  (tx) ? "TX" : "RX");
                        break;
                case P2PSD_ACTION_ID_GAS_CRESP:
-                       brcmf_dbg(TRACE, "%s P2P GAS Comback Response\n",
+                       brcmf_dbg(TRACE, "%s P2P GAS Comeback Response\n",
                                  (tx) ? "TX" : "RX");
                        break;
                default:
index f4a79e2..bd08d3a 100644 (file)
@@ -32,7 +32,7 @@ struct brcmf_proto {
                            u8 peer[ETH_ALEN]);
        void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx,
                              u8 peer[ETH_ALEN]);
-       void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
+       void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb);
        void (*add_if)(struct brcmf_if *ifp);
        void (*del_if)(struct brcmf_if *ifp);
        void (*reset_if)(struct brcmf_if *ifp);
@@ -109,9 +109,9 @@ static inline bool brcmf_proto_is_reorder_skb(struct sk_buff *skb)
 }
 
 static inline void
-brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
+brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
 {
-       ifp->drvr->proto->rxreorder(ifp, skb, inirq);
+       ifp->drvr->proto->rxreorder(ifp, skb);
 }
 
 static inline void
index 5d156e5..ba3c159 100644 (file)
@@ -651,6 +651,7 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
        BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFC, 43430B0),
        BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0x00000200, 43456),
        BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFDC0, 43455),
+       BRCMF_FW_ENTRY(BRCM_CC_43454_CHIP_ID, 0x00000040, 43455),
        BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
        BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
        BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
index e1930ce..b2c7ae8 100644 (file)
@@ -15,7 +15,7 @@
 struct brcmf_xtlv {
        u16 id;
        u16 len;
-       u8 data[0];
+       u8 data[];
 };
 
 enum brcmf_xtlv_option {
index 3bbe238..ed0b707 100644 (file)
@@ -32,6 +32,7 @@
 #define BRCM_CC_4339_CHIP_ID           0x4339
 #define BRCM_CC_43430_CHIP_ID          43430
 #define BRCM_CC_4345_CHIP_ID           0x4345
+#define BRCM_CC_43454_CHIP_ID          43454
 #define BRCM_CC_43465_CHIP_ID          43465
 #define BRCM_CC_4350_CHIP_ID           0x4350
 #define BRCM_CC_43525_CHIP_ID          43525
index 452d085..10daef8 100644 (file)
@@ -545,7 +545,7 @@ struct ConfigRid {
 #define MODE_CFG_MASK cpu_to_le16(0xff)
 #define MODE_ETHERNET_HOST cpu_to_le16(0<<8) /* rx payloads converted */
 #define MODE_LLC_HOST cpu_to_le16(1<<8) /* rx payloads left as is */
-#define MODE_AIRONET_EXTEND cpu_to_le16(1<<9) /* enable Aironet extenstions */
+#define MODE_AIRONET_EXTEND cpu_to_le16(1<<9) /* enable Aironet extensions */
 #define MODE_AP_INTERFACE cpu_to_le16(1<<10) /* enable ap interface extensions */
 #define MODE_ANTENNA_ALIGN cpu_to_le16(1<<11) /* enable antenna alignment */
 #define MODE_ETHER_LLC cpu_to_le16(1<<12) /* enable ethernet LLC */
index 85e7042..a647a40 100644 (file)
@@ -139,6 +139,7 @@ config IWLMEI
        tristate "Intel Management Engine communication over WLAN"
        depends on INTEL_MEI
        depends on PM
+       depends on CFG80211
        help
          Enables the iwlmei kernel module.
 
index 330ef04..8ff967e 100644 (file)
@@ -1,15 +1,16 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
  */
 #include <linux/module.h>
 #include <linux/stringify.h>
 #include "iwl-config.h"
 #include "iwl-prph.h"
+#include "fw/api/txq.h"
 
 /* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX        69
+#define IWL_22000_UCODE_API_MAX        72
 
 /* Lowest firmware API version supported */
 #define IWL_22000_UCODE_API_MIN        39
@@ -39,6 +40,7 @@
 #define IWL_SO_A_GF_A_FW_PRE           "iwlwifi-so-a0-gf-a0-"
 #define IWL_TY_A_GF_A_FW_PRE           "iwlwifi-ty-a0-gf-a0-"
 #define IWL_SO_A_GF4_A_FW_PRE          "iwlwifi-so-a0-gf4-a0-"
+#define IWL_SO_A_MR_A_FW_PRE           "iwlwifi-so-a0-mr-a0-"
 #define IWL_SNJ_A_GF4_A_FW_PRE         "iwlwifi-SoSnj-a0-gf4-a0-"
 #define IWL_SNJ_A_GF_A_FW_PRE          "iwlwifi-SoSnj-a0-gf-a0-"
 #define IWL_SNJ_A_HR_B_FW_PRE          "iwlwifi-SoSnj-a0-hr-b0-"
                IWL_BZ_A_FM_A_FW_PRE __stringify(api) ".ucode"
 #define IWL_GL_A_FM_A_MODULE_FIRMWARE(api) \
                IWL_GL_A_FM_A_FW_PRE __stringify(api) ".ucode"
-#define IWL_BZ_Z_GF_A_MODULE_FIRMWARE(api) \
-       IWL_BZ_Z_GF_A_FW_PRE __stringify(api) ".ucode"
 #define IWL_BNJ_A_FM_A_MODULE_FIRMWARE(api) \
        IWL_BNJ_A_FM_A_FW_PRE __stringify(api) ".ucode"
 #define IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(api) \
@@ -224,7 +224,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
        .trans.base_params = &iwl_ax210_base_params,                    \
        .min_txq_size = 128,                                            \
        .gp2_reg_addr = 0xd02c68,                                       \
-       .min_256_ba_txq_size = 1024,                                    \
+       .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_HE,           \
        .mon_dram_regs = {                                              \
                .write_ptr = {                                          \
                        .addr = DBGC_CUR_DBGBUF_STATUS,                 \
@@ -285,7 +285,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
        .trans.base_params = &iwl_ax210_base_params,                    \
        .min_txq_size = 128,                                            \
        .gp2_reg_addr = 0xd02c68,                                       \
-       .min_256_ba_txq_size = 1024,                                    \
+       .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT,          \
        .mon_dram_regs = {                                              \
                .write_ptr = {                                          \
                        .addr = DBGC_CUR_DBGBUF_STATUS,                 \
@@ -299,6 +299,12 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
                        .addr = DBGC_CUR_DBGBUF_STATUS,                 \
                        .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK,         \
                },                                                      \
+       },                                                              \
+       .mon_dbgi_regs = {                                              \
+               .write_ptr = {                                          \
+                       .addr = DBGI_SRAM_FIFO_POINTERS,                \
+                       .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK,     \
+               },                                                      \
        }
 
 const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {
@@ -385,6 +391,21 @@ const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = {
        .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
 };
 
+const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg = {
+       .mq_rx_supported = true,
+       .use_tfh = true,
+       .rf_id = true,
+       .gen2 = true,
+       .device_family = IWL_DEVICE_FAMILY_AX210,
+       .base_params = &iwl_ax210_base_params,
+       .umac_prph_offset = 0x300000,
+       .integrated = true,
+       .low_latency_xtal = true,
+       .xtal_latency = 12000,
+       .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
+       .imr_enabled = true,
+};
+
 /*
  * If the device doesn't support HE, no need to have that many buffers.
  * 22000 devices can split multiple frames into a single RB, so fewer are
@@ -476,6 +497,7 @@ const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
 const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
 const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
 const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
+const char iwl_ax204_name[] = "Intel(R) Wi-Fi 6 AX204 160MHz";
 const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
 const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz";
 const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz";
@@ -816,6 +838,20 @@ const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = {
        .num_rbds = IWL_NUM_RBDS_AX210_HE,
 };
 
+const struct iwl_cfg iwl_cfg_ma_a0_ms_a0 = {
+       .fw_name_pre = IWL_MA_A_MR_A_FW_PRE,
+       .uhb_supported = false,
+       IWL_DEVICE_AX210,
+       .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_so_a0_ms_a0 = {
+       .fw_name_pre = IWL_SO_A_MR_A_FW_PRE,
+       .uhb_supported = false,
+       IWL_DEVICE_AX210,
+       .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
 const struct iwl_cfg iwl_cfg_ma_a0_fm_a0 = {
        .fw_name_pre = IWL_MA_A_FM_A_FW_PRE,
        .uhb_supported = true,
@@ -830,6 +866,13 @@ const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = {
        .num_rbds = IWL_NUM_RBDS_AX210_HE,
 };
 
+const struct iwl_cfg iwl_cfg_snj_a0_ms_a0 = {
+       .fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE,
+       .uhb_supported = false,
+       IWL_DEVICE_AX210,
+       .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
 const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = {
        .fw_name_pre = IWL_SO_A_HR_B_FW_PRE,
        IWL_DEVICE_AX210,
index 754876c..e8bd4f0 100644 (file)
@@ -299,7 +299,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)
 
        priv->is_open = 1;
        IWL_DEBUG_MAC80211(priv, "leave\n");
-       return 0;
+       return ret;
 }
 
 static void iwlagn_mac_stop(struct ieee80211_hw *hw)
index 90b9bec..caf4529 100644 (file)
@@ -48,6 +48,7 @@
 #define DRV_DESCRIPTION        "Intel(R) Wireless WiFi Link AGN driver for Linux"
 MODULE_DESCRIPTION(DRV_DESCRIPTION);
 MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IWLWIFI);
 
 /* Please keep this array *SORTED* by hex value.
  * Access is done through binary search.
index db0c41b..e9d2717 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2015 Intel Deutschland GmbH
- * Copyright(c) 2018, 2020 Intel Corporation
+ * Copyright(c) 2018, 2020-2021 Intel Corporation
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -915,7 +915,7 @@ static void iwlagn_rx_noa_notification(struct iwl_priv *priv,
                len += 1 + 2;
                copylen += 1 + 2;
 
-               new_data = kmalloc(sizeof(*new_data) + len, GFP_ATOMIC);
+               new_data = kmalloc(struct_size(new_data, data, len), GFP_ATOMIC);
                if (new_data) {
                        new_data->length = len;
                        new_data->data[0] = WLAN_EID_VENDOR_SPECIFIC;
@@ -1015,8 +1015,7 @@ void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
                /* No handling needed */
                IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
                             iwl_get_cmd_string(priv->trans,
-                                               iwl_cmd_id(pkt->hdr.cmd,
-                                                          0, 0)),
+                                               WIDE_ID(0, pkt->hdr.cmd)),
                             pkt->hdr.cmd);
        }
 }
index c17ab53..33aae63 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright (C) 2019-2022 Intel Corporation
  */
 #include <linux/uuid.h>
+#include <linux/dmi.h>
 #include "iwl-drv.h"
 #include "iwl-debug.h"
 #include "acpi.h"
@@ -19,6 +20,30 @@ const guid_t iwl_rfi_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
                                      0xDD, 0x26, 0xB5, 0xFD);
 IWL_EXPORT_SYMBOL(iwl_rfi_guid);
 
+static const struct dmi_system_id dmi_ppag_approved_list[] = {
+       { .ident = "HP",
+         .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+               },
+       },
+       { .ident = "SAMSUNG",
+         .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+               },
+       },
+       { .ident = "MSFT",
+         .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+               },
+       },
+       { .ident = "ASUS",
+         .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."),
+               },
+       },
+       {}
+};
+
 static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
                               acpi_handle *ret_handle)
 {
@@ -242,7 +267,7 @@ found:
 IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg_range);
 
 int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
-                    struct iwl_tas_config_cmd_v3 *cmd)
+                    union iwl_tas_config_cmd *cmd, int fw_ver)
 {
        union acpi_object *wifi_pkg, *data;
        int ret, tbl_rev, i, block_list_size, enabled;
@@ -268,10 +293,18 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
                        (tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS;
                u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >>
                        ACPI_WTAS_ENABLE_IEC_POS;
+               u8 usa_tas_uhb = (tas_selection & ACPI_WTAS_USA_UHB_MSK) >> ACPI_WTAS_USA_UHB_POS;
 
                enabled = tas_selection & ACPI_WTAS_ENABLED_MSK;
-               cmd->override_tas_iec = cpu_to_le16(override_iec);
-               cmd->enable_tas_iec = cpu_to_le16(enabled_iec);
+               if (fw_ver <= 3) {
+                       cmd->v3.override_tas_iec = cpu_to_le16(override_iec);
+                       cmd->v3.enable_tas_iec = cpu_to_le16(enabled_iec);
+               } else {
+                       cmd->v4.usa_tas_uhb_allowed = usa_tas_uhb;
+                       cmd->v4.override_tas_iec = (u8)override_iec;
+                       cmd->v4.enable_tas_iec = (u8)enabled_iec;
+               }
 
        } else if (tbl_rev == 0 &&
                wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) {
@@ -297,7 +330,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
                goto out_free;
        }
        block_list_size = wifi_pkg->package.elements[2].integer.value;
-       cmd->block_list_size = cpu_to_le32(block_list_size);
+       cmd->v4.block_list_size = cpu_to_le32(block_list_size);
 
        IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size);
        if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
@@ -319,7 +352,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
                }
 
                country = wifi_pkg->package.elements[3 + i].integer.value;
-               cmd->block_list_array[i] = cpu_to_le32(country);
+               cmd->v4.block_list_array[i] = cpu_to_le32(country);
                IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
        }
 
@@ -529,8 +562,8 @@ IWL_EXPORT_SYMBOL(iwl_sar_select_profile);
 int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
 {
        union acpi_object *wifi_pkg, *table, *data;
-       bool enabled;
        int ret, tbl_rev;
+       u32 flags;
        u8 num_chains, num_sub_bands;
 
        data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDS_METHOD);
@@ -596,7 +629,8 @@ read_table:
 
        IWL_DEBUG_RADIO(fwrt, "Reading WRDS tbl_rev=%d\n", tbl_rev);
 
-       enabled = !!(wifi_pkg->package.elements[1].integer.value);
+       flags = wifi_pkg->package.elements[1].integer.value;
+       fwrt->reduced_power_flags = flags >> IWL_REDUCE_POWER_FLAGS_POS;
 
        /* position of the actual table */
        table = &wifi_pkg->package.elements[2];
@@ -604,7 +638,8 @@ read_table:
        /* The profile from WRDS is officially profile 1, but goes
         * into sar_profiles[0] (because we don't have a profile 0).
         */
-       ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], enabled,
+       ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0],
+                                 flags & IWL_SAR_ENABLE_MSK,
                                  num_chains, num_sub_bands);
 out_free:
        kfree(data);
@@ -962,3 +997,181 @@ __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
        return config_bitmap;
 }
 IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap);
+
+int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+       union acpi_object *wifi_pkg, *data, *flags;
+       int i, j, ret, tbl_rev, num_sub_bands = 0;
+       int idx = 2;
+
+       fwrt->ppag_flags = 0;
+
+       data = iwl_acpi_get_object(fwrt->dev, ACPI_PPAG_METHOD);
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       /* try to read ppag table rev 2 or 1 (both have the same data size) */
+       wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+                               ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
+
+       if (!IS_ERR(wifi_pkg)) {
+               if (tbl_rev == 1 || tbl_rev == 2) {
+                       num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+                       IWL_DEBUG_RADIO(fwrt,
+                                       "Reading PPAG table v2 (tbl_rev=%d)\n",
+                                       tbl_rev);
+                       goto read_table;
+               } else {
+                       ret = -EINVAL;
+                       goto out_free;
+               }
+       }
+
+       /* try to read ppag table revision 0 */
+       wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+                       ACPI_PPAG_WIFI_DATA_SIZE_V1, &tbl_rev);
+
+       if (!IS_ERR(wifi_pkg)) {
+               if (tbl_rev != 0) {
+                       ret = -EINVAL;
+                       goto out_free;
+               }
+               num_sub_bands = IWL_NUM_SUB_BANDS_V1;
+               IWL_DEBUG_RADIO(fwrt, "Reading PPAG table v1 (tbl_rev=0)\n");
+               goto read_table;
+               goto read_table;
+       }
+
+       /* neither table revision was found: propagate the lookup error */
+       ret = PTR_ERR(wifi_pkg);
+       goto out_free;
+
+       fwrt->ppag_ver = tbl_rev;
+       flags = &wifi_pkg->package.elements[1];
+
+       if (flags->type != ACPI_TYPE_INTEGER) {
+               ret = -EINVAL;
+               goto out_free;
+       }
+
+       fwrt->ppag_flags = flags->integer.value & ACPI_PPAG_MASK;
+
+       if (!fwrt->ppag_flags) {
+               ret = 0;
+               goto out_free;
+       }
+
+       /*
+        * read, verify gain values and save them into the PPAG table.
+        * first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the
+        * following sub-bands to High-Band (5GHz).
+        */
+       for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+               for (j = 0; j < num_sub_bands; j++) {
+                       union acpi_object *ent;
+
+                       ent = &wifi_pkg->package.elements[idx++];
+                       if (ent->type != ACPI_TYPE_INTEGER) {
+                               ret = -EINVAL;
+                               goto out_free;
+                       }
+
+                       fwrt->ppag_chains[i].subbands[j] = ent->integer.value;
+
+                       if ((j == 0 &&
+                            (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB ||
+                             fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) ||
+                           (j != 0 &&
+                            (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB ||
+                             fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) {
+                               fwrt->ppag_flags = 0;
+                               ret = -EINVAL;
+                               goto out_free;
+                       }
+               }
+       }
+
+       ret = 0;
+
+out_free:
+       kfree(data);
+       return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_ppag_table);
+
+int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
+                       int *cmd_size)
+{
+       u8 cmd_ver;
+       int i, j, num_sub_bands;
+       s8 *gain;
+
+       if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
+               IWL_DEBUG_RADIO(fwrt,
+                               "PPAG capability not supported by FW, command not sent.\n");
+               return -EINVAL;
+       }
+       if (!fwrt->ppag_flags) {
+               IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
+               return -EINVAL;
+       }
+
+       /* The 'flags' field is the same in v1 and in v2 so we can just
+        * use v1 to access it.
+        */
+       cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
+       cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
+                                       WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD),
+                                       IWL_FW_CMD_VER_UNKNOWN);
+       if (cmd_ver == 1) {
+               num_sub_bands = IWL_NUM_SUB_BANDS_V1;
+               gain = cmd->v1.gain[0];
+               *cmd_size = sizeof(cmd->v1);
+               if (fwrt->ppag_ver == 1 || fwrt->ppag_ver == 2) {
+                       IWL_DEBUG_RADIO(fwrt,
+                                       "PPAG table rev is %d but FW supports v1, sending truncated table\n",
+                                       fwrt->ppag_ver);
+                       cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
+               }
+       } else if (cmd_ver == 2 || cmd_ver == 3) {
+               num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+               gain = cmd->v2.gain[0];
+               *cmd_size = sizeof(cmd->v2);
+               if (fwrt->ppag_ver == 0) {
+                       IWL_DEBUG_RADIO(fwrt,
+                                       "PPAG table is v1 but FW supports v2, sending padded table\n");
+               } else if (cmd_ver == 2 && fwrt->ppag_ver == 2) {
+                       IWL_DEBUG_RADIO(fwrt,
+                                       "PPAG table is v3 but FW supports v2, sending partial bitmap.\n");
+                       cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
+               }
+       } else {
+               IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+               for (j = 0; j < num_sub_bands; j++) {
+                       gain[i * num_sub_bands + j] =
+                               fwrt->ppag_chains[i].subbands[j];
+                       IWL_DEBUG_RADIO(fwrt,
+                                       "PPAG table: chain[%d] band[%d]: gain = %d\n",
+                                       i, j, gain[i * num_sub_bands + j]);
+               }
+       }
+
+       return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_read_ppag_table);
+
+bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+{
+       if (!dmi_check_system(dmi_ppag_approved_list)) {
+               IWL_DEBUG_RADIO(fwrt,
+                               "System vendor '%s' is not in the approved list, disabling PPAG.\n",
+                               dmi_get_system_info(DMI_SYS_VENDOR));
+               fwrt->ppag_flags = 0;
+               return false;
+       }
+
+       return true;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_is_ppag_approved);
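
Taken together, the three exported helpers give the op-mode a complete PPAG sequence: read the BIOS table, vet the platform against the DMI allow-list, then build the firmware command in whichever version the firmware advertises. A sketch of a caller; the wrapper is illustrative and the real sequencing lives in the op-mode code:

	static int demo_send_ppag(struct iwl_fw_runtime *fwrt)
	{
		union iwl_ppag_table_cmd cmd;
		int ret, cmd_size;

		ret = iwl_acpi_get_ppag_table(fwrt);	/* parse the ACPI table */
		if (ret)
			return ret;

		if (!iwl_acpi_is_ppag_approved(fwrt))	/* DMI allow-list */
			return 0;

		ret = iwl_read_ppag_table(fwrt, &cmd, &cmd_size);
		if (ret)
			return ret;

		/* cmd is now ready to send; cmd_size bytes are valid */
		return 0;
	}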
index 22b3c66..6f361c5 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
  * Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
  */
 #ifndef __iwl_fw_acpi__
 #define __iwl_fw_acpi__
@@ -77,6 +77,8 @@
 #define ACPI_WTAS_ENABLE_IEC_MSK       0x4
 #define ACPI_WTAS_OVERRIDE_IEC_POS     0x1
 #define ACPI_WTAS_ENABLE_IEC_POS       0x2
+#define ACPI_WTAS_USA_UHB_MSK          BIT(16)
+#define ACPI_WTAS_USA_UHB_POS          16
 
 
 #define ACPI_PPAG_WIFI_DATA_SIZE_V1    ((IWL_NUM_CHAIN_LIMITS * \
 #define ACPI_PPAG_MAX_LB 24
 #define ACPI_PPAG_MIN_HB -16
 #define ACPI_PPAG_MAX_HB 40
+#define ACPI_PPAG_MASK 3
+#define IWL_PPAG_ETSI_MASK BIT(0)
+
+#define IWL_SAR_ENABLE_MSK             BIT(0)
+#define IWL_REDUCE_POWER_FLAGS_POS     1
 
 /*
  * The profile for revision 2 is a superset of revision 1, which is in
@@ -126,7 +133,8 @@ enum iwl_dsm_funcs_rev_0 {
        DSM_FUNC_ENABLE_6E = 3,
        DSM_FUNC_11AX_ENABLEMENT = 6,
        DSM_FUNC_ENABLE_UNII4_CHAN = 7,
-       DSM_FUNC_ACTIVATE_CHANNEL = 8
+       DSM_FUNC_ACTIVATE_CHANNEL = 8,
+       DSM_FUNC_FORCE_DISABLE_CHANNELS = 9
 };
 
 enum iwl_dsm_values_srd {
@@ -213,10 +221,17 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
                     u32 n_bands, u32 n_profiles);
 
 int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
-                    struct iwl_tas_config_cmd_v3 *cmd);
+                    union iwl_tas_config_cmd *cmd, int fw_ver);
 
 __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
 
+int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
+                       int *cmd_size);
+
+bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt);
+
 #else /* CONFIG_ACPI */
 
 static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
@@ -294,7 +309,7 @@ static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
 }
 
 static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
-                                  struct iwl_tas_config_cmd_v3 *cmd)
+                                  union iwl_tas_config_cmd *cmd, int fw_ver)
 {
        return -ENOENT;
 }
@@ -304,6 +319,22 @@ static inline __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt
        return 0;
 }
 
+static inline int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+       return -ENOENT;
+}
+
+static inline int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt,
+                                   union iwl_ppag_table_cmd *cmd, int *cmd_size)
+{
+       return -ENOENT;
+}
+
+static inline bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+{
+       return false;
+}
+
 #endif /* CONFIG_ACPI */
 
 static inline union acpi_object *
index 35b8856..c78d2f1 100644 (file)
@@ -322,14 +322,6 @@ enum iwl_legacy_cmds {
         */
        REPLY_THERMAL_MNG_BACKOFF = 0x7e,
 
-       /**
-        * @DC2DC_CONFIG_CMD:
-        * Set/Get DC2DC frequency tune
-        * Command is &struct iwl_dc2dc_config_cmd,
-        * response is &struct iwl_dc2dc_config_resp
-        */
-       DC2DC_CONFIG_CMD = 0x83,
-
        /**
         * @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd
         */
@@ -608,6 +600,11 @@ enum iwl_system_subcmd_ids {
         * @SYSTEM_FEATURES_CONTROL_CMD: &struct iwl_system_features_control_cmd
         */
        SYSTEM_FEATURES_CONTROL_CMD = 0xd,
+
+       /**
+        * @RFI_DEACTIVATE_NOTIF: &struct iwl_rfi_deactivate_notif
+        */
+       RFI_DEACTIVATE_NOTIF = 0xff,
 };
 
 #endif /* __iwl_fw_api_commands_h__ */
index 1ab92f6..087354b 100644 (file)
@@ -114,37 +114,4 @@ enum iwl_dc2dc_config_id {
        DCDC_FREQ_TUNE_SET = 0x2,
 }; /* MARKER_ID_API_E_VER_1 */
 
-/**
- * struct iwl_dc2dc_config_cmd - configure dc2dc values
- *
- * (DC2DC_CONFIG_CMD = 0x83)
- *
- * Set/Get & configure dc2dc values.
- * The command always returns the current dc2dc values.
- *
- * @flags: set/get dc2dc
- * @enable_low_power_mode: not used.
- * @dc2dc_freq_tune0: frequency divider - digital domain
- * @dc2dc_freq_tune1: frequency divider - analog domain
- */
-struct iwl_dc2dc_config_cmd {
-       __le32 flags;
-       __le32 enable_low_power_mode; /* not used */
-       __le32 dc2dc_freq_tune0;
-       __le32 dc2dc_freq_tune1;
-} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */
-
-/**
- * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd
- *
- * Current dc2dc values returned by the FW.
- *
- * @dc2dc_freq_tune0: frequency divider - digital domain
- * @dc2dc_freq_tune1: frequency divider - analog domain
- */
-struct iwl_dc2dc_config_resp {
-       __le32 dc2dc_freq_tune0;
-       __le32 dc2dc_freq_tune1;
-} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */
-
 #endif /* __iwl_fw_api_config_h__ */
index 89236f4..43619ac 100644 (file)
@@ -42,7 +42,7 @@ enum iwl_data_path_subcmd_ids {
        RFH_QUEUE_CONFIG_CMD = 0xD,
 
        /**
-        * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
+        * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd_v4
         */
        TLC_MNG_CONFIG_CMD = 0xF,
 
@@ -57,6 +57,20 @@ enum iwl_data_path_subcmd_ids {
         */
        CHEST_COLLECTOR_FILTER_CONFIG_CMD = 0x14,
 
+       /**
+        * @RX_BAID_ALLOCATION_CONFIG_CMD: Allocate/deallocate a BAID for an RX
+        *      blockack session, uses &struct iwl_rx_baid_cfg_cmd for the
+        *      command, and &struct iwl_rx_baid_cfg_resp as a response.
+        */
+       RX_BAID_ALLOCATION_CONFIG_CMD = 0x16,
+
+       /**
+        * @SCD_QUEUE_CONFIG_CMD: new scheduler queue allocation/config/removal
+        *      command, uses &struct iwl_scd_queue_cfg_cmd and the response
+        *      is (same as before) &struct iwl_tx_queue_cfg_rsp.
+        */
+       SCD_QUEUE_CONFIG_CMD = 0x17,
+
        /**
         * @MONITOR_NOTIF: Datapath monitoring notification, using
         *      &struct iwl_datapath_monitor_notif
@@ -257,4 +271,136 @@ struct iwl_rlc_config_cmd {
        u8 reserved[3];
 } __packed; /* RLC_CONFIG_CMD_API_S_VER_2 */
 
+#define IWL_MAX_BAID_OLD       16 /* MAX_IMMEDIATE_BA_API_D_VER_2 */
+#define IWL_MAX_BAID           32 /* MAX_IMMEDIATE_BA_API_D_VER_3 */
+
+/**
+ * enum iwl_rx_baid_action - BAID allocation/config action
+ * @IWL_RX_BAID_ACTION_ADD: add a new BAID session
+ * @IWL_RX_BAID_ACTION_MODIFY: modify the BAID session
+ * @IWL_RX_BAID_ACTION_REMOVE: remove the BAID session
+ */
+enum iwl_rx_baid_action {
+       IWL_RX_BAID_ACTION_ADD,
+       IWL_RX_BAID_ACTION_MODIFY,
+       IWL_RX_BAID_ACTION_REMOVE,
+}; /*  RX_BAID_ALLOCATION_ACTION_E_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_alloc - BAID allocation data
+ * @sta_id_mask: station ID mask
+ * @tid: the TID for this session
+ * @reserved: reserved
+ * @ssn: the starting sequence number
+ * @win_size: RX BA session window size
+ */
+struct iwl_rx_baid_cfg_cmd_alloc {
+       __le32 sta_id_mask;
+       u8 tid;
+       u8 reserved[3];
+       __le16 ssn;
+       __le16 win_size;
+} __packed; /* RX_BAID_ALLOCATION_ADD_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_modify - BAID modification data
+ * @old_sta_id_mask: old station ID mask
+ * @new_sta_id_mask: new station ID mask
+ * @tid: TID of the BAID
+ */
+struct iwl_rx_baid_cfg_cmd_modify {
+       __le32 old_sta_id_mask;
+       __le32 new_sta_id_mask;
+       __le32 tid;
+} __packed; /* RX_BAID_ALLOCATION_MODIFY_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_remove_v1 - BAID removal data
+ * @baid: the BAID to remove
+ */
+struct iwl_rx_baid_cfg_cmd_remove_v1 {
+       __le32 baid;
+} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_remove - BAID removal data
+ * @sta_id_mask: the station mask of the BAID to remove
+ * @tid: the TID of the BAID to remove
+ */
+struct iwl_rx_baid_cfg_cmd_remove {
+       __le32 sta_id_mask;
+       __le32 tid;
+} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd - BAID allocation/config command
+ * @action: the action, from &enum iwl_rx_baid_action
+ */
+struct iwl_rx_baid_cfg_cmd {
+       __le32 action;
+       union {
+               struct iwl_rx_baid_cfg_cmd_alloc alloc;
+               struct iwl_rx_baid_cfg_cmd_modify modify;
+               struct iwl_rx_baid_cfg_cmd_remove_v1 remove_v1;
+               struct iwl_rx_baid_cfg_cmd_remove remove;
+       }; /* RX_BAID_ALLOCATION_OPERATION_API_U_VER_2 */
+} __packed; /* RX_BAID_ALLOCATION_CONFIG_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_rx_baid_cfg_resp - BAID allocation response
+ * @baid: the allocated BAID
+ */
+struct iwl_rx_baid_cfg_resp {
+       __le32 baid;
+}; /* RX_BAID_ALLOCATION_RESPONSE_API_S_VER_1 */
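
Usage illustration (not part of the patch): allocating a BAID for station 0,
TID 5 with the new command; ssn is assumed to come from the BA session state:

        struct iwl_rx_baid_cfg_cmd baid_cmd = {
                .action = cpu_to_le32(IWL_RX_BAID_ACTION_ADD),
                .alloc.sta_id_mask = cpu_to_le32(BIT(0)),       /* station 0 */
                .alloc.tid = 5,
                .alloc.ssn = cpu_to_le16(ssn),
                .alloc.win_size = cpu_to_le16(64),
        };

The command goes out as WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD)
and the FW returns the allocated BAID in struct iwl_rx_baid_cfg_resp.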
+
+/**
+ * enum iwl_scd_queue_cfg_operation - scheduler queue operation
+ * @IWL_SCD_QUEUE_ADD: allocate a new queue
+ * @IWL_SCD_QUEUE_REMOVE: remove a queue
+ * @IWL_SCD_QUEUE_MODIFY: modify a queue
+ */
+enum iwl_scd_queue_cfg_operation {
+       IWL_SCD_QUEUE_ADD = 0,
+       IWL_SCD_QUEUE_REMOVE = 1,
+       IWL_SCD_QUEUE_MODIFY = 2,
+};
+
+/**
+ * struct iwl_scd_queue_cfg_cmd - scheduler queue allocation command
+ * @operation: the operation, see &enum iwl_scd_queue_cfg_operation
+ * @u.add.sta_mask: station mask
+ * @u.add.tid: TID
+ * @u.add.reserved: reserved
+ * @u.add.flags: flags from &enum iwl_tx_queue_cfg_actions, except
+ *     %TX_QUEUE_CFG_ENABLE_QUEUE is not valid
+ * @u.add.cb_size: size code
+ * @u.add.bc_dram_addr: byte-count table IOVA
+ * @u.add.tfdq_dram_addr: TFD queue IOVA
+ * @u.remove.queue: queue ID for removal
+ * @u.modify.sta_mask: new station mask for modify
+ * @u.modify.queue: queue ID to modify
+ */
+struct iwl_scd_queue_cfg_cmd {
+       __le32 operation;
+       union {
+               struct {
+                       __le32 sta_mask;
+                       u8 tid;
+                       u8 reserved[3];
+                       __le32 flags;
+                       __le32 cb_size;
+                       __le64 bc_dram_addr;
+                       __le64 tfdq_dram_addr;
+               } __packed add; /* TX_QUEUE_CFG_CMD_ADD_API_S_VER_1 */
+               struct {
+                       __le32 queue;
+               } __packed remove; /* TX_QUEUE_CFG_CMD_REMOVE_API_S_VER_1 */
+               struct {
+                       __le32 sta_mask;
+                       __le32 queue;
+               } __packed modify; /* TX_QUEUE_CFG_CMD_MODIFY_API_S_VER_1 */
+       } __packed u; /* TX_QUEUE_CFG_CMD_OPERATION_API_U_VER_1 */
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_3 */
+
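
For illustration, a sketch of an "add" operation with the new unioned command;
sta_id, tid, size_code and the two DMA addresses are assumed to be prepared by
the caller:

        struct iwl_scd_queue_cfg_cmd qcmd = {
                .operation = cpu_to_le32(IWL_SCD_QUEUE_ADD),
                .u.add.sta_mask = cpu_to_le32(BIT(sta_id)),
                .u.add.tid = tid,
                .u.add.flags = cpu_to_le32(TX_QUEUE_CFG_TFD_SHORT_FORMAT),
                .u.add.cb_size = cpu_to_le32(size_code),
                .u.add.bc_dram_addr = cpu_to_le64(bc_table_dma),
                .u.add.tfdq_dram_addr = cpu_to_le64(tfd_queue_dma),
        };

The response reuses struct iwl_tx_queue_cfg_rsp, as the subcommand comment
above notes.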
 #endif /* __iwl_fw_api_datapath_h__ */
index 456b7ea..52bf965 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
  */
 #ifndef __iwl_fw_dbg_tlv_h__
 #define __iwl_fw_dbg_tlv_h__
@@ -11,7 +11,8 @@
 #define IWL_FW_INI_MAX_NAME                    32
 #define IWL_FW_INI_MAX_CFG_NAME                        64
 #define IWL_FW_INI_DOMAIN_ALWAYS_ON            0
-#define IWL_FW_INI_REGION_V2_MASK              0x0000FFFF
+#define IWL_FW_INI_REGION_ID_MASK              GENMASK(15, 0)
+#define IWL_FW_INI_REGION_DUMP_POLICY_MASK     GENMASK(31, 16)
 
 /**
  * struct iwl_fw_ini_hcmd
@@ -249,11 +250,10 @@ struct iwl_fw_ini_hcmd_tlv {
 } __packed; /* FW_TLV_DEBUG_HCMD_API_S_VER_1 */
 
 /**
-* struct iwl_fw_ini_conf_tlv - preset configuration TLV
+* struct iwl_fw_ini_addr_val - Address and value to set it to
 *
 * @address: the base address
 * @value: value to set at address
-
 */
 struct iwl_fw_ini_addr_val {
        __le32 address;
@@ -475,6 +475,7 @@ enum iwl_fw_ini_time_point {
  * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG: override trigger configuration
  * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data.
  *     Append otherwise
+ * @IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD: send cmd once dump collected
  */
 enum iwl_fw_ini_trigger_apply_policy {
        IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT        = BIT(0),
@@ -482,6 +483,7 @@ enum iwl_fw_ini_trigger_apply_policy {
        IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS        = BIT(8),
        IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG            = BIT(9),
        IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA           = BIT(10),
+       IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD       = BIT(16),
 };
 
 /**
@@ -496,4 +498,31 @@ enum iwl_fw_ini_trigger_reset_fw_policy {
        IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY,
        IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW
 };
+
+/**
+ * enum iwl_fw_ini_dump_policy - Determines how to handle dump based on enabled flags
+ *
+ * @IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT: the OS places no limit on dump size
+ * @IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB: mini dump, limited to 600KB of region dumps
+ * @IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB: mini dump, limited to 5MB of region dumps
+ */
+enum iwl_fw_ini_dump_policy {
+       IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT           = BIT(0),
+       IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB    = BIT(1),
+       IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB      = BIT(2),
+};
+
+/**
+ * enum iwl_fw_ini_dump_type - Determines dump type based on size defined by FW.
+ *
+ * @IWL_FW_INI_DUMP_BRIEF: only dump the most important regions
+ * @IWL_FW_INI_DUMP_MEDIUM: dump more regions than "brief", but not all regions
+ * @IWL_FW_INI_DUMP_VERBOSE: dump all regions
+ */
+enum iwl_fw_ini_dump_type {
+       IWL_FW_INI_DUMP_BRIEF,
+       IWL_FW_INI_DUMP_MEDIUM,
+       IWL_FW_INI_DUMP_VERBOSE,
+};
 #endif
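
The dump policy bits ride in the upper half of the region ID, so a collector
can decode both with the masks added above; a sketch mirroring the dbg.c
changes later in this series (dump_type is an assumed local):

        u32 id = le32_get_bits(reg->id, IWL_FW_INI_REGION_ID_MASK);
        u32 dp = le32_get_bits(reg->id, IWL_FW_INI_REGION_DUMP_POLICY_MASK);

        /* e.g. under a "brief" dump, only regions explicitly allowed into
         * the 600KB budget are collected
         */
        if (dump_type == IWL_FW_INI_DUMP_BRIEF &&
            !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB))
                return 0;       /* skip this region */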
index 029ae64..6255257 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -42,6 +42,12 @@ enum iwl_debug_cmds {
         * &struct iwl_buf_alloc_cmd
         */
        BUFFER_ALLOCATION = 0x8,
+       /**
+        * @FW_DUMP_COMPLETE_CMD:
+        * sends command to fw once dump collection completed
+        * &struct iwl_dbg_dump_complete_cmd
+        */
+       FW_DUMP_COMPLETE_CMD = 0xB,
        /**
         * @MFU_ASSERT_DUMP_NTF:
         * &struct iwl_mfu_assert_dump_notif
@@ -404,4 +410,15 @@ struct iwl_dbg_host_event_cfg_cmd {
        __le32 enabled_severities;
 } __packed; /* DEBUG_HOST_EVENT_CFG_CMD_API_S_VER_1 */
 
+/**
+ * struct iwl_dbg_dump_complete_cmd - dump complete cmd
+ *
+ * @tp: timepoint whose dump has completed
+ * @tp_data: timepoint data
+ */
+struct iwl_dbg_dump_complete_cmd {
+       __le32 tp;
+       __le32 tp_data;
+} __packed; /* FW_DUMP_COMPLETE_CMD_API_S_VER_1 */
+
 #endif /* __iwl_fw_api_debug_h__ */
index d088c82..712532f 100644 (file)
@@ -27,6 +27,10 @@ enum iwl_mac_conf_subcmd_ids {
         * @SESSION_PROTECTION_CMD: &struct iwl_mvm_session_prot_cmd
         */
        SESSION_PROTECTION_CMD = 0x5,
+       /**
+        * @CANCEL_CHANNEL_SWITCH_CMD: &struct iwl_cancel_channel_switch_cmd
+        */
+       CANCEL_CHANNEL_SWITCH_CMD = 0x6,
 
        /**
         * @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif
@@ -42,6 +46,11 @@ enum iwl_mac_conf_subcmd_ids {
         * @CHANNEL_SWITCH_START_NOTIF: &struct iwl_channel_switch_start_notif
         */
        CHANNEL_SWITCH_START_NOTIF = 0xFF,
+
+       /**
+        * @CHANNEL_SWITCH_ERROR_NOTIF: &struct iwl_channel_switch_error_notif
+        */
+       CHANNEL_SWITCH_ERROR_NOTIF = 0xF9,
 };
 
 #define IWL_P2P_NOA_DESC_COUNT (2)
@@ -110,6 +119,31 @@ struct iwl_channel_switch_start_notif {
        __le32 id_and_color;
 } __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
 
+#define CS_ERR_COUNT_ERROR BIT(0)
+#define CS_ERR_LONG_DELAY_AFTER_CS BIT(1)
+#define CS_ERR_LONG_TX_BLOCK BIT(2)
+#define CS_ERR_TX_BLOCK_TIMER_EXPIRED BIT(3)
+
+/**
+ * struct iwl_channel_switch_error_notif - Channel switch error notification
+ *
+ * @mac_id: the mac for which the ucode sends the notification
+ * @csa_err_mask: mask of the channel switch errors that occurred
+ */
+struct iwl_channel_switch_error_notif {
+       __le32 mac_id;
+       __le32 csa_err_mask;
+} __packed; /* CHANNEL_SWITCH_ERROR_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_cancel_channel_switch_cmd - Cancel Channel Switch command
+ *
+ * @mac_id: the mac that should cancel the channel switch
+ */
+struct iwl_cancel_channel_switch_cmd {
+       __le32 mac_id;
+} __packed; /* MAC_CANCEL_CHANNEL_SWITCH_S_VER_1 */
+
 /**
  * struct iwl_chan_switch_te_cmd - Channel Switch Time Event command
  *
index 11f0bd2..9b7caf9 100644 (file)
@@ -413,10 +413,11 @@ enum iwl_he_pkt_ext_constellations {
 };
 
 #define MAX_HE_SUPP_NSS        2
-#define MAX_HE_CHANNEL_BW_INDX 4
+#define MAX_CHANNEL_BW_INDX_API_D_VER_2        4
+#define MAX_CHANNEL_BW_INDX_API_D_VER_3        5
 
 /**
- * struct iwl_he_pkt_ext - QAM thresholds
+ * struct iwl_he_pkt_ext_v1 - QAM thresholds
  * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
  * The IE is organized in the following way:
  * Support for Nss x BW (or RU) matrix:
@@ -435,9 +436,34 @@ enum iwl_he_pkt_ext_constellations {
  *     Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x
  *             (0-low_th, 1-high_th)
  */
-struct iwl_he_pkt_ext {
-       u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_HE_CHANNEL_BW_INDX][2];
-} __packed; /* PKT_EXT_DOT11AX_API_S */
+struct iwl_he_pkt_ext_v1 {
+       u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2];
+} __packed; /* PKT_EXT_DOT11AX_API_S_VER_1 */
+
+/**
+ * struct iwl_he_pkt_ext_v2 - QAM thresholds
+ * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
+ * The IE is organized in the following way:
+ * Support for Nss x BW (or RU) matrix:
+ *     (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
+ * Each entry contains 2 QAM thresholds for 8us and 16us:
+ *     0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE
+ * i.e. QAM_th1 < QAM_th2 such that if TX uses QAM_tx:
+ *     QAM_tx < QAM_th1            --> PPE=0us
+ *     QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
+ *     QAM_th2 <= QAM_tx           --> PPE=16us
+ * @pkt_ext_qam_th: QAM thresholds
+ *     For each Nss/Bw define 2 QAM thresholds (0..5)
+ *     For rates below the low_th, no need for PPE
+ *     For rates between low_th and high_th, need 8us PPE
+ *     For rates equal to or higher than the high_th, need 16us PPE
+ *     Nss (0-siso, 1-mimo2) x
+ *     BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz, 4-320MHz) x
+ *     (0-low_th, 1-high_th)
+ */
+struct iwl_he_pkt_ext_v2 {
+       u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_3][2];
+} __packed; /* PKT_EXT_DOT11AX_API_S_VER_2 */
 
 /**
  * enum iwl_he_sta_ctxt_flags - HE STA context flags
@@ -464,6 +490,11 @@ struct iwl_he_pkt_ext {
  * @STA_CTXT_HE_RU_2MHZ_BLOCK: indicates that 26-tone RU OFDMA transmission are
  *      not allowed (as there are OBSS that might classify such transmissions as
  *      radar pulses).
+ * @STA_CTXT_HE_NDP_FEEDBACK_ENABLED: mark support for NDP feedback and change
+ *     of threshold
+ * @STA_CTXT_EHT_PUNCTURE_MASK_VALID: indicates the puncture_mask field is valid
+ * @STA_CTXT_EHT_LONG_PPE_ENABLED: indicates the PPE requirement should be
+ *     extended to 20us for BW > 160Mhz or for MCS w/ 4096-QAM.
  */
 enum iwl_he_sta_ctxt_flags {
        STA_CTXT_HE_REF_BSSID_VALID             = BIT(4),
@@ -477,6 +508,9 @@ enum iwl_he_sta_ctxt_flags {
        STA_CTXT_HE_MU_EDCA_CW                  = BIT(12),
        STA_CTXT_HE_NIC_NOT_ACK_ENABLED         = BIT(13),
        STA_CTXT_HE_RU_2MHZ_BLOCK               = BIT(14),
+       STA_CTXT_HE_NDP_FEEDBACK_ENABLED        = BIT(15),
+       STA_CTXT_EHT_PUNCTURE_MASK_VALID        = BIT(16),
+       STA_CTXT_EHT_LONG_PPE_ENABLED           = BIT(17),
 };
 
 /**
@@ -551,7 +585,7 @@ struct iwl_he_sta_context_cmd_v1 {
        u8 frag_min_size;
 
        /* The below fields are set via PPE thresholds element */
-       struct iwl_he_pkt_ext pkt_ext;
+       struct iwl_he_pkt_ext_v1 pkt_ext;
 
        /* The below fields are set via HE-Operation IE */
        u8 bss_color;
@@ -568,7 +602,7 @@ struct iwl_he_sta_context_cmd_v1 {
 } __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_1 */
 
 /**
- * struct iwl_he_sta_context_cmd - configure FW to work with HE AP
+ * struct iwl_he_sta_context_cmd_v2 - configure FW to work with HE AP
  * @sta_id: STA id
  * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
  *     0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
@@ -599,7 +633,7 @@ struct iwl_he_sta_context_cmd_v1 {
  * @bssid_count: actual number of VAPs in the MultiBSS Set
  * @reserved4: alignment
  */
-struct iwl_he_sta_context_cmd {
+struct iwl_he_sta_context_cmd_v2 {
        u8 sta_id;
        u8 tid_limit;
        u8 reserved1;
@@ -619,7 +653,7 @@ struct iwl_he_sta_context_cmd {
        u8 frag_min_size;
 
        /* The below fields are set via PPE thresholds element */
-       struct iwl_he_pkt_ext pkt_ext;
+       struct iwl_he_pkt_ext_v1 pkt_ext;
 
        /* The below fields are set via HE-Operation IE */
        u8 bss_color;
@@ -642,6 +676,81 @@ struct iwl_he_sta_context_cmd {
        u8 reserved4[3];
 } __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */
 
+/**
+ * struct iwl_he_sta_context_cmd_v3 - configure FW to work with HE AP
+ * @sta_id: STA id
+ * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
+ *     0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
+ * @reserved1: reserved byte for future use
+ * @reserved2: reserved byte for future use
+ * @flags: see %iwl_11ax_sta_ctxt_flags
+ * @ref_bssid_addr: reference BSSID used by the AP
+ * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
+ * @htc_flags: which features are supported in HTC
+ * @frag_flags: frag support in A-MSDU
+ * @frag_level: frag support level
+ * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2)
+ * @frag_min_size: min frag size (except last frag)
+ * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
+ * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
+ * @htc_trig_based_pkt_ext: default PE in 4us units
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
+ * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
+ * @puncture_mask: puncture mask for EHT
+ * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
+ * @max_bssid_indicator: indicator of the max bssid supported on the associated
+ *     bss
+ * @bssid_index: index of the associated VAP
+ * @ema_ap: AP supports enhanced Multi BSSID advertisement
+ * @profile_periodicity: number of Beacon periods that are needed to receive the
+ *     complete VAPs info
+ * @bssid_count: actual number of VAPs in the MultiBSS Set
+ * @reserved4: alignment
+ */
+struct iwl_he_sta_context_cmd_v3 {
+       u8 sta_id;
+       u8 tid_limit;
+       u8 reserved1;
+       u8 reserved2;
+       __le32 flags;
+
+       /* The below fields are set via Multiple BSSID IE */
+       u8 ref_bssid_addr[6];
+       __le16 reserved0;
+
+       /* The below fields are set via HE-capabilities IE */
+       __le32 htc_flags;
+
+       u8 frag_flags;
+       u8 frag_level;
+       u8 frag_max_num;
+       u8 frag_min_size;
+
+       /* The below fields are set via PPE thresholds element */
+       struct iwl_he_pkt_ext_v2 pkt_ext;
+
+       /* The below fields are set via HE-Operation IE */
+       u8 bss_color;
+       u8 htc_trig_based_pkt_ext;
+       __le16 frame_time_rts_th;
+
+       /* Random access parameter set (i.e. RAPS) */
+       u8 rand_alloc_ecwmin;
+       u8 rand_alloc_ecwmax;
+       __le16 puncture_mask;
+
+       /* The below fields are set via MU EDCA parameter set element */
+       struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
+
+       u8 max_bssid_indicator;
+       u8 bssid_index;
+       u8 ema_ap;
+       u8 profile_periodicity;
+       u8 bssid_count;
+       u8 reserved4[3];
+} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_3 */
+
 /**
  * struct iwl_he_monitor_cmd - configure air sniffer for HE
  * @bssid: the BSSID to sniff for
index 4949fcf..91bfde6 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -419,6 +419,30 @@ struct iwl_tas_config_cmd_v3 {
        __le16 enable_tas_iec;
 } __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */
 
+/**
+ * struct iwl_tas_config_cmd_v4 - configures the TAS
+ * @block_list_size: size of relevant field in block_list_array
+ * @block_list_array: list of countries where TAS must be disabled
+ * @override_tas_iec: indicates whether to override default value of IEC regulatory
+ * @enable_tas_iec: in case override_tas_iec is set -
+ *     indicates whether IEC regulatory is enabled or disabled
+ * @usa_tas_uhb_allowed: if set, allow TAS UHB in the USA
+ * @reserved: reserved
+*/
+struct iwl_tas_config_cmd_v4 {
+       __le32 block_list_size;
+       __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
+       u8 override_tas_iec;
+       u8 enable_tas_iec;
+       u8 usa_tas_uhb_allowed;
+       u8 reserved;
+} __packed; /* TAS_CONFIG_CMD_API_S_VER_4 */
+
+union iwl_tas_config_cmd {
+       struct iwl_tas_config_cmd_v2 v2;
+       struct iwl_tas_config_cmd_v3 v3;
+       struct iwl_tas_config_cmd_v4 v4;
+};
+
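
With iwl_acpi_get_tas() now taking the union plus a fw_ver argument, the
caller picks the layout (and command length) by version; a sketch, assuming
fw_ver was resolved via iwl_fw_lookup_cmd_ver():

        union iwl_tas_config_cmd tas_cmd = {};
        int tas_len;

        if (fw_ver == 4) {
                tas_cmd.v4.usa_tas_uhb_allowed = 1; /* from ACPI_WTAS_USA_UHB_MSK */
                tas_len = sizeof(tas_cmd.v4);
        } else if (fw_ver == 3) {
                tas_len = sizeof(tas_cmd.v3);
        } else {
                tas_len = sizeof(tas_cmd.v2);
        }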
 /**
  * enum iwl_lari_configs - bit masks for the various LARI config operations
  * @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine
@@ -514,6 +538,32 @@ struct iwl_lari_config_change_cmd_v5 {
        __le32 chan_state_active_bitmap;
 } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_5 */
 
+/**
+ * struct iwl_lari_config_change_cmd_v6 - change LARI configuration
+ * @config_bitmap: Bitmap of the config commands. Each bit will trigger a
+ *     different predefined FW config operation.
+ * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
+ * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
+ *     per country, one to indicate whether to override and the other to
+ *     indicate the value to use.
+ * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs. There are two bits
+ *     per country, one to indicate whether to override and the other to
+ *     indicate allow/disallow unii4 channels.
+ * @chan_state_active_bitmap: Bitmap for overriding channel state to active.
+ *     Each bit represents a country or region to activate, according to the BIOS
+ *     definitions.
+ * @force_disable_channels_bitmap: Bitmap of disabled bands/channels.
+ *     Each bit represents a set of channels in a specific band that should be disabled
+ */
+struct iwl_lari_config_change_cmd_v6 {
+       __le32 config_bitmap;
+       __le32 oem_uhb_allow_bitmap;
+       __le32 oem_11ax_allow_bitmap;
+       __le32 oem_unii4_allow_bitmap;
+       __le32 chan_state_active_bitmap;
+       __le32 force_disable_channels_bitmap;
+} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_6 */
+
 /**
  * struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
  * @status: PNVM image loading status
index c04f252..b1b9c29 100644 (file)
@@ -166,14 +166,24 @@ struct iwl_dts_measurement_resp {
 
 /**
  * struct ct_kill_notif - CT-kill entry notification
+ * This structure represents both versions of this notification.
  *
  * @temperature: the current temperature in celsius
- * @reserved: reserved
+ * @dts: only in v2: bitmap of the DTS that triggered the CT kill:
+ *                     bit 0: ToP master
+ *                     bit 1: PA chain A master
+ *                     bit 2: PA chain B master
+ *                     bit 3: ToP slave
+ *                     bit 4: PA chain A slave
+ *                     bit 5: PA chain B slave
+ *                     bits 6,7: reserved (set to 0)
+ * @scheme: only for v2: the scheme that triggered the CT kill (0-SW, 1-HW)
  */
 struct ct_kill_notif {
        __le16 temperature;
-       __le16 reserved;
-} __packed; /* GRP_PHY_CT_KILL_NTF */
+       u8 dts;
+       u8 scheme;
+} __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */
 
 /**
 * enum ctdp_cmd_operation - CTDP command operations
index 8131820..f92cac1 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */
@@ -340,7 +340,7 @@ struct iwl_dev_tx_power_cmd_v5 {
 } __packed; /* TX_REDUCED_POWER_API_S_VER_5 */
 
 /**
- * struct iwl_dev_tx_power_cmd_v5 - TX power reduction command version 5
+ * struct iwl_dev_tx_power_cmd_v6 - TX power reduction command version 6
  * @per_chain: per chain restrictions
  * @enable_ack_reduction: enable or disable close range ack TX power
  *     reduction.
@@ -360,6 +360,28 @@ struct iwl_dev_tx_power_cmd_v6 {
        __le32 timer_period;
 } __packed; /* TX_REDUCED_POWER_API_S_VER_6 */
 
+/**
+ * struct iwl_dev_tx_power_cmd_v7 - TX power reduction command version 7
+ * @per_chain: per chain restrictions
+ * @enable_ack_reduction: enable or disable close range ack TX power
+ *     reduction.
+ * @per_chain_restriction_changed: indicates whether per_chain_restriction
+ *     has changed since the last command. used if set_mode is
+ *     IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ *     note: if not changed, the command is used for keep alive only.
+ * @reserved: reserved (padding)
+ * @timer_period: timer in milliseconds. if expires FW will change to default
+ *     BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER
+ * @flags: reduce power flags.
+ */
+struct iwl_dev_tx_power_cmd_v7 {
+       __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+       u8 enable_ack_reduction;
+       u8 per_chain_restriction_changed;
+       u8 reserved[2];
+       __le32 timer_period;
+       __le32 flags;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_7 */
+
 /**
  * struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion)
  * @common: common part of the command
@@ -375,6 +397,7 @@ struct iwl_dev_tx_power_cmd {
                struct iwl_dev_tx_power_cmd_v4 v4;
                struct iwl_dev_tx_power_cmd_v5 v5;
                struct iwl_dev_tx_power_cmd_v6 v6;
+               struct iwl_dev_tx_power_cmd_v7 v7;
        };
 };
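
The multiversion layout means only the version-specific tail is sent on the
wire; a length-selection sketch, with cmd_ver assumed to come from
iwl_fw_lookup_cmd_ver():

        struct iwl_dev_tx_power_cmd cmd = {}; /* .common filled by the caller */
        int len = sizeof(cmd.common);

        if (cmd_ver == 7)
                len += sizeof(cmd.v7);
        else if (cmd_ver == 6)
                len += sizeof(cmd.v6);
        /* and so on for the older versions */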
 
index c678b9a..1a84a40 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2020 Intel Corporation
+ * Copyright (C) 2020-2021 Intel Corporation
  */
 #ifndef __iwl_fw_api_rfi_h__
 #define __iwl_fw_api_rfi_h__
@@ -57,4 +57,12 @@ struct iwl_rfi_freq_table_resp_cmd {
        __le32 status;
 } __packed; /* RFI_CONFIG_CMD_API_S_VER_1 */
 
+/**
+ * struct iwl_rfi_deactivate_notif - notification that FW disabled RFIm
+ *
+ * @reason: used only for a log message
+ */
+struct iwl_rfi_deactivate_notif {
+       __le32 reason;
+} __packed; /* RFI_DEACTIVATE_NTF_S_VER_1 */
 #endif /* __iwl_fw_api_rfi_h__ */
index 4a7723e..687f804 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2017 Intel Deutschland GmbH
  */
 #ifndef __iwl_fw_api_rs_h__
@@ -133,7 +133,7 @@ enum IWL_TLC_MCS_PER_BW {
 };
 
 /**
- * struct tlc_config_cmd - TLC configuration
+ * struct iwl_tlc_config_cmd_v3 - TLC configuration
  * @sta_id: station id
  * @reserved1: reserved
  * @max_ch_width: max supported channel width from @enum iwl_tlc_mng_cfg_cw
@@ -168,7 +168,7 @@ struct iwl_tlc_config_cmd_v3 {
 } __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_3 */
 
 /**
- * struct tlc_config_cmd - TLC configuration
+ * struct iwl_tlc_config_cmd_v4 - TLC configuration
  * @sta_id: station id
  * @reserved1: reserved
  * @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw
index e73cc73..ecc6706 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
 #ifndef __iwl_fw_api_tx_h__
@@ -296,8 +296,7 @@ struct iwl_tx_cmd_gen2 {
  * @dram_info: FW internal DRAM storage
  * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
  *     cleared. Combination of RATE_MCS_*
- * @ttl: time to live - packet lifetime limit. The FW should drop if
- *     passed.
+ * @reserved: reserved
  * @hdr: 802.11 header
  */
 struct iwl_tx_cmd_gen3 {
@@ -306,7 +305,7 @@ struct iwl_tx_cmd_gen3 {
        __le32 offload_assist;
        struct iwl_dram_sec_info dram_info;
        __le32 rate_n_flags;
-       __le64 ttl;
+       u8 reserved[8];
        struct ieee80211_hdr hdr[];
 } __packed; /* TX_CMD_API_S_VER_8,
               TX_CMD_API_S_VER_10 */
index 8b3a00d..e018946 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2019-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2019-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -76,6 +76,8 @@ enum iwl_tx_queue_cfg_actions {
        TX_QUEUE_CFG_TFD_SHORT_FORMAT           = BIT(1),
 };
 
+#define IWL_DEFAULT_QUEUE_SIZE_EHT (1024 * 4)
+#define IWL_DEFAULT_QUEUE_SIZE_HE 1024
 #define IWL_DEFAULT_QUEUE_SIZE 256
 #define IWL_MGMT_QUEUE_SIZE 16
 #define IWL_CMD_QUEUE_SIZE 32
index 7ad9cee..abf4902 100644 (file)
@@ -12,7 +12,7 @@
 #include "iwl-io.h"
 #include "iwl-prph.h"
 #include "iwl-csr.h"
-
+#include "iwl-fh.h"
 /**
  * struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
  *
@@ -303,9 +303,6 @@ static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt,
        iwl_trans_release_nic_access(fwrt->trans);
 }
 
-#define IWL8260_ICCM_OFFSET            0x44000 /* Only for B-step */
-#define IWL8260_ICCM_LEN               0xC000 /* Only for B-step */
-
 struct iwl_prph_range {
        u32 start, end;
 };
@@ -1027,7 +1024,7 @@ struct iwl_dump_ini_region_data {
 static int
 iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
                           struct iwl_dump_ini_region_data *reg_data,
-                          void *range_ptr, int idx)
+                          void *range_ptr, u32 range_len, int idx)
 {
        struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
        struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1052,7 +1049,7 @@ iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
 static int
 iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
                           struct iwl_dump_ini_region_data *reg_data,
-                          void *range_ptr, int idx)
+                          void *range_ptr, u32 range_len, int idx)
 {
        struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
        struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1102,7 +1099,7 @@ iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
 
 static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
                                 struct iwl_dump_ini_region_data *reg_data,
-                                void *range_ptr, int idx)
+                                void *range_ptr, u32 range_len, int idx)
 {
        struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
        struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1121,7 +1118,7 @@ static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
 
 static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
                                    struct iwl_dump_ini_region_data *reg_data,
-                                   void *range_ptr, int idx)
+                                   void *range_ptr, u32 range_len, int idx)
 {
        struct iwl_trans *trans = fwrt->trans;
        struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
@@ -1153,7 +1150,7 @@ static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
 
 static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
                                     struct iwl_dump_ini_region_data *reg_data,
-                                    void *range_ptr, int idx)
+                                    void *range_ptr, u32 range_len, int idx)
 {
        struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
        struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1175,7 +1172,7 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
 }
 
 static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
-                                    void *range_ptr, int idx)
+                                    void *range_ptr, u32 range_len, int idx)
 {
        struct page *page = fwrt->fw_paging_db[idx].fw_paging_block;
        struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1195,7 +1192,7 @@ static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
 
 static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
                                    struct iwl_dump_ini_region_data *reg_data,
-                                   void *range_ptr, int idx)
+                                   void *range_ptr, u32 range_len, int idx)
 {
        struct iwl_fw_ini_error_dump_range *range;
        u32 page_size;
@@ -1204,7 +1201,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
        idx++;
 
        if (!fwrt->trans->trans_cfg->gen2)
-               return _iwl_dump_ini_paging_iter(fwrt, range_ptr, idx);
+               return _iwl_dump_ini_paging_iter(fwrt, range_ptr, range_len, idx);
 
        range = range_ptr;
        page_size = fwrt->trans->init_dram.paging[idx].size;
@@ -1220,7 +1217,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
 static int
 iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
                           struct iwl_dump_ini_region_data *reg_data,
-                          void *range_ptr, int idx)
+                          void *range_ptr, u32 range_len, int idx)
 {
        struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
        struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1239,7 +1236,7 @@ iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
 
 static int iwl_dump_ini_mon_smem_iter(struct iwl_fw_runtime *fwrt,
                                      struct iwl_dump_ini_region_data *reg_data,
-                                     void *range_ptr, int idx)
+                                     void *range_ptr, u32 range_len, int idx)
 {
        struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
        struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1307,7 +1304,7 @@ static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt,
 
 static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
                                 struct iwl_dump_ini_region_data *reg_data,
-                                void *range_ptr, int idx)
+                                void *range_ptr, u32 range_len, int idx)
 {
        struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
        struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1442,7 +1439,7 @@ static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
 
 static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
                                 struct iwl_dump_ini_region_data *reg_data,
-                                void *range_ptr, int idx)
+                                void *range_ptr, u32 range_len, int idx)
 {
        struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
        struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1509,7 +1506,7 @@ out:
 static int
 iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
                            struct iwl_dump_ini_region_data *reg_data,
-                           void *range_ptr, int idx)
+                           void *range_ptr, u32 range_len, int idx)
 {
        struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
        struct iwl_fw_ini_region_err_table *err_table = &reg->err_table;
@@ -1528,7 +1525,7 @@ iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
 static int
 iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt,
                              struct iwl_dump_ini_region_data *reg_data,
-                             void *range_ptr, int idx)
+                             void *range_ptr, u32 range_len, int idx)
 {
        struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
        struct iwl_fw_ini_region_special_device_memory *special_mem =
@@ -1549,7 +1546,7 @@ iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt,
 static int
 iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
                            struct iwl_dump_ini_region_data *reg_data,
-                           void *range_ptr, int idx)
+                           void *range_ptr, u32 range_len, int idx)
 {
        struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
        struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1561,8 +1558,6 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
                return -EBUSY;
 
        range->range_data_size = reg->dev_addr.size;
-       iwl_write_prph_no_grab(fwrt->trans, DBGI_SRAM_TARGET_ACCESS_CFG,
-                              DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK);
        for (i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) {
                prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ?
                                          DBGI_SRAM_TARGET_ACCESS_RDATA_MSB :
@@ -1579,7 +1574,7 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
 
 static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
                                    struct iwl_dump_ini_region_data *reg_data,
-                                   void *range_ptr, int idx)
+                                   void *range_ptr, u32 range_len, int idx)
 {
        struct iwl_fw_ini_error_dump_range *range = range_ptr;
        struct iwl_rx_packet *pkt = reg_data->dump_data->fw_pkt;
@@ -1598,10 +1593,37 @@ static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
        return sizeof(*range) + le32_to_cpu(range->range_data_size);
 }
 
+static int iwl_dump_ini_imr_iter(struct iwl_fw_runtime *fwrt,
+                                struct iwl_dump_ini_region_data *reg_data,
+                                void *range_ptr, u32 range_len, int idx)
+{
+       /* read the IMR memory and DMA it to SRAM */
+       struct iwl_fw_ini_error_dump_range *range = range_ptr;
+       u64 imr_curr_addr = fwrt->trans->dbg.imr_data.imr_curr_addr;
+       u32 imr_rem_bytes = fwrt->trans->dbg.imr_data.imr2sram_remainbyte;
+       u32 sram_addr = fwrt->trans->dbg.imr_data.sram_addr;
+       u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
+       u32 size_to_dump = (imr_rem_bytes > sram_size) ? sram_size : imr_rem_bytes;
+
+       range->range_data_size = cpu_to_le32(size_to_dump);
+       if (iwl_trans_write_imr_mem(fwrt->trans, sram_addr,
+                                   imr_curr_addr, size_to_dump)) {
+               IWL_ERR(fwrt, "WRT_DEBUG: IMR Memory transfer failed\n");
+               return -1;
+       }
+
+       fwrt->trans->dbg.imr_data.imr_curr_addr = imr_curr_addr + size_to_dump;
+       fwrt->trans->dbg.imr_data.imr2sram_remainbyte -= size_to_dump;
+
+       iwl_trans_read_mem_bytes(fwrt->trans, sram_addr, range->data,
+                                size_to_dump);
+       return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
 static void *
 iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
                             struct iwl_dump_ini_region_data *reg_data,
-                            void *data)
+                            void *data, u32 data_len)
 {
        struct iwl_fw_ini_error_dump *dump = data;
 
@@ -1677,7 +1699,7 @@ iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
 static void *
 iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
                                  struct iwl_dump_ini_region_data *reg_data,
-                                 void *data)
+                                 void *data, u32 data_len)
 {
        struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
 
@@ -1688,7 +1710,7 @@ iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
 static void *
 iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
                                  struct iwl_dump_ini_region_data *reg_data,
-                                 void *data)
+                                 void *data, u32 data_len)
 {
        struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
 
@@ -1696,10 +1718,21 @@ iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
                                            &fwrt->trans->cfg->mon_smem_regs);
 }
 
+static void *
+iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt,
+                                 struct iwl_dump_ini_region_data *reg_data,
+                                 void *data, u32 data_len)
+{
+       struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
+
+       return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
+                                           &fwrt->trans->cfg->mon_dbgi_regs);
+}
+
 static void *
 iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
                                   struct iwl_dump_ini_region_data *reg_data,
-                                  void *data)
+                                  void *data, u32 data_len)
 {
        struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
        struct iwl_fw_ini_err_table_dump *dump = data;
@@ -1713,7 +1746,7 @@ iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
 static void *
 iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt,
                                     struct iwl_dump_ini_region_data *reg_data,
-                                    void *data)
+                                    void *data, u32 data_len)
 {
        struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
        struct iwl_fw_ini_special_device_memory *dump = data;
@@ -1725,6 +1758,18 @@ iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt,
        return dump->data;
 }
 
+static void *
+iwl_dump_ini_imr_fill_header(struct iwl_fw_runtime *fwrt,
+                            struct iwl_dump_ini_region_data *reg_data,
+                            void *data, u32 data_len)
+{
+       struct iwl_fw_ini_error_dump *dump = data;
+
+       dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
+
+       return dump->data;
+}
+
 static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
                                   struct iwl_dump_ini_region_data *reg_data)
 {
@@ -1784,6 +1829,26 @@ static u32 iwl_dump_ini_single_range(struct iwl_fw_runtime *fwrt,
        return 1;
 }
 
+static u32 iwl_dump_ini_imr_ranges(struct iwl_fw_runtime *fwrt,
+                                  struct iwl_dump_ini_region_data *reg_data)
+{
+       /* range is the total number of pages that need to be copied from
+        * IMR memory to SRAM and later from SRAM to DRAM
+        */
+       u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable;
+       u32 imr_size = fwrt->trans->dbg.imr_data.imr_size;
+       u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
+
+       if (imr_enable == 0 || imr_size == 0 || sram_size == 0) {
+               IWL_DEBUG_INFO(fwrt,
+                              "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n",
+                              imr_enable, imr_size, sram_size);
+               return 0;
+       }
+
+       return (imr_size % sram_size) ? (imr_size / sram_size + 1) :
+               (imr_size / sram_size);
+}
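
The return value is simply a ceiling division of the IMR size by the SRAM
staging-buffer size (e.g. imr_size of 3MB with sram_size of 1MB gives 3
ranges; 2.5MB also gives 3). An equivalent form:

        return DIV_ROUND_UP(imr_size, sram_size);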
+
 static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt,
                                     struct iwl_dump_ini_region_data *reg_data)
 {
@@ -1861,6 +1926,20 @@ iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt,
        return size;
 }
 
+static u32 iwl_dump_ini_mon_dbgi_get_size(struct iwl_fw_runtime *fwrt,
+                                         struct iwl_dump_ini_region_data *reg_data)
+{
+       struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+       u32 size = le32_to_cpu(reg->dev_addr.size);
+       u32 ranges = iwl_dump_ini_mem_ranges(fwrt, reg_data);
+
+       if (!size || !ranges)
+               return 0;
+
+       return sizeof(struct iwl_fw_ini_monitor_dump) + ranges *
+               (size + sizeof(struct iwl_fw_ini_error_dump_range));
+}
+
 static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
                                     struct iwl_dump_ini_region_data *reg_data)
 {
@@ -1948,6 +2027,33 @@ iwl_dump_ini_fw_pkt_get_size(struct iwl_fw_runtime *fwrt,
        return size;
 }
 
+static u32
+iwl_dump_ini_imr_get_size(struct iwl_fw_runtime *fwrt,
+                         struct iwl_dump_ini_region_data *reg_data)
+{
+       u32 size = 0;
+       u32 ranges = 0;
+       u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable;
+       u32 imr_size = fwrt->trans->dbg.imr_data.imr_size;
+       u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
+
+       if (imr_enable == 0 || imr_size == 0 || sram_size == 0) {
+               IWL_DEBUG_INFO(fwrt,
+                              "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n",
+                              imr_enable, imr_size, sram_size);
+               return size;
+       }
+       size = imr_size;
+       ranges = iwl_dump_ini_imr_ranges(fwrt, reg_data);
+       if (!size && !ranges) {
+               IWL_ERR(fwrt, "WRT: imr_size :=%d, ranges :=%d\n", size, ranges);
+               return 0;
+       }
+       size += sizeof(struct iwl_fw_ini_error_dump) +
+               ranges * sizeof(struct iwl_fw_ini_error_dump_range);
+       return size;
+}
+
 /**
  * struct iwl_dump_ini_mem_ops - ini memory dump operations
  * @get_num_of_ranges: returns the number of memory ranges in the region.
@@ -1964,10 +2070,10 @@ struct iwl_dump_ini_mem_ops {
                        struct iwl_dump_ini_region_data *reg_data);
        void *(*fill_mem_hdr)(struct iwl_fw_runtime *fwrt,
                              struct iwl_dump_ini_region_data *reg_data,
-                             void *data);
+                             void *data, u32 data_len);
        int (*fill_range)(struct iwl_fw_runtime *fwrt,
                          struct iwl_dump_ini_region_data *reg_data,
-                         void *range, int idx);
+                         void *range, u32 range_len, int idx);
 };
 
 /**
@@ -1990,24 +2096,53 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
        struct iwl_fw_ini_error_dump_data *tlv;
        struct iwl_fw_ini_error_dump_header *header;
        u32 type = reg->type;
-       u32 id = le32_to_cpu(reg->id);
+       u32 id = le32_get_bits(reg->id, IWL_FW_INI_REGION_ID_MASK);
        u32 num_of_ranges, i, size;
-       void *range;
-
-       /*
-        * The higher part of the ID from 2 is irrelevant for
-        * us, so mask it out.
-        */
-       if (le32_to_cpu(reg->hdr.version) >= 2)
-               id &= IWL_FW_INI_REGION_V2_MASK;
+       u8 *range;
+       u32 free_size;
+       u64 header_size;
+       u32 dump_policy = IWL_FW_INI_DUMP_VERBOSE;
+
+       IWL_DEBUG_FW(fwrt, "WRT: Collecting region: dump type=%d, id=%d, type=%d\n",
+                    dump_policy, id, type);
+
+       if (le32_to_cpu(reg->hdr.version) >= 2) {
+               u32 dp = le32_get_bits(reg->id,
+                                      IWL_FW_INI_REGION_DUMP_POLICY_MASK);
+
+               if (dump_policy == IWL_FW_INI_DUMP_VERBOSE &&
+                   !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT)) {
+                       IWL_DEBUG_FW(fwrt,
+                                    "WRT: no dump - type %d and policy mismatch=%d\n",
+                                    dump_policy, dp);
+                       return 0;
+               } else if (dump_policy == IWL_FW_INI_DUMP_MEDIUM &&
+                          !(dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB)) {
+                       IWL_DEBUG_FW(fwrt,
+                                    "WRT: no dump - type %d and policy mismatch=%d\n",
+                                    dump_policy, dp);
+                       return 0;
+               } else if (dump_policy == IWL_FW_INI_DUMP_BRIEF &&
+                          !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB)) {
+                       IWL_DEBUG_FW(fwrt,
+                                    "WRT: no dump - type %d and policy mismatch=%d\n",
+                                    dump_policy, dp);
+                       return 0;
+               }
+       }
 
        if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr ||
-           !ops->fill_range)
+           !ops->fill_range) {
+               IWL_DEBUG_FW(fwrt, "WRT: no ops for collecting data\n");
                return 0;
+       }
 
        size = ops->get_size(fwrt, reg_data);
-       if (!size)
+
+       if (size < sizeof(*header)) {
+               IWL_DEBUG_FW(fwrt, "WRT: size didn't include space for header\n");
                return 0;
+       }
 
        entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + size);
        if (!entry)
@@ -2022,9 +2157,6 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
        tlv->reserved = reg->reserved;
        tlv->len = cpu_to_le32(size);
 
-       IWL_DEBUG_FW(fwrt, "WRT: Collecting region: id=%d, type=%d\n", id,
-                    type);
-
        num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data);
 
        header = (void *)tlv->data;
@@ -2033,7 +2165,8 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
        header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME);
        memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME);
 
-       range = ops->fill_mem_hdr(fwrt, reg_data, header);
+       free_size = size;
+       range = ops->fill_mem_hdr(fwrt, reg_data, header, free_size);
        if (!range) {
                IWL_ERR(fwrt,
                        "WRT: Failed to fill region header: id=%d, type=%d\n",
@@ -2041,8 +2174,21 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
                goto out_err;
        }
 
+       header_size = range - (u8 *)header;
+
+       if (WARN(header_size > free_size,
+                "header size %llu > free_size %d",
+                header_size, free_size)) {
+               IWL_ERR(fwrt,
+                       "WRT: fill_mem_hdr used more than given free_size\n");
+               goto out_err;
+       }
+
+       free_size -= header_size;
+
        for (i = 0; i < num_of_ranges; i++) {
-               int range_size = ops->fill_range(fwrt, reg_data, range, i);
+               int range_size = ops->fill_range(fwrt, reg_data, range,
+                                                free_size, i);
 
                if (range_size < 0) {
                        IWL_ERR(fwrt,
@@ -2050,6 +2196,15 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
                                id, type);
                        goto out_err;
                }
+
+               if (WARN(range_size > free_size, "range_size %d > free_size %d",
+                        range_size, free_size)) {
+                       IWL_ERR(fwrt,
+                               "WRT: fill_range used more than given free_size\n");
+                       goto out_err;
+               }
+
+               free_size -= range_size;
                range = range + range_size;
        }
 
@@ -2240,7 +2395,12 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
                .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
                .fill_range = iwl_dump_ini_csr_iter,
        },
-       [IWL_FW_INI_REGION_DRAM_IMR] = {},
+       [IWL_FW_INI_REGION_DRAM_IMR] = {
+               .get_num_of_ranges = iwl_dump_ini_imr_ranges,
+               .get_size = iwl_dump_ini_imr_get_size,
+               .fill_mem_hdr = iwl_dump_ini_imr_fill_header,
+               .fill_range = iwl_dump_ini_imr_iter,
+       },
        [IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = {
                .get_num_of_ranges = iwl_dump_ini_mem_ranges,
                .get_size = iwl_dump_ini_mem_get_size,
@@ -2255,8 +2415,8 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
        },
        [IWL_FW_INI_REGION_DBGI_SRAM] = {
                .get_num_of_ranges = iwl_dump_ini_mem_ranges,
-               .get_size = iwl_dump_ini_mem_get_size,
-               .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+               .get_size = iwl_dump_ini_mon_dbgi_get_size,
+               .fill_mem_hdr = iwl_dump_ini_mon_dbgi_fill_header,
                .fill_range = iwl_dump_ini_dbgi_sram_iter,
        },
 };
@@ -2270,6 +2430,9 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
        struct iwl_dump_ini_region_data reg_data = {
                .dump_data = dump_data,
        };
+       struct iwl_dump_ini_region_data imr_reg_data = {
+               .dump_data = dump_data,
+       };
        int i;
        u32 size = 0;
        u64 regions_mask = le64_to_cpu(trigger->regions_mask) &
@@ -2305,10 +2468,32 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
                                 tp_id);
                        continue;
                }
+               /*
+                * DRAM_IMR can be collected only for the FW/HW error
+                * timepoints, when the fw is not alive. In addition, it
+                * must be collected last, as it overwrites SRAM that may
+                * still contain debug data which also needs to be
+                * collected.
+                */
+               if (reg_type == IWL_FW_INI_REGION_DRAM_IMR) {
+                       if (tp_id == IWL_FW_INI_TIME_POINT_FW_ASSERT ||
+                           tp_id == IWL_FW_INI_TIME_POINT_FW_HW_ERROR)
+                               imr_reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i];
+                       else
+                               IWL_INFO(fwrt,
+                                        "WRT: trying to collect DRAM_IMR at time point: %d, skipping\n",
+                                        tp_id);
+                       /* continue to next region */
+                       continue;
+               }
 
                size += iwl_dump_ini_mem(fwrt, list, &reg_data,
                                         &iwl_dump_ini_region_ops[reg_type]);
        }
+       /* collect the DRAM_IMR region last */
+       if (imr_reg_data.reg_tlv)
+               size += iwl_dump_ini_mem(fwrt, list, &imr_reg_data,
+                                        &iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]);
 
        if (size)
                size += iwl_dump_ini_info(fwrt, trigger, list);
@@ -2444,7 +2629,7 @@ static void iwl_fw_error_dump_data_free(struct iwl_fwrt_dump_data *dump_data)
 static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
                                  struct iwl_fwrt_dump_data *dump_data)
 {
-       struct list_head dump_list = LIST_HEAD_INIT(dump_list);
+       LIST_HEAD(dump_list);
        struct scatterlist *sg_dump_data;
        u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list);
 
@@ -2589,7 +2774,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
                delay = le32_to_cpu(trigger->stop_delay) * USEC_PER_MSEC;
        }
 
-       desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
+       desc = kzalloc(struct_size(desc, trig_desc.data, len), GFP_ATOMIC);
        if (!desc)
                return -ENOMEM;
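
struct_size() is the overflow-safe way to size an allocation whose type ends in a flexible array member: it evaluates to sizeof(*desc) plus len times the element size, saturating to SIZE_MAX on overflow so the kzalloc() fails cleanly instead of returning a short buffer. A sketch of the idiom with a hypothetical mirror type:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct desc_sketch {
            unsigned int reason;
            struct {
                    unsigned int type, len;
                    unsigned char data[];   /* flexible array member */
            } trig_desc;
    };

    static struct desc_sketch *alloc_desc(size_t len)
    {
            struct desc_sketch *desc;

            /* sizeof(*desc) + len * sizeof(desc->trig_desc.data[0]),
             * saturated to SIZE_MAX if the arithmetic would overflow
             */
            desc = kzalloc(struct_size(desc, trig_desc.data, len), GFP_ATOMIC);
            return desc;
    }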
 
@@ -2685,6 +2870,28 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
 }
 IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
 
+void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
+                                   u32 timepoint,
+                                   u32 timepoint_data)
+{
+       struct iwl_dbg_dump_complete_cmd hcmd_data;
+       struct iwl_host_cmd hcmd = {
+               .id = WIDE_ID(DEBUG_GROUP, FW_DUMP_COMPLETE_CMD),
+               .data[0] = &hcmd_data,
+               .len[0] = sizeof(hcmd_data),
+       };
+
+       if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
+               return;
+
+       if (fw_has_capa(&fwrt->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT)) {
+               hcmd_data.tp = cpu_to_le32(timepoint);
+               hcmd_data.tp_data = cpu_to_le32(timepoint_data);
+               iwl_trans_send_cmd(fwrt->trans, &hcmd);
+       }
+}
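
The helper is deliberately a no-op unless the firmware advertises IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT and the device is not already in FW-error state. The caller, added to iwl_fw_dbg_collect_sync() further down, just forwards the trigger's time point:

    if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD)
            iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);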
+
 /* this function assumes dump_start was called beforehand and dump_end will be
  * called afterwards
  */
@@ -2693,10 +2900,16 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
        struct iwl_fw_dbg_params params = {0};
        struct iwl_fwrt_dump_data *dump_data =
                &fwrt->dump.wks[wk_idx].dump_data;
-
+       u32 policy;
+       u32 time_point;
+
        if (!test_bit(wk_idx, &fwrt->dump.active_wks))
                return;
 
+       if (!dump_data->trig) {
+               IWL_ERR(fwrt, "dump trigger data is not set\n");
+               goto out;
+       }
+
        if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) {
                IWL_ERR(fwrt, "Device is not enabled - cannot dump error\n");
                goto out;
@@ -2719,6 +2932,13 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
 
        iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
 
+       policy = le32_to_cpu(dump_data->trig->apply_policy);
+       time_point = le32_to_cpu(dump_data->trig->time_point);
+
+       if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
+               IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
+               iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
+       }
        if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
                iwl_force_nmi(fwrt->trans);
 
@@ -2777,10 +2997,10 @@ int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
                 "WRT: Collecting data: ini trigger %d fired (delay=%dms).\n",
                 tp_id, (u32)(delay / USEC_PER_MSEC));
 
-       schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
-
        if (sync)
                iwl_fw_dbg_collect_sync(fwrt, idx);
+       else
+               schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
 
        return 0;
 }
@@ -2795,9 +3015,8 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
        /* assumes the op mode mutex is locked in dump_start since
         * iwl_fw_dbg_collect_sync can't run in parallel
         */
-       if (fwrt->ops && fwrt->ops->dump_start &&
-           fwrt->ops->dump_start(fwrt->ops_ctx))
-               return;
+       if (fwrt->ops && fwrt->ops->dump_start)
+               fwrt->ops->dump_start(fwrt->ops_ctx);
 
        iwl_fw_dbg_collect_sync(fwrt, wks->idx);
 
index 8c3c890..be78064 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2019, 2021-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */
@@ -324,4 +324,7 @@ static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
 }
 
 void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt);
+void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
+                                   u32 timepoint,
+                                   u32 timepoint_data);
 #endif  /* __iwl_fw_dbg_h__ */
index a152ce3..43e9972 100644 (file)
@@ -150,7 +150,7 @@ static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt,
 {
        struct iwl_dbg_host_event_cfg_cmd event_cfg;
        struct iwl_host_cmd hcmd = {
-               .id = iwl_cmd_id(HOST_EVENT_CFG, DEBUG_GROUP, 0),
+               .id = WIDE_ID(DEBUG_GROUP, HOST_EVENT_CFG),
                .flags = CMD_ASYNC,
                .data[0] = &event_cfg,
                .len[0] = sizeof(event_cfg),
@@ -358,7 +358,7 @@ static int iwl_dbgfs_fw_info_seq_show(struct seq_file *seq, void *v)
 
        ver = &fw->ucode_capa.cmd_versions[state->pos];
 
-       cmd_id = iwl_cmd_id(ver->cmd, ver->group, 0);
+       cmd_id = WIDE_ID(ver->group, ver->cmd);
 
        seq_printf(seq, "  0x%04x:\n", cmd_id);
        seq_printf(seq, "    name: %s\n",
index efc6540..5679a78 100644 (file)
@@ -119,7 +119,7 @@ enum iwl_ucode_tlv_type {
 struct iwl_ucode_tlv {
        __le32 type;            /* see above */
        __le32 length;          /* not including type/length fields */
-       u8 data[0];
+       u8 data[];
 };
 
 #define IWL_TLV_UCODE_MAGIC            0x0a4c5749
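
Replacing the GNU zero-length array u8 data[0] with the C99 flexible array member u8 data[] is the tree-wide convention: the layout is identical, but [] lets the compiler and FORTIFY_SOURCE diagnose out-of-bounds accesses that [0] silently permits. Allocation is unchanged, since sizeof() never includes the flexible member; sketched with a hypothetical mirror of the struct above:

    struct tlv_sketch {
            __le32 type;
            __le32 length;
            u8 data[];              /* must be the last member */
    };

    /* payload bytes are added explicitly on top of the fixed header */
    struct tlv_sketch *tlv = kzalloc(sizeof(*tlv) + payload_len, GFP_KERNEL);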
@@ -310,7 +310,6 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
  * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
  * @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image
  * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
- * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
  * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
  * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
  * @IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD: supports U-APSD on p2p interface when it
@@ -368,6 +367,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
  *     reset flow
  * @IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN: Support for passive scan on 6GHz PSC
  *      channels even when these are not enabled.
+ * @IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT: Support for indicating to the FW
+ *     that dump collection is complete.
  *
  * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
  */
@@ -386,7 +387,6 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH          = (__force iwl_ucode_tlv_capa_t)13,
        IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG            = (__force iwl_ucode_tlv_capa_t)17,
        IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT              = (__force iwl_ucode_tlv_capa_t)18,
-       IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT         = (__force iwl_ucode_tlv_capa_t)19,
        IWL_UCODE_TLV_CAPA_CSUM_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)21,
        IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS           = (__force iwl_ucode_tlv_capa_t)22,
        IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD                = (__force iwl_ucode_tlv_capa_t)26,
@@ -419,6 +419,7 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_BROADCAST_TWT                = (__force iwl_ucode_tlv_capa_t)60,
        IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO               = (__force iwl_ucode_tlv_capa_t)61,
        IWL_UCODE_TLV_CAPA_RFIM_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)62,
+       IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT              = (__force iwl_ucode_tlv_capa_t)63,
 
        /* set 2 */
        IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE         = (__force iwl_ucode_tlv_capa_t)64,
@@ -453,6 +454,7 @@ enum iwl_ucode_tlv_capa {
 
        IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT                = (__force iwl_ucode_tlv_capa_t)100,
        IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT            = (__force iwl_ucode_tlv_capa_t)104,
+       IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT        = (__force iwl_ucode_tlv_capa_t)105,
 
 #ifdef __CHECKER__
        /* sparse says it cannot increment the previous enum member */
@@ -514,34 +516,6 @@ enum iwl_fw_phy_cfg {
        FW_PHY_CFG_SHARED_CLK = BIT(31),
 };
 
-#define IWL_UCODE_MAX_CS               1
-
-/**
- * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW.
- * @cipher: a cipher suite selector
- * @flags: cipher scheme flags (currently reserved for a future use)
- * @hdr_len: a size of MPDU security header
- * @pn_len: a size of PN
- * @pn_off: an offset of pn from the beginning of the security header
- * @key_idx_off: an offset of key index byte in the security header
- * @key_idx_mask: a bit mask of key_idx bits
- * @key_idx_shift: bit shift needed to get key_idx
- * @mic_len: mic length in bytes
- * @hw_cipher: a HW cipher index used in host commands
- */
-struct iwl_fw_cipher_scheme {
-       __le32 cipher;
-       u8 flags;
-       u8 hdr_len;
-       u8 pn_len;
-       u8 pn_off;
-       u8 key_idx_off;
-       u8 key_idx_mask;
-       u8 key_idx_shift;
-       u8 mic_len;
-       u8 hw_cipher;
-} __packed;
-
 enum iwl_fw_dbg_reg_operator {
        CSR_ASSIGN,
        CSR_SETBIT,
index 530674a..b7deca0 100644 (file)
@@ -2,13 +2,16 @@
 /*
  * Copyright(c) 2019 - 2021 Intel Corporation
  */
-
+#include <fw/api/commands.h>
 #include "img.h"
 
-u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def)
 {
        const struct iwl_fw_cmd_version *entry;
        unsigned int i;
+       /* prior to LONG_GROUP, we never used this CMD version API */
+       u8 grp = iwl_cmd_groupid(cmd_id) ?: LONG_GROUP;
+       u8 cmd = iwl_cmd_opcode(cmd_id);
 
        if (!fw->ucode_capa.cmd_versions ||
            !fw->ucode_capa.n_cmd_versions)
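
iwl_fw_lookup_cmd_ver() now takes one combined 32-bit command ID rather than separate group/opcode bytes. In this driver the wide ID packs the group into bits 8-15 and the opcode into bits 0-7; roughly (see fw/api/commands.h and iwl-trans.h for the real definitions):

    #define WIDE_ID(grp, opcode)    (((grp) << 8) | (opcode))

    static inline u8 iwl_cmd_groupid(u32 cmdid)
    {
            return (cmdid & 0xff00) >> 8;   /* 0 here means "legacy" */
    }

    static inline u8 iwl_cmd_opcode(u32 cmdid)
    {
            return cmdid & 0xff;
    }

A group ID of 0 is what the ?: fallback above maps to LONG_GROUP.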
index fa7b178..f878ac5 100644 (file)
@@ -133,16 +133,6 @@ struct iwl_fw_paging {
        u32 fw_offs;
 };
 
-/**
- * struct iwl_fw_cscheme_list - a cipher scheme list
- * @size: a number of entries
- * @cs: cipher scheme entries
- */
-struct iwl_fw_cscheme_list {
-       u8 size;
-       struct iwl_fw_cipher_scheme cs[];
-} __packed;
-
 /**
  * enum iwl_fw_type - iwlwifi firmware type
  * @IWL_FW_DVM: DVM firmware
@@ -197,7 +187,6 @@ struct iwl_dump_exclude {
  * @inst_evtlog_size: event log size for runtime ucode.
  * @inst_errlog_ptr: error log offfset for runtime ucode.
  * @type: firmware type (&enum iwl_fw_type)
- * @cipher_scheme: optional external cipher scheme.
  * @human_readable: human readable version
  *     we get the ALIVE from the uCode
  * @phy_integration_ver: PHY integration version string
@@ -228,7 +217,6 @@ struct iwl_fw {
 
        enum iwl_fw_type type;
 
-       struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
        u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
 
        struct iwl_fw_dbg dbg;
@@ -275,7 +263,7 @@ iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
        return &fw->img[ucode_type];
 }
 
-u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def);
 
 u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
 const char *iwl_fw_lookup_assert_desc(u32 num);
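
Call sites convert mechanically; a representative before/after pair, as the later hunks in this merge show:

    /* before: group and opcode passed separately */
    ver = iwl_fw_lookup_cmd_ver(fwrt->fw, IWL_ALWAYS_LONG_GROUP,
                                SCAN_REQ_UMAC, IWL_FW_CMD_VER_UNKNOWN);

    /* after: bare opcode; a zero group defaults to LONG_GROUP */
    ver = iwl_fw_lookup_cmd_ver(fwrt->fw, SCAN_REQ_UMAC,
                                IWL_FW_CMD_VER_UNKNOWN);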
index 139ece8..135bd48 100644 (file)
@@ -58,7 +58,7 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
 {
        struct iwl_soc_configuration_cmd cmd = {};
        struct iwl_host_cmd hcmd = {
-               .id = iwl_cmd_id(SOC_CONFIGURATION_CMD, SYSTEM_GROUP, 0),
+               .id = WIDE_ID(SYSTEM_GROUP, SOC_CONFIGURATION_CMD),
                .data[0] = &cmd,
                .len[0] = sizeof(cmd),
        };
@@ -87,8 +87,7 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
                cmd.flags |= le32_encode_bits(fwrt->trans->trans_cfg->ltr_delay,
                                              SOC_FLAGS_LTR_APPLY_DELAY_MASK);
 
-       if (iwl_fw_lookup_cmd_ver(fwrt->fw, IWL_ALWAYS_LONG_GROUP,
-                                 SCAN_REQ_UMAC,
+       if (iwl_fw_lookup_cmd_ver(fwrt->fw, SCAN_REQ_UMAC,
                                  IWL_FW_CMD_VER_UNKNOWN) >= 2 &&
            fwrt->trans->trans_cfg->low_latency_xtal)
                cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
index 58ca384..945bc41 100644 (file)
@@ -197,7 +197,7 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
                }
 
                memcpy(page_address(block->fw_paging_block),
-                      image->sec[sec_idx].data + offset, len);
+                      (const u8 *)image->sec[sec_idx].data + offset, len);
                block->fw_offs = image->sec[sec_idx].offset + offset;
                dma_sync_single_for_device(fwrt->trans->dev,
                                           block->fw_paging_phys,
@@ -243,7 +243,7 @@ static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt,
                .block_num = cpu_to_le32(fwrt->num_of_paging_blk),
        };
        struct iwl_host_cmd hcmd = {
-               .id = iwl_cmd_id(FW_PAGING_BLOCK_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+               .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, FW_PAGING_BLOCK_CMD),
                .len = { sizeof(paging_cmd), },
                .data = { &paging_cmd, },
        };
index 7d4aa39..b6d3ac6 100644 (file)
@@ -33,7 +33,7 @@ static bool iwl_pnvm_complete_fn(struct iwl_notif_wait_data *notif_wait,
 static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
                                   size_t len)
 {
-       struct iwl_ucode_tlv *tlv;
+       const struct iwl_ucode_tlv *tlv;
        u32 sha1 = 0;
        u16 mac_type = 0, rf_id = 0;
        u8 *pnvm_data = NULL, *tmp;
@@ -47,7 +47,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
                u32 tlv_len, tlv_type;
 
                len -= sizeof(*tlv);
-               tlv = (void *)data;
+               tlv = (const void *)data;
 
                tlv_len = le32_to_cpu(tlv->length);
                tlv_type = le32_to_cpu(tlv->type);
@@ -70,7 +70,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
                                break;
                        }
 
-                       sha1 = le32_to_cpup((__le32 *)data);
+                       sha1 = le32_to_cpup((const __le32 *)data);
 
                        IWL_DEBUG_FW(trans,
                                     "Got IWL_UCODE_TLV_PNVM_VERSION %0x\n",
@@ -87,8 +87,8 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
                        if (hw_match)
                                break;
 
-                       mac_type = le16_to_cpup((__le16 *)data);
-                       rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16)));
+                       mac_type = le16_to_cpup((const __le16 *)data);
+                       rf_id = le16_to_cpup((const __le16 *)(data + sizeof(__le16)));
 
                        IWL_DEBUG_FW(trans,
                                     "Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n",
@@ -99,7 +99,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
                                hw_match = true;
                        break;
                case IWL_UCODE_TLV_SEC_RT: {
-                       struct iwl_pnvm_section *section = (void *)data;
+                       const struct iwl_pnvm_section *section = (const void *)data;
                        u32 data_len = tlv_len - sizeof(*section);
 
                        IWL_DEBUG_FW(trans,
@@ -107,7 +107,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
                                     tlv_len);
 
                        /* TODO: remove, this is a deprecated separator */
-                       if (le32_to_cpup((__le32 *)data) == 0xddddeeee) {
+                       if (le32_to_cpup((const __le32 *)data) == 0xddddeeee) {
                                IWL_DEBUG_FW(trans, "Ignoring separator.\n");
                                break;
                        }
@@ -173,7 +173,7 @@ out:
 static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
                          size_t len)
 {
-       struct iwl_ucode_tlv *tlv;
+       const struct iwl_ucode_tlv *tlv;
 
        IWL_DEBUG_FW(trans, "Parsing PNVM file\n");
 
@@ -181,7 +181,7 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
                u32 tlv_len, tlv_type;
 
                len -= sizeof(*tlv);
-               tlv = (void *)data;
+               tlv = (const void *)data;
 
                tlv_len = le32_to_cpu(tlv->length);
                tlv_type = le32_to_cpu(tlv->type);
@@ -193,8 +193,8 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
                }
 
                if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
-                       struct iwl_sku_id *sku_id =
-                               (void *)(data + sizeof(*tlv));
+                       const struct iwl_sku_id *sku_id =
+                               (const void *)(data + sizeof(*tlv));
 
                        IWL_DEBUG_FW(trans,
                                     "Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
index 3cb0ddb..d3cb1ae 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
  * Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
  */
 #ifndef __iwl_fw_runtime_h__
 #define __iwl_fw_runtime_h__
@@ -16,7 +16,7 @@
 #include "fw/acpi.h"
 
 struct iwl_fw_runtime_ops {
-       int (*dump_start)(void *ctx);
+       void (*dump_start)(void *ctx);
        void (*dump_end)(void *ctx);
        bool (*fw_running)(void *ctx);
        int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
@@ -163,6 +163,7 @@ struct iwl_fw_runtime {
        u32 ppag_ver;
        struct iwl_sar_offset_mapping_cmd sgom_table;
        bool sgom_enabled;
+       u8 reduced_power_flags;
 #endif
 };
 
index f2f1789..3f12720 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -89,7 +89,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
 
        if (fw_has_capa(&fwrt->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
-               cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
+               cmd.id = WIDE_ID(SYSTEM_GROUP, SHARED_MEM_CFG_CMD);
        else
                cmd.id = SHARED_MEM_CFG;
 
index bd82c24..23b1d68 100644 (file)
@@ -69,7 +69,7 @@ out:
 static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
                                           const u8 *data, size_t len)
 {
-       struct iwl_ucode_tlv *tlv;
+       const struct iwl_ucode_tlv *tlv;
        u8 *reduce_power_data = NULL, *tmp;
        u32 size = 0;
 
@@ -79,7 +79,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
                u32 tlv_len, tlv_type;
 
                len -= sizeof(*tlv);
-               tlv = (void *)data;
+               tlv = (const void *)data;
 
                tlv_len = le32_to_cpu(tlv->length);
                tlv_type = le32_to_cpu(tlv->type);
@@ -154,7 +154,7 @@ out:
 static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
                                         const u8 *data, size_t len)
 {
-       struct iwl_ucode_tlv *tlv;
+       const struct iwl_ucode_tlv *tlv;
        void *sec_data;
 
        IWL_DEBUG_FW(trans, "Parsing REDUCE_POWER data\n");
@@ -163,7 +163,7 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
                u32 tlv_len, tlv_type;
 
                len -= sizeof(*tlv);
-               tlv = (void *)data;
+               tlv = (const void *)data;
 
                tlv_len = le32_to_cpu(tlv->length);
                tlv_type = le32_to_cpu(tlv->type);
@@ -175,8 +175,8 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
                }
 
                if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
-                       struct iwl_sku_id *sku_id =
-                               (void *)(data + sizeof(*tlv));
+                       const struct iwl_sku_id *sku_id =
+                               (const void *)(data + sizeof(*tlv));
 
                        IWL_DEBUG_FW(trans,
                                     "Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
index e122b8b..f5b556a 100644 (file)
@@ -260,6 +260,7 @@ enum iwl_cfg_trans_ltr_delay {
  * @integrated: discrete or integrated
  * @low_latency_xtal: use the low latency xtal if supported
  * @ltr_delay: LTR delay parameter, &enum iwl_cfg_trans_ltr_delay.
+ * @imr_enabled: use the IMR if supported.
  */
 struct iwl_cfg_trans_params {
        const struct iwl_base_params *base_params;
@@ -274,7 +275,8 @@ struct iwl_cfg_trans_params {
            integrated:1,
            low_latency_xtal:1,
            bisr_workaround:1,
-           ltr_delay:2;
+           ltr_delay:2,
+           imr_enabled:1;
 };
 
 /**
@@ -343,8 +345,8 @@ struct iwl_fw_mon_regs {
  * @bisr_workaround: BISR hardware workaround (for 22260 series devices)
  * @min_txq_size: minimum number of slots required in a TX queue
  * @uhb_supported: ultra high band channels supported
- * @min_256_ba_txq_size: minimum number of slots required in a TX queue which
- *     supports 256 BA aggregation
+ * @min_ba_txq_size: minimum number of slots required in a TX queue,
+ *     based on hardware support (HE: 256, EHT: 1K).
  * @num_rbds: number of receive buffer descriptors to use
  *     (only used for multi-queue capable devices)
  * @mac_addr_csr_base: CSR base register for MAC address access, if not set
@@ -405,9 +407,10 @@ struct iwl_cfg {
        u32 d3_debug_data_length;
        u32 min_txq_size;
        u32 gp2_reg_addr;
-       u32 min_256_ba_txq_size;
+       u32 min_ba_txq_size;
        const struct iwl_fw_mon_regs mon_dram_regs;
        const struct iwl_fw_mon_regs mon_smem_regs;
+       const struct iwl_fw_mon_regs mon_dbgi_regs;
 };
 
 #define IWL_CFG_ANY (~0)
@@ -433,6 +436,7 @@ struct iwl_cfg {
 #define IWL_CFG_RF_TYPE_HR1            0x10C
 #define IWL_CFG_RF_TYPE_GF             0x10D
 #define IWL_CFG_RF_TYPE_MR             0x110
+#define IWL_CFG_RF_TYPE_MS             0x111
 #define IWL_CFG_RF_TYPE_FM             0x112
 
 #define IWL_CFG_RF_ID_TH               0x1
@@ -489,6 +493,7 @@ extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg;
 extern const struct iwl_cfg_trans_params iwl_snj_trans_cfg;
 extern const struct iwl_cfg_trans_params iwl_so_trans_cfg;
 extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg;
 extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
 extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg;
 extern const char iwl9162_name[];
@@ -509,6 +514,7 @@ extern const char iwl9560_killer_1550i_name[];
 extern const char iwl9560_killer_1550s_name[];
 extern const char iwl_ax200_name[];
 extern const char iwl_ax203_name[];
+extern const char iwl_ax204_name[];
 extern const char iwl_ax201_name[];
 extern const char iwl_ax101_name[];
 extern const char iwl_ax200_killer_1650w_name[];
@@ -631,9 +637,12 @@ extern const struct iwl_cfg iwl_cfg_ma_a0_hr_b0;
 extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0;
 extern const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0;
 extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
+extern const struct iwl_cfg iwl_cfg_ma_a0_ms_a0;
 extern const struct iwl_cfg iwl_cfg_ma_a0_fm_a0;
 extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0;
+extern const struct iwl_cfg iwl_cfg_snj_a0_ms_a0;
 extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
+extern const struct iwl_cfg iwl_cfg_so_a0_ms_a0;
 extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
 extern const struct iwl_cfg iwl_cfg_bz_a0_hr_b0;
 extern const struct iwl_cfg iwl_cfg_bz_a0_gf_a0;
index 5adf485..b848840 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2018, 2020-2021 Intel Corporation
+ * Copyright (C) 2018, 2020-2022 Intel Corporation
  */
 #ifndef __iwl_context_info_file_gen3_h__
 #define __iwl_context_info_file_gen3_h__
@@ -34,6 +34,7 @@ enum iwl_prph_scratch_mtr_format {
 
 /**
  * enum iwl_prph_scratch_flags - PRPH scratch control flags
+ * @IWL_PRPH_SCRATCH_IMR_DEBUG_EN: IMR support for debug
  * @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf
  * @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated
  *     in hwm config.
@@ -55,6 +56,7 @@ enum iwl_prph_scratch_mtr_format {
  * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size
  */
 enum iwl_prph_scratch_flags {
+       IWL_PRPH_SCRATCH_IMR_DEBUG_EN           = BIT(1),
        IWL_PRPH_SCRATCH_EARLY_DEBUG_EN         = BIT(4),
        IWL_PRPH_SCRATCH_EDBG_DEST_DRAM         = BIT(8),
        IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL     = BIT(9),
index 8e10ba8..3e1f011 100644 (file)
@@ -534,6 +534,9 @@ enum {
  * 11-8:  queue selector
  */
 #define HBUS_TARG_WRPTR         (HBUS_BASE+0x060)
+/* This register is common for Tx and Rx, Rx queues start from 512 */
+#define HBUS_TARG_WRPTR_Q_SHIFT (16)
+#define HBUS_TARG_WRPTR_RX_Q(q) (((q) + 512) << HBUS_TARG_WRPTR_Q_SHIFT)
 
 /**********************************************************
  * CSR values
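
The new helpers encode the queue number in the upper half of the write-pointer word, with RX queues offset by 512: e.g. HBUS_TARG_WRPTR_RX_Q(2) evaluates to (2 + 512) << 16 = 0x2020000.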
index c73672d..866a33f 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
  */
 #include <linux/firmware.h>
 #include "iwl-drv.h"
@@ -74,7 +74,8 @@ static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
        if (!node)
                return -ENOMEM;
 
-       memcpy(&node->tlv, tlv, sizeof(node->tlv) + len);
+       memcpy(&node->tlv, tlv, sizeof(node->tlv));
+       memcpy(node->tlv.data, tlv->data, len);
        list_add_tail(&node->list, list);
 
        return 0;
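
node->tlv ends in a flexible array, so the old single memcpy of sizeof(node->tlv) + len bytes appeared (to the compiler and to FORTIFY_SOURCE bounds checking) to read past the end of the source TLV header object. Splitting the copy keeps each memcpy within the bounds of what it names:

    /* before: one copy spanning header + payload of a header-typed object */
    memcpy(&node->tlv, tlv, sizeof(node->tlv) + len);

    /* after: fixed header and variable payload copied separately */
    memcpy(&node->tlv, tlv, sizeof(node->tlv));
    memcpy(node->tlv.data, tlv->data, len);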
@@ -181,11 +182,11 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
        u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);
 
        /*
-        * The higher part of the ID in from version 2 is irrelevant for
-        * us, so mask it out.
+        * From version 2 the higher part of the ID carries the debug
+        * policy; the region ID itself is only the 16 LSBs, so mask
+        * out the rest.
         */
        if (le32_to_cpu(reg->hdr.version) >= 2)
-               id &= IWL_FW_INI_REGION_V2_MASK;
+               id &= IWL_FW_INI_REGION_ID_MASK;
 
        if (le32_to_cpu(tlv->length) < sizeof(*reg))
                return -EINVAL;
@@ -211,6 +212,14 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
                return -EOPNOTSUPP;
        }
 
+       if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) {
+               trans->dbg.imr_data.sram_addr =
+                       le32_to_cpu(reg->internal_buffer.base_addr);
+               trans->dbg.imr_data.sram_size =
+                       le32_to_cpu(reg->internal_buffer.size);
+       }
+
        active_reg = &trans->dbg.active_regions[id];
        if (*active_reg) {
                IWL_WARN(trans, "WRT: Overriding region id %u\n", id);
@@ -271,7 +280,7 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
 static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
                                  const struct iwl_ucode_tlv *tlv)
 {
-       struct iwl_fw_ini_conf_set_tlv *conf_set = (void *)tlv->data;
+       const struct iwl_fw_ini_conf_set_tlv *conf_set = (const void *)tlv->data;
        u32 tp = le32_to_cpu(conf_set->time_point);
        u32 type = le32_to_cpu(conf_set->set_type);
 
@@ -460,7 +469,7 @@ static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
 
        while (len >= sizeof(*tlv)) {
                len -= sizeof(*tlv);
-               tlv = (void *)data;
+               tlv = (const void *)data;
 
                tlv_len = le32_to_cpu(tlv->length);
 
@@ -577,8 +586,7 @@ static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
                return 0;
 
        num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
-       if (!fw_has_capa(&fwrt->fw->ucode_capa,
-                        IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) {
+       if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
                if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
                        return -EIO;
                num_frags = 1;
@@ -762,33 +770,40 @@ static int iwl_dbg_tlv_update_dram(struct iwl_fw_runtime *fwrt,
 
 static void iwl_dbg_tlv_update_drams(struct iwl_fw_runtime *fwrt)
 {
-       int ret, i, dram_alloc = 0;
-       struct iwl_dram_info dram_info;
+       int ret, i;
+       bool dram_alloc = false;
        struct iwl_dram_data *frags =
                &fwrt->trans->dbg.fw_mon_ini[IWL_FW_INI_ALLOCATION_ID_DBGC1].frags[0];
+       struct iwl_dram_info *dram_info;
+
+       if (!frags || !frags->block)
+               return;
+
+       dram_info = frags->block;
 
        if (!fw_has_capa(&fwrt->fw->ucode_capa,
                         IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
                return;
 
-       dram_info.first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
-       dram_info.second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
+       dram_info->first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
+       dram_info->second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
 
        for (i = IWL_FW_INI_ALLOCATION_ID_DBGC1;
             i <= IWL_FW_INI_ALLOCATION_ID_DBGC3; i++) {
-               ret = iwl_dbg_tlv_update_dram(fwrt, i, &dram_info);
+               ret = iwl_dbg_tlv_update_dram(fwrt, i, dram_info);
                if (!ret)
-                       dram_alloc++;
+                       dram_alloc = true;
                else
                        IWL_WARN(fwrt,
                                 "WRT: Failed to set DRAM buffer for alloc id %d, ret=%d\n",
                                 i, ret);
        }
-       if (dram_alloc) {
-               memcpy(frags->block, &dram_info, sizeof(dram_info));
-               IWL_DEBUG_FW(fwrt, "block data after  %016x\n",
-                            *((int *)fwrt->trans->dbg.fw_mon_ini[1].frags[0].block));
-       }
+
+       if (dram_alloc)
+               IWL_DEBUG_FW(fwrt, "block data after %08x\n",
+                            dram_info->first_word);
+       else
+               memset(frags->block, 0, sizeof(*dram_info));
 }
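
iwl_dbg_tlv_update_drams() now points dram_info at the DBGC1 fragment's coherent buffer and fills the header in place (clearing it again if no allocation succeeded), instead of staging a struct iwl_dram_info on the stack and memcpy()ing it over afterwards; the NULL checks on frags/frags->block also move ahead of any use. Sketched with hypothetical types:

    struct dram_hdr { __le32 first_word, second_word; };

    static void init_dram_hdr_in_place(void *block)
    {
            struct dram_hdr *hdr = block;   /* write the DMA buffer directly */

            hdr->first_word  = cpu_to_le32(0x12345678);  /* illustrative magic */
            hdr->second_word = cpu_to_le32(0x9abcdef0);  /* illustrative magic */
    }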
 
 static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
@@ -811,11 +826,11 @@ static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
 }
 
 static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
-                                    struct list_head *config_list)
+                                    struct list_head *conf_list)
 {
        struct iwl_dbg_tlv_node *node;
 
-       list_for_each_entry(node, config_list, list) {
+       list_for_each_entry(node, conf_list, list) {
                struct iwl_fw_ini_conf_set_tlv *config_list = (void *)node->tlv.data;
                u32 count, address, value;
                u32 len = (le32_to_cpu(node->tlv.length) - sizeof(*config_list)) / 8;
@@ -861,11 +876,18 @@ static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
                case IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: {
                        struct iwl_dbgc1_info dram_info = {};
                        struct iwl_dram_data *frags = &fwrt->trans->dbg.fw_mon_ini[1].frags[0];
-                       __le64 dram_base_addr = cpu_to_le64(frags->physical);
-                       __le32 dram_size = cpu_to_le32(frags->size);
-                       u64  dram_addr = le64_to_cpu(dram_base_addr);
+                       __le64 dram_base_addr;
+                       __le32 dram_size;
+                       u64 dram_addr;
                        u32 ret;
 
+                       if (!frags)
+                               break;
+
+                       dram_base_addr = cpu_to_le64(frags->physical);
+                       dram_size = cpu_to_le32(frags->size);
+                       dram_addr = le64_to_cpu(dram_base_addr);
+
                        IWL_DEBUG_FW(fwrt, "WRT: dram_base_addr 0x%016llx, dram_size 0x%x\n",
                                     dram_base_addr, dram_size);
                        IWL_DEBUG_FW(fwrt, "WRT: config_list->addr_offset: %u\n",
index 7928770..128059c 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
  */
 #ifndef __iwl_dbg_tlv_h__
 #define __iwl_dbg_tlv_h__
@@ -10,6 +10,8 @@
 #include <fw/file.h>
 #include <fw/api/dbg-tlv.h>
 
+#define IWL_DBG_TLV_MAX_PRESET 15
+
 /**
  * struct iwl_dbg_tlv_node - debug TLV node
  * @list: list of &struct iwl_dbg_tlv_node
index 6651e78..a2203f6 100644 (file)
@@ -243,14 +243,14 @@ struct iwl_firmware_pieces {
 
        /* FW debug data parsed for driver usage */
        bool dbg_dest_tlv_init;
-       u8 *dbg_dest_ver;
+       const u8 *dbg_dest_ver;
        union {
-               struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
-               struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
+               const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+               const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
        };
-       struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
+       const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
        size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
-       struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
+       const struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
        size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
        struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
        size_t n_mem_tlv;
@@ -324,29 +324,6 @@ static void set_sec_offset(struct iwl_firmware_pieces *pieces,
        pieces->img[type].sec[sec].offset = offset;
 }
 
-static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
-{
-       int i, j;
-       struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data;
-       struct iwl_fw_cipher_scheme *fwcs;
-
-       if (len < sizeof(*l) ||
-           len < sizeof(l->size) + l->size * sizeof(l->cs[0]))
-               return -EINVAL;
-
-       for (i = 0, j = 0; i < IWL_UCODE_MAX_CS && i < l->size; i++) {
-               fwcs = &l->cs[j];
-
-               /* we skip schemes with zero cipher suite selector */
-               if (!fwcs->cipher)
-                       continue;
-
-               fw->cs[j++] = *fwcs;
-       }
-
-       return 0;
-}
-
 /*
  * Gets uCode section from tlv.
  */
@@ -356,13 +333,13 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
 {
        struct fw_img_parsing *img;
        struct fw_sec *sec;
-       struct fw_sec_parsing *sec_parse;
+       const struct fw_sec_parsing *sec_parse;
        size_t alloc_size;
 
        if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX))
                return -1;
 
-       sec_parse = (struct fw_sec_parsing *)data;
+       sec_parse = (const struct fw_sec_parsing *)data;
 
        img = &pieces->img[type];
 
@@ -385,8 +362,8 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
 
 static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
 {
-       struct iwl_tlv_calib_data *def_calib =
-                                       (struct iwl_tlv_calib_data *)data;
+       const struct iwl_tlv_calib_data *def_calib =
+                                       (const struct iwl_tlv_calib_data *)data;
        u32 ucode_type = le32_to_cpu(def_calib->ucode_type);
        if (ucode_type >= IWL_UCODE_TYPE_MAX) {
                IWL_ERR(drv, "Wrong ucode_type %u for default calibration.\n",
@@ -404,7 +381,7 @@ static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
 static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
                                    struct iwl_ucode_capabilities *capa)
 {
-       const struct iwl_ucode_api *ucode_api = (void *)data;
+       const struct iwl_ucode_api *ucode_api = (const void *)data;
        u32 api_index = le32_to_cpu(ucode_api->api_index);
        u32 api_flags = le32_to_cpu(ucode_api->api_flags);
        int i;
@@ -425,7 +402,7 @@ static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
 static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
                                       struct iwl_ucode_capabilities *capa)
 {
-       const struct iwl_ucode_capa *ucode_capa = (void *)data;
+       const struct iwl_ucode_capa *ucode_capa = (const void *)data;
        u32 api_index = le32_to_cpu(ucode_capa->api_index);
        u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
        int i;
@@ -457,7 +434,7 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
                                    const struct firmware *ucode_raw,
                                    struct iwl_firmware_pieces *pieces)
 {
-       struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
+       const struct iwl_ucode_header *ucode = (const void *)ucode_raw->data;
        u32 api_ver, hdr_size, build;
        char buildstr[25];
        const u8 *src;
@@ -600,7 +577,7 @@ static void iwl_parse_dbg_tlv_assert_tables(struct iwl_drv *drv,
                     sizeof(region->special_mem))
                return;
 
-       region = (void *)tlv->data;
+       region = (const void *)tlv->data;
        addr = le32_to_cpu(region->special_mem.base_addr);
        addr += le32_to_cpu(region->special_mem.offset);
        addr &= ~FW_ADDR_CACHE_CONTROL;
@@ -655,7 +632,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                                struct iwl_ucode_capabilities *capa,
                                bool *usniffer_images)
 {
-       struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
+       const struct iwl_tlv_ucode_header *ucode = (const void *)ucode_raw->data;
        const struct iwl_ucode_tlv *tlv;
        size_t len = ucode_raw->size;
        const u8 *data;
@@ -704,8 +681,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 
        while (len >= sizeof(*tlv)) {
                len -= sizeof(*tlv);
-               tlv = (void *)data;
 
+               tlv = (const void *)data;
                tlv_len = le32_to_cpu(tlv->length);
                tlv_type = le32_to_cpu(tlv->type);
                tlv_data = tlv->data;
@@ -762,7 +739,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        if (tlv_len != sizeof(u32))
                                goto invalid_tlv_len;
                        capa->max_probe_length =
-                                       le32_to_cpup((__le32 *)tlv_data);
+                                       le32_to_cpup((const __le32 *)tlv_data);
                        break;
                case IWL_UCODE_TLV_PAN:
                        if (tlv_len)
@@ -783,7 +760,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                         * will not work with the new firmware, or
                         * it'll not take advantage of new features.
                         */
-                       capa->flags = le32_to_cpup((__le32 *)tlv_data);
+                       capa->flags = le32_to_cpup((const __le32 *)tlv_data);
                        break;
                case IWL_UCODE_TLV_API_CHANGES_SET:
                        if (tlv_len != sizeof(struct iwl_ucode_api))
@@ -799,37 +776,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        if (tlv_len != sizeof(u32))
                                goto invalid_tlv_len;
                        pieces->init_evtlog_ptr =
-                                       le32_to_cpup((__le32 *)tlv_data);
+                                       le32_to_cpup((const __le32 *)tlv_data);
                        break;
                case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
                        if (tlv_len != sizeof(u32))
                                goto invalid_tlv_len;
                        pieces->init_evtlog_size =
-                                       le32_to_cpup((__le32 *)tlv_data);
+                                       le32_to_cpup((const __le32 *)tlv_data);
                        break;
                case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
                        if (tlv_len != sizeof(u32))
                                goto invalid_tlv_len;
                        pieces->init_errlog_ptr =
-                                       le32_to_cpup((__le32 *)tlv_data);
+                                       le32_to_cpup((const __le32 *)tlv_data);
                        break;
                case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
                        if (tlv_len != sizeof(u32))
                                goto invalid_tlv_len;
                        pieces->inst_evtlog_ptr =
-                                       le32_to_cpup((__le32 *)tlv_data);
+                                       le32_to_cpup((const __le32 *)tlv_data);
                        break;
                case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
                        if (tlv_len != sizeof(u32))
                                goto invalid_tlv_len;
                        pieces->inst_evtlog_size =
-                                       le32_to_cpup((__le32 *)tlv_data);
+                                       le32_to_cpup((const __le32 *)tlv_data);
                        break;
                case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
                        if (tlv_len != sizeof(u32))
                                goto invalid_tlv_len;
                        pieces->inst_errlog_ptr =
-                                       le32_to_cpup((__le32 *)tlv_data);
+                                       le32_to_cpup((const __le32 *)tlv_data);
                        break;
                case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
                        if (tlv_len)
@@ -858,7 +835,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        if (tlv_len != sizeof(u32))
                                goto invalid_tlv_len;
                        capa->standard_phy_calibration_size =
-                                       le32_to_cpup((__le32 *)tlv_data);
+                                       le32_to_cpup((const __le32 *)tlv_data);
                        break;
                case IWL_UCODE_TLV_SEC_RT:
                        iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
@@ -884,7 +861,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                case IWL_UCODE_TLV_PHY_SKU:
                        if (tlv_len != sizeof(u32))
                                goto invalid_tlv_len;
-                       drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
+                       drv->fw.phy_config = le32_to_cpup((const __le32 *)tlv_data);
                        drv->fw.valid_tx_ant = (drv->fw.phy_config &
                                                FW_PHY_CFG_TX_CHAIN) >>
                                                FW_PHY_CFG_TX_CHAIN_POS;
@@ -911,7 +888,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        if (tlv_len != sizeof(u32))
                                goto invalid_tlv_len;
                        num_of_cpus =
-                               le32_to_cpup((__le32 *)tlv_data);
+                               le32_to_cpup((const __le32 *)tlv_data);
 
                        if (num_of_cpus == 2) {
                                drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus =
@@ -925,18 +902,14 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                                return -EINVAL;
                        }
                        break;
-               case IWL_UCODE_TLV_CSCHEME:
-                       if (iwl_store_cscheme(&drv->fw, tlv_data, tlv_len))
-                               goto invalid_tlv_len;
-                       break;
                case IWL_UCODE_TLV_N_SCAN_CHANNELS:
                        if (tlv_len != sizeof(u32))
                                goto invalid_tlv_len;
                        capa->n_scan_channels =
-                               le32_to_cpup((__le32 *)tlv_data);
+                               le32_to_cpup((const __le32 *)tlv_data);
                        break;
                case IWL_UCODE_TLV_FW_VERSION: {
-                       __le32 *ptr = (void *)tlv_data;
+                       const __le32 *ptr = (const void *)tlv_data;
                        u32 major, minor;
                        u8 local_comp;
 
@@ -960,15 +933,15 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        break;
                        }
                case IWL_UCODE_TLV_FW_DBG_DEST: {
-                       struct iwl_fw_dbg_dest_tlv *dest = NULL;
-                       struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
+                       const struct iwl_fw_dbg_dest_tlv *dest = NULL;
+                       const struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
                        u8 mon_mode;
 
-                       pieces->dbg_dest_ver = (u8 *)tlv_data;
+                       pieces->dbg_dest_ver = (const u8 *)tlv_data;
                        if (*pieces->dbg_dest_ver == 1) {
-                               dest = (void *)tlv_data;
+                               dest = (const void *)tlv_data;
                        } else if (*pieces->dbg_dest_ver == 0) {
-                               dest_v1 = (void *)tlv_data;
+                               dest_v1 = (const void *)tlv_data;
                        } else {
                                IWL_ERR(drv,
                                        "The version is %d, and it is invalid\n",
@@ -1009,7 +982,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        break;
                        }
                case IWL_UCODE_TLV_FW_DBG_CONF: {
-                       struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data;
+                       const struct iwl_fw_dbg_conf_tlv *conf =
+                               (const void *)tlv_data;
 
                        if (!pieces->dbg_dest_tlv_init) {
                                IWL_ERR(drv,
@@ -1043,8 +1017,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        break;
                        }
                case IWL_UCODE_TLV_FW_DBG_TRIGGER: {
-                       struct iwl_fw_dbg_trigger_tlv *trigger =
-                               (void *)tlv_data;
+                       const struct iwl_fw_dbg_trigger_tlv *trigger =
+                               (const void *)tlv_data;
                        u32 trigger_id = le32_to_cpu(trigger->id);
 
                        if (trigger_id >= ARRAY_SIZE(drv->fw.dbg.trigger_tlv)) {
@@ -1075,7 +1049,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        }
 
                        drv->fw.dbg.dump_mask =
-                               le32_to_cpup((__le32 *)tlv_data);
+                               le32_to_cpup((const __le32 *)tlv_data);
                        break;
                        }
                case IWL_UCODE_TLV_SEC_RT_USNIFFER:
@@ -1087,7 +1061,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                case IWL_UCODE_TLV_PAGING:
                        if (tlv_len != sizeof(u32))
                                goto invalid_tlv_len;
-                       paging_mem_size = le32_to_cpup((__le32 *)tlv_data);
+                       paging_mem_size = le32_to_cpup((const __le32 *)tlv_data);
 
                        IWL_DEBUG_FW(drv,
                                     "Paging: paging enabled (size = %u bytes)\n",
@@ -1117,8 +1091,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        /* ignored */
                        break;
                case IWL_UCODE_TLV_FW_MEM_SEG: {
-                       struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
-                               (void *)tlv_data;
+                       const struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
+                               (const void *)tlv_data;
                        size_t size;
                        struct iwl_fw_dbg_mem_seg_tlv *n;
 
@@ -1146,10 +1120,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        break;
                        }
                case IWL_UCODE_TLV_FW_RECOVERY_INFO: {
-                       struct {
+                       const struct {
                                __le32 buf_addr;
                                __le32 buf_size;
-                       } *recov_info = (void *)tlv_data;
+                       } *recov_info = (const void *)tlv_data;
 
                        if (tlv_len != sizeof(*recov_info))
                                goto invalid_tlv_len;
@@ -1160,10 +1134,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        }
                        break;
                case IWL_UCODE_TLV_FW_FSEQ_VERSION: {
-                       struct {
+                       const struct {
                                u8 version[32];
                                u8 sha1[20];
-                       } *fseq_ver = (void *)tlv_data;
+                       } *fseq_ver = (const void *)tlv_data;
 
                        if (tlv_len != sizeof(*fseq_ver))
                                goto invalid_tlv_len;
@@ -1174,19 +1148,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                case IWL_UCODE_TLV_FW_NUM_STATIONS:
                        if (tlv_len != sizeof(u32))
                                goto invalid_tlv_len;
-                       if (le32_to_cpup((__le32 *)tlv_data) >
+                       if (le32_to_cpup((const __le32 *)tlv_data) >
                            IWL_MVM_STATION_COUNT_MAX) {
                                IWL_ERR(drv,
                                        "%d is an invalid number of station\n",
-                                       le32_to_cpup((__le32 *)tlv_data));
+                                       le32_to_cpup((const __le32 *)tlv_data));
                                goto tlv_error;
                        }
                        capa->num_stations =
-                               le32_to_cpup((__le32 *)tlv_data);
+                               le32_to_cpup((const __le32 *)tlv_data);
                        break;
                case IWL_UCODE_TLV_UMAC_DEBUG_ADDRS: {
-                       struct iwl_umac_debug_addrs *dbg_ptrs =
-                               (void *)tlv_data;
+                       const struct iwl_umac_debug_addrs *dbg_ptrs =
+                               (const void *)tlv_data;
 
                        if (tlv_len != sizeof(*dbg_ptrs))
                                goto invalid_tlv_len;
@@ -1201,8 +1175,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        break;
                        }
                case IWL_UCODE_TLV_LMAC_DEBUG_ADDRS: {
-                       struct iwl_lmac_debug_addrs *dbg_ptrs =
-                               (void *)tlv_data;
+                       const struct iwl_lmac_debug_addrs *dbg_ptrs =
+                               (const void *)tlv_data;
 
                        if (tlv_len != sizeof(*dbg_ptrs))
                                goto invalid_tlv_len;
@@ -1277,7 +1251,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 
        if (len) {
                IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len);
-               iwl_print_hex_dump(drv, IWL_DL_FW, (u8 *)data, len);
+               iwl_print_hex_dump(drv, IWL_DL_FW, data, len);
                return -EINVAL;
        }
 
@@ -1418,7 +1392,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 {
        struct iwl_drv *drv = context;
        struct iwl_fw *fw = &drv->fw;
-       struct iwl_ucode_header *ucode;
+       const struct iwl_ucode_header *ucode;
        struct iwlwifi_opmode_table *op;
        int err;
        struct iwl_firmware_pieces *pieces;
@@ -1456,7 +1430,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
        }
 
        /* Data from ucode file:  header followed by uCode images */
-       ucode = (struct iwl_ucode_header *)ucode_raw->data;
+       ucode = (const struct iwl_ucode_header *)ucode_raw->data;
 
        if (ucode->ver)
                err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces);
@@ -1645,6 +1619,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
        /* We have our copies now, allow OS release its copies */
        release_firmware(ucode_raw);
 
+       iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
+
        mutex_lock(&iwlwifi_opmode_table_mtx);
        switch (fw->type) {
        case IWL_FW_DVM:
@@ -1661,8 +1637,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
        IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
                 drv->fw.fw_version, op->name);
 
-       iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
-
        /* add this device to the list of devices using this op_mode */
        list_add_tail(&drv->list, &op->drv);
 
@@ -1796,6 +1770,7 @@ void iwl_drv_stop(struct iwl_drv *drv)
        kfree(drv);
 }
 
+#define ENABLE_INI     (IWL_DBG_TLV_MAX_PRESET + 1)
 
 /* shared module parameters */
 struct iwl_mod_params iwlwifi_mod_params = {
@@ -1803,7 +1778,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
        .bt_coex_active = true,
        .power_level = IWL_POWER_INDEX_1,
        .uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT,
-       .enable_ini = true,
+       .enable_ini = ENABLE_INI,
        /* the rest are 0 by default */
 };
 IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
@@ -1915,10 +1890,42 @@ MODULE_PARM_DESC(nvm_file, "NVM file name");
 module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
 MODULE_PARM_DESC(uapsd_disable,
                 "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
-module_param_named(enable_ini, iwlwifi_mod_params.enable_ini,
-                  bool, S_IRUGO | S_IWUSR);
+
+static int enable_ini_set(const char *arg, const struct kernel_param *kp)
+{
+       int ret = 0;
+       bool res;
+       u32 new_enable_ini;
+
+       /* in case the argument type is a number */
+       ret = kstrtou32(arg, 0, &new_enable_ini);
+       if (!ret) {
+               if (new_enable_ini > ENABLE_INI) {
+                       pr_err("enable_ini cannot be %u, must be in range 0-16\n", new_enable_ini);
+                       return -EINVAL;
+               }
+               goto out;
+       }
+
+       /* in case the argument type is boolean */
+       ret = kstrtobool(arg, &res);
+       if (ret)
+               return ret;
+       new_enable_ini = (res ? ENABLE_INI : 0);
+
+out:
+       iwlwifi_mod_params.enable_ini = new_enable_ini;
+       return 0;
+}
+
+static const struct kernel_param_ops enable_ini_ops = {
+       .set = enable_ini_set
+};
+
+module_param_cb(enable_ini, &enable_ini_ops, &iwlwifi_mod_params.enable_ini, 0644);
 MODULE_PARM_DESC(enable_ini,
-                "Enable debug INI TLV FW debug infrastructure (default: true");
+                "0:disable, 1-15:FW_DBG_PRESET values, 16:enabled without a preset value defined, "
+                "debug INI TLV FW debug infrastructure (default: 16)");
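
Replacing module_param_named() with module_param_cb() is what lets enable_ini keep accepting the legacy boolean spellings ("Y"/"N") while adding the numeric presets 0-16; with 0644 permissions the value can also be changed at runtime through /sys/module/iwlwifi/parameters/enable_ini. The general shape of a validated, callback-backed parameter, with hypothetical names (my_level, my_level_set):

    #include <linux/moduleparam.h>
    #include <linux/kernel.h>

    static u32 my_level;

    static int my_level_set(const char *val, const struct kernel_param *kp)
    {
            u32 v;
            int ret = kstrtou32(val, 0, &v);

            if (ret || v > 16)
                    return -EINVAL;
            *(u32 *)kp->arg = v;    /* kp->arg points at my_level */
            return 0;
    }

    static const struct kernel_param_ops my_level_ops = {
            .set = my_level_set,
            .get = param_get_uint,  /* stock getter serves sysfs reads */
    };
    module_param_cb(my_level, &my_level_ops, &my_level, 0644);

(The iwlwifi version defines only .set, so reads fall back to the default behavior.)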
 
 /*
  * set bt_coex_active to true, uCode will do kill/defer
index 0fd009e..80073f9 100644 (file)
@@ -84,7 +84,7 @@ void iwl_drv_stop(struct iwl_drv *drv);
  * everything is built-in, then we can avoid that.
  */
 #ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
-#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_GPL(sym)
+#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_NS_GPL(sym, IWLWIFI)
 #else
 #define IWL_EXPORT_SYMBOL(sym)
 #endif
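
Switching the macro to EXPORT_SYMBOL_NS_GPL() moves every iwlwifi export into the IWLWIFI symbol namespace, so modules that consume these symbols (the opmodes, iwlmei) must declare the import or modpost flags the unresolved namespace at build time. The two halves of the contract:

    /* exporting side: the symbol lands in the IWLWIFI namespace */
    EXPORT_SYMBOL_NS_GPL(iwl_poll_bit, IWLWIFI);

    /* consuming module: declare the dependency once at file scope */
    MODULE_IMPORT_NS(IWLWIFI);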
index b9e86bf..5f386bb 100644 (file)
  */
 #define IWL_EEPROM_ACCESS_TIMEOUT      5000 /* uSec */
 
-#define IWL_EEPROM_SEM_TIMEOUT         10   /* microseconds */
-#define IWL_EEPROM_SEM_RETRY_LIMIT     1000 /* number of attempts (not time) */
-
-
 /*
  * The device's EEPROM semaphore prevents conflicts between driver and uCode
  * when accessing the EEPROM; each access is a series of pulses to/from the
  * EEPROM chip, not a single event, so even reads could conflict if they
  * weren't arbitrated by the semaphore.
  */
+#define IWL_EEPROM_SEM_TIMEOUT         10   /* microseconds */
+#define IWL_EEPROM_SEM_RETRY_LIMIT     1000 /* number of attempts (not time) */
 
-#define        EEPROM_SEM_TIMEOUT 10           /* milliseconds */
-#define EEPROM_SEM_RETRY_LIMIT 1000    /* number of attempts (not time) */
 
 static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
 {
        u16 count;
        int ret;
 
-       for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
+       for (count = 0; count < IWL_EEPROM_SEM_RETRY_LIMIT; count++) {
                /* Request semaphore */
                iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                            CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
@@ -51,7 +47,7 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
                ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
                                CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
                                CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
-                               EEPROM_SEM_TIMEOUT);
+                               IWL_EEPROM_SEM_TIMEOUT);
                if (ret >= 0) {
                        IWL_DEBUG_EEPROM(trans->dev,
                                         "Acquired semaphore after %d tries.\n",
index e6fd494..bedd78a 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */
 #ifndef __iwl_fh_h__
@@ -590,11 +590,31 @@ struct iwl_rb_status {
 #define TFD_QUEUE_CB_SIZE(x)   (ilog2(x) - 3)
 #define TFD_QUEUE_SIZE_BC_DUP  (64)
 #define TFD_QUEUE_BC_SIZE      (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
-#define TFD_QUEUE_BC_SIZE_GEN3 1024
+#define TFD_QUEUE_BC_SIZE_GEN3_AX210   1024
+#define TFD_QUEUE_BC_SIZE_GEN3_BZ      (1024 * 4)
 #define IWL_TX_DMA_MASK        DMA_BIT_MASK(36)
 #define IWL_NUM_OF_TBS         20
 #define IWL_TFH_NUM_TBS                25
 
+/* IMR DMA registers */
+#define IMR_TFH_SRV_DMA_CHNL0_CTRL           0x00a0a51c
+#define IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR      0x00a0a520
+#define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB  0x00a0a524
+#define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB  0x00a0a528
+#define IMR_TFH_SRV_DMA_CHNL0_BC             0x00a0a52c
+#define TFH_SRV_DMA_CHNL0_LEFT_BC           0x00a0a530
+
+/* RFH S2D DMA registers */
+#define IMR_RFH_GEN_CFG_SERVICE_DMA_RS_MSK     0x0000000c
+#define IMR_RFH_GEN_CFG_SERVICE_DMA_SNOOP_MSK  0x00000002
+
+/* TFH D2S DMA registers */
+#define IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK       0x80000000
+#define IMR_UREG_CHICK                                 0x00d05c00
+#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS  0x00800000
+#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK          0x00000030
+#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS      0x80000000
+
 static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
 {
        return (sizeof(addr) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF;
@@ -707,14 +727,14 @@ struct iwlagn_scd_bc_tbl {
 } __packed;
 
 /**
- * struct iwl_gen3_bc_tbl scheduler byte count table gen3
+ * struct iwl_gen3_bc_tbl_entry - scheduler byte count table entry gen3
  * For AX210 and on:
  * @tfd_offset: 0-12 - tx command byte count
  *             12-13 - number of 64 byte chunks
  *             14-16 - reserved
  */
-struct iwl_gen3_bc_tbl {
-       __le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3];
+struct iwl_gen3_bc_tbl_entry {
+       __le16 tfd_offset;
 } __packed;
 
 #endif /* !__iwl_fh_h__ */
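
The rename from struct iwl_gen3_bc_tbl (a fixed 1024-entry array type) to a single-entry struct is what allows Bz to grow the table to 4096 entries: the allocation size becomes a per-family computation instead of being baked into the type, as the iwl-trans.c hunk further down shows. In sketch form:

    /* per-family byte count table sizing (condensed from iwl-trans.c) */
    static size_t bc_tbl_size(const struct iwl_trans *trans)
    {
            if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
                    return sizeof(struct iwl_gen3_bc_tbl_entry) *
                           TFD_QUEUE_BC_SIZE_GEN3_BZ;      /* 4096 entries */
            if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
                    return sizeof(struct iwl_gen3_bc_tbl_entry) *
                           TFD_QUEUE_BC_SIZE_GEN3_AX210;   /* 1024 entries */
            return sizeof(struct iwlagn_scd_bc_tbl);
    }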
index 253eac4..396f2c9 100644 (file)
@@ -65,14 +65,14 @@ IWL_EXPORT_SYMBOL(iwl_poll_bit);
 
 u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
 {
-       u32 value = 0x5a5a5a5a;
-
        if (iwl_trans_grab_nic_access(trans)) {
-               value = iwl_read32(trans, reg);
+               u32 value = iwl_read32(trans, reg);
+
                iwl_trans_release_nic_access(trans);
+               return value;
        }
 
-       return value;
+       return 0x5a5a5a5a;
 }
 IWL_EXPORT_SYMBOL(iwl_read_direct32);
 
@@ -135,13 +135,15 @@ IWL_EXPORT_SYMBOL(iwl_write_prph64_no_grab);
 
 u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
 {
-       u32 val = 0x5a5a5a5a;
-
        if (iwl_trans_grab_nic_access(trans)) {
-               val = iwl_read_prph_no_grab(trans, ofs);
+               u32 val = iwl_read_prph_no_grab(trans, ofs);
+
                iwl_trans_release_nic_access(trans);
+
+               return val;
        }
-       return val;
+
+       return 0x5a5a5a5a;
 }
 IWL_EXPORT_SYMBOL(iwl_read_prph);
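
Both rewrites in this file narrow the value's lifetime to the window where NIC access is actually held and make the failure path an explicit return instead of a pre-initialized default. 0x5a5a5a5a is the driver's poison value for failed reads, chosen to be easy to spot in logs and dumps. Annotated:

    if (iwl_trans_grab_nic_access(trans)) {
            u32 value = iwl_read32(trans, reg); /* valid only while held */

            iwl_trans_release_nic_access(trans);
            return value;
    }
    return 0x5a5a5a5a;      /* poison: access grab failed, nothing read */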
 
index 004ebda..d0b4d02 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
  */
 #ifndef __iwl_modparams_h__
 #define __iwl_modparams_h__
@@ -83,7 +83,8 @@ struct iwl_mod_params {
         */
        bool disable_11ax;
        bool remove_when_gone;
-       bool enable_ini;
+       u32 enable_ini;
+       bool disable_11be;
 };
 
 static inline bool iwl_enable_rx_ampdu(void)
index 04addf9..9040da3 100644 (file)
@@ -375,10 +375,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 
                if (v4)
                        ch_flags =
-                               __le32_to_cpup((__le32 *)nvm_ch_flags + ch_idx);
+                               __le32_to_cpup((const __le32 *)nvm_ch_flags + ch_idx);
                else
                        ch_flags =
-                               __le16_to_cpup((__le16 *)nvm_ch_flags + ch_idx);
+                               __le16_to_cpup((const __le16 *)nvm_ch_flags + ch_idx);
 
                if (band == NL80211_BAND_5GHZ &&
                    !data->sku_cap_band_52ghz_enable)
@@ -583,9 +583,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
                                        IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
                                        IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
                                .phy_cap_info[3] =
-                                       IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM |
+                                       IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
                                        IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
-                                       IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
+                                       IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
                                        IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
                                .phy_cap_info[4] =
                                        IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
@@ -653,9 +653,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
                                        IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
                                        IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US,
                                .phy_cap_info[3] =
-                                       IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM |
+                                       IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
                                        IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
-                                       IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
+                                       IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
                                        IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
                                .phy_cap_info[6] =
                                        IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
@@ -731,7 +731,7 @@ static void iwl_init_he_6ghz_capa(struct iwl_trans *trans,
        IWL_DEBUG_EEPROM(trans->dev, "he_6ghz_capa=0x%x\n", he_6ghz_capa);
 
        /* we know it's writable - we set it before ourselves */
-       iftype_data = (void *)sband->iftype_data;
+       iftype_data = (void *)(uintptr_t)sband->iftype_data;
        for (i = 0; i < sband->n_iftype_data; i++)
                iftype_data[i].he_6ghz_capa.capa = cpu_to_le16(he_6ghz_capa);
 }
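
The (uintptr_t) round-trip here is the usual idiom for stripping const from a pointer the code legitimately owns (per the comment, the driver set this field itself): going pointer -> integer -> pointer leaves no qualified-pointer cast for -Wcast-qual to warn about. In isolation:

    /* laundering a known-writable pointer out of a const-qualified field */
    const struct ieee80211_sband_iftype_data *ro = sband->iftype_data;
    struct ieee80211_sband_iftype_data *rw = (void *)(uintptr_t)ro;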
@@ -783,6 +783,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
        switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
        case IWL_CFG_RF_TYPE_GF:
        case IWL_CFG_RF_TYPE_MR:
+       case IWL_CFG_RF_TYPE_MS:
                iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
                        IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
                if (!is_ap)
@@ -911,7 +912,7 @@ static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
        if (cfg->nvm_type != IWL_NVM_EXT)
                return le16_to_cpup(nvm_sw + SKU);
 
-       return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
+       return le32_to_cpup((const __le32 *)(phy_sku + SKU_FAMILY_8000));
 }
 
 static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
@@ -919,8 +920,8 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
        if (cfg->nvm_type != IWL_NVM_EXT)
                return le16_to_cpup(nvm_sw + NVM_VERSION);
        else
-               return le32_to_cpup((__le32 *)(nvm_sw +
-                                              NVM_VERSION_EXT_NVM));
+               return le32_to_cpup((const __le32 *)(nvm_sw +
+                                                    NVM_VERSION_EXT_NVM));
 }
 
 static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
@@ -929,7 +930,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
        if (cfg->nvm_type != IWL_NVM_EXT)
                return le16_to_cpup(nvm_sw + RADIO_CFG);
 
-       return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
+       return le32_to_cpup((const __le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
 
 }
 
@@ -940,7 +941,7 @@ static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
        if (cfg->nvm_type != IWL_NVM_EXT)
                return le16_to_cpup(nvm_sw + N_HW_ADDRS);
 
-       n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
+       n_hw_addr = le32_to_cpup((const __le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
 
        return n_hw_addr & N_HW_ADDR_MASK;
 }
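
The NVM accessors read 32-bit fields out of buffers typed as __le16 words, so each access casts to __le32 before the endianness swap; this series just keeps those casts const. The access pattern, self-contained:

    /* read a host-order u32 out of a read-only __le16 word stream */
    static u32 nvm_read_u32(const __le16 *nvm, int word_offset)
    {
            return le32_to_cpup((const __le32 *)(nvm + word_offset));
    }

    /* e.g. the EXT-NVM SKU: nvm_read_u32(phy_sku, SKU_FAMILY_8000) */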
@@ -1079,7 +1080,9 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
                return -EINVAL;
        }
 
-       IWL_INFO(trans, "base HW address: %pM\n", data->hw_addr);
+       if (!trans->csme_own)
+               IWL_INFO(trans, "base HW address: %pM, OTP minor version: 0x%x\n",
+                        data->hw_addr, iwl_read_prph(trans, REG_OTP_MINOR));
 
        return 0;
 }
@@ -1384,8 +1387,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                nvm_chan = iwl_nvm_channels;
        }
 
-       if (WARN_ON(num_of_ch > max_num_ch))
+       if (num_of_ch > max_num_ch) {
+               IWL_DEBUG_DEV(dev, IWL_DL_LAR,
+                             "Num of channels (%d) is greater than expected. Truncating to %d\n",
+                             num_of_ch, max_num_ch);
                num_of_ch = max_num_ch;
+       }
 
        if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
                return ERR_PTR(-EINVAL);
@@ -1591,7 +1598,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
        }
 
        eof = fw_entry->data + fw_entry->size;
-       dword_buff = (__le32 *)fw_entry->data;
+       dword_buff = (const __le32 *)fw_entry->data;
 
        /* some NVM file will contain a header.
         * The header is identified by 2 dwords header as follow:
@@ -1603,7 +1610,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
        if (fw_entry->size > NVM_HEADER_SIZE &&
            dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
            dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
-               file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
+               file_sec = (const void *)(fw_entry->data + NVM_HEADER_SIZE);
                IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
                IWL_INFO(trans, "NVM Manufacturing date %08X\n",
                         le32_to_cpu(dword_buff[3]));
@@ -1616,7 +1623,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
                        goto out;
                }
        } else {
-               file_sec = (void *)fw_entry->data;
+               file_sec = (const void *)fw_entry->data;
        }
 
        while (true) {
@@ -1684,7 +1691,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
                nvm_sections[section_id].length = section_size;
 
                /* advance to the next section */
-               file_sec = (void *)(file_sec->data + section_size);
+               file_sec = (const void *)(file_sec->data + section_size);
        }
 out:
        release_firmware(fw_entry);
index 5378315..0a93ac7 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2005-2014, 2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2020-2021 Intel Corporation
  * Copyright (C) 2016 Intel Deutschland GmbH
  */
 #include <linux/slab.h>
@@ -13,8 +13,6 @@
 #include "iwl-op-mode.h"
 #include "iwl-trans.h"
 
-#define CHANNEL_NUM_SIZE       4       /* num of channels in calib_ch size */
-
 struct iwl_phy_db_entry {
        u16     size;
        u8      *data;
index 95b3dae..a22788a 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016 Intel Deutschland GmbH
  */
 #define WFPM_GP2                       0xA030B4
 
 /* DBGI SRAM Register details */
-#define DBGI_SRAM_TARGET_ACCESS_CFG                    0x00A2E14C
-#define DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK  0x10000
 #define DBGI_SRAM_TARGET_ACCESS_RDATA_LSB              0x00A2E154
 #define DBGI_SRAM_TARGET_ACCESS_RDATA_MSB              0x00A2E158
+#define DBGI_SRAM_FIFO_POINTERS                                0x00A2E148
+#define DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK             0x00000FFF
 
 enum {
        ENABLE_WFPM = BIT(31),
@@ -386,6 +386,11 @@ enum {
 #define UREG_LMAC1_CURRENT_PC          0xa05c1c
 #define UREG_LMAC2_CURRENT_PC          0xa05c20
 
+#define WFPM_LMAC1_PD_NOTIFICATION      0xa0338c
+#define WFPM_ARC1_PD_NOTIFICATION       0xa03044
+#define HPM_SECONDARY_DEVICE_STATE      0xa03404
+
 /* For UMAG_GEN_HW_STATUS reg check */
 enum {
        UMAG_GEN_HW_IS_FPGA = BIT(1),
@@ -491,4 +496,6 @@ enum {
 #define HBUS_TIMEOUT 0xA5A5A5A1
 #define WFPM_DPHY_OFF 0xDF10FF
 
+#define REG_OTP_MINOR 0xA0333C
+
 #endif                         /* __iwl_prph_h__ */
index 9236f91..b1af935 100644 (file)
@@ -78,8 +78,12 @@ int iwl_trans_init(struct iwl_trans *trans)
        if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
                return -EINVAL;
 
-       if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
-               trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
+       if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+               trans->txqs.bc_tbl_size =
+                       sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ;
+       else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+               trans->txqs.bc_tbl_size =
+                       sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210;
        else
                trans->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
        /*
@@ -203,10 +207,10 @@ IWL_EXPORT_SYMBOL(iwl_trans_send_cmd);
 static int iwl_hcmd_names_cmp(const void *key, const void *elt)
 {
        const struct iwl_hcmd_names *name = elt;
-       u8 cmd1 = *(u8 *)key;
+       const u8 *cmd1 = key;
        u8 cmd2 = name->cmd_id;
 
-       return (cmd1 - cmd2);
+       return (*cmd1 - cmd2);
 }
 
 const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id)
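
The comparator change keeps the key pointer const-qualified end to end rather than copying it through a cast that dropped the qualifier; the result is unchanged. For context, this comparator backs a bsearch() over the hcmd name arrays, which must therefore stay sorted by cmd_id (names/num_names below stand in for the driver's per-group arrays):

    #include <linux/bsearch.h>

    /* key is &cmd_id (a u8); the table is sorted ascending by cmd_id */
    static int hcmd_names_cmp(const void *key, const void *elt)
    {
            const u8 *cmd1 = key;
            const struct iwl_hcmd_names *name = elt;

            return *cmd1 - name->cmd_id;
    }

    const struct iwl_hcmd_names *found =
            bsearch(&cmd_id, names, num_names, sizeof(*names),
                    hcmd_names_cmp);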
index 1bcaa35..d659ccd 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -406,6 +406,9 @@ struct iwl_dump_sanitize_ops {
  * @cb_data_offs: offset inside skb->cb to store transport data at, must have
  *     space for at least two pointers
  * @fw_reset_handshake: firmware supports reset flow handshake
+ * @queue_alloc_cmd_ver: queue allocation command version; set to 0
+ *     to use the older SCD_QUEUE_CFG, or to the version of
+ *     SCD_QUEUE_CONFIG_CMD otherwise.
  */
 struct iwl_trans_config {
        struct iwl_op_mode *op_mode;
@@ -424,6 +427,7 @@ struct iwl_trans_config {
 
        u8 cb_data_offs;
        bool fw_reset_handshake;
+       u8 queue_alloc_cmd_ver;
 };
 
 struct iwl_trans_dump_data {
@@ -569,10 +573,9 @@ struct iwl_trans_ops {
        void (*txq_disable)(struct iwl_trans *trans, int queue,
                            bool configure_scd);
        /* 22000 functions */
-       int (*txq_alloc)(struct iwl_trans *trans,
-                        __le16 flags, u8 sta_id, u8 tid,
-                        int cmd_id, int size,
-                        unsigned int queue_wdg_timeout);
+       int (*txq_alloc)(struct iwl_trans *trans, u32 flags,
+                        u32 sta_mask, u8 tid,
+                        int size, unsigned int queue_wdg_timeout);
        void (*txq_free)(struct iwl_trans *trans, int queue);
        int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
                            struct iwl_trans_rxq_dma_data *data);
@@ -615,6 +618,10 @@ struct iwl_trans_ops {
        int (*set_reduce_power)(struct iwl_trans *trans,
                                const void *data, u32 len);
        void (*interrupts)(struct iwl_trans *trans, bool enable);
+       int (*imr_dma_data)(struct iwl_trans *trans,
+                           u32 dst_addr, u64 src_addr,
+                           u32 byte_cnt);
 };
 
 /**
@@ -721,6 +728,26 @@ struct iwl_self_init_dram {
        int paging_cnt;
 };
 
+/**
+ * struct iwl_imr_data - imr dram data used during debug process
+ * @imr_enable: imr enable status received from fw
+ * @imr_size: imr dram size received from fw
+ * @sram_addr: sram address from debug tlv
+ * @sram_size: sram size from debug tlv
+ * @imr2sram_remainbyte: size remaining after each DMA transfer
+ * @imr_curr_addr: current dst address used during dma transfer
+ * @imr_base_addr: imr address received from fw
+ */
+struct iwl_imr_data {
+       u32 imr_enable;
+       u32 imr_size;
+       u32 sram_addr;
+       u32 sram_size;
+       u32 imr2sram_remainbyte;
+       u64 imr_curr_addr;
+       __le64 imr_base_addr;
+};
+
 /**
  * struct iwl_trans_debug - transport debug related data
  *
@@ -785,6 +812,7 @@ struct iwl_trans_debug {
        u32 ucode_preset;
        bool restart_required;
        u32 last_tp_resetfw;
+       struct iwl_imr_data imr_data;
 };
 
 struct iwl_dma_ptr {
@@ -904,6 +932,7 @@ struct iwl_txq {
  * @queue_used - bit mask of used queues
  * @queue_stopped - bit mask of stopped queues
  * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
+ * @queue_alloc_cmd_ver: queue allocation command version
  */
 struct iwl_trans_txqs {
        unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
@@ -929,6 +958,8 @@ struct iwl_trans_txqs {
        } tfd;
 
        struct iwl_dma_ptr scd_bc_tbls;
+
+       u8 queue_alloc_cmd_ver;
 };
 
 /**
@@ -1220,9 +1251,8 @@ iwl_trans_txq_free(struct iwl_trans *trans, int queue)
 
 static inline int
 iwl_trans_txq_alloc(struct iwl_trans *trans,
-                   __le16 flags, u8 sta_id, u8 tid,
-                   int cmd_id, int size,
-                   unsigned int wdg_timeout)
+                   u32 flags, u32 sta_mask, u8 tid,
+                   int size, unsigned int wdg_timeout)
 {
        might_sleep();
 
@@ -1234,8 +1264,8 @@ iwl_trans_txq_alloc(struct iwl_trans *trans,
                return -EIO;
        }
 
-       return trans->ops->txq_alloc(trans, flags, sta_id, tid,
-                                    cmd_id, size, wdg_timeout);
+       return trans->ops->txq_alloc(trans, flags, sta_mask, tid,
+                                    size, wdg_timeout);
 }
 
 static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
@@ -1368,6 +1398,15 @@ static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
                iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
        } while (0)
 
+static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans,
+                                         u32 dst_addr, u64 src_addr,
+                                         u32 byte_cnt)
+{
+       if (trans->ops->imr_dma_data)
+               return trans->ops->imr_dma_data(trans, dst_addr, src_addr, byte_cnt);
+       return 0;
+}
+
 static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
 {
        u32 value;
index 2f7f0f9..b4f4523 100644 (file)
@@ -312,7 +312,7 @@ static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev,
                memcpy(q_head + wr, hdr, tx_sz);
        } else {
                memcpy(q_head + wr, hdr, q_sz - wr);
-               memcpy(q_head, (u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
+               memcpy(q_head, (const u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
        }
 
        WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
@@ -432,7 +432,7 @@ void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx)
        u32 q_sz;
        u32 rd;
        u32 wr;
-       void *q_head;
+       u8 *q_head;
 
        if (!iwl_mei_global_cldev)
                return;
@@ -2003,7 +2003,11 @@ static void iwl_mei_remove(struct mei_cl_device *cldev)
 }
 
 static const struct mei_cl_device_id iwl_mei_tbl[] = {
-       { KBUILD_MODNAME, MEI_WLAN_UUID, MEI_CL_VERSION_ANY},
+       {
+               .name = KBUILD_MODNAME,
+               .uuid = MEI_WLAN_UUID,
+               .version = MEI_CL_VERSION_ANY,
+       },
 
        /* required last entry */
        { }
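
Moving the ID table to designated initializers guards against any future reordering of struct mei_cl_device_id's fields and documents which member each value fills; the zeroed { } sentinel is what terminates the bus core's match loop. The general pattern for such tables (example_tbl is illustrative):

    static const struct mei_cl_device_id example_tbl[] = {
            {
                    .name    = KBUILD_MODNAME,
                    .uuid    = MEI_WLAN_UUID,
                    .version = MEI_CL_VERSION_ANY,
            },
            { }     /* required last entry: all-zero sentinel */
    };
    MODULE_DEVICE_TABLE(mei, example_tbl);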
index 468102a..3472167 100644 (file)
@@ -102,8 +102,8 @@ static bool iwl_mei_rx_filter_arp(struct sk_buff *skb,
         * src IP address    - 4 bytes
         * target MAC address - 6 bytes
         */
-       target_ip = (void *)((u8 *)(arp + 1) +
-                            ETH_ALEN + sizeof(__be32) + ETH_ALEN);
+       target_ip = (const void *)((const u8 *)(arp + 1) +
+                                  ETH_ALEN + sizeof(__be32) + ETH_ALEN);
 
        /*
         * ARP request is forwarded to ME only if IP address match in the
index b400867..a995bba 100644 (file)
@@ -31,7 +31,7 @@ void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
        memcpy(mvmvif->rekey_data.kck, data->kck, data->kck_len);
        mvmvif->rekey_data.akm = data->akm & 0xFF;
        mvmvif->rekey_data.replay_ctr =
-               cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
+               cpu_to_le64(be64_to_cpup((const __be64 *)data->replay_ctr));
        mvmvif->rekey_data.valid = true;
 
        mutex_unlock(&mvm->mutex);
@@ -453,8 +453,7 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
                                         struct ieee80211_vif *vif)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                       WOWLAN_TSC_RSC_PARAM,
+       int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TSC_RSC_PARAM,
                                        IWL_FW_CMD_VER_UNKNOWN);
        int ret;
 
@@ -672,8 +671,7 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
                .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
        };
        int i, err;
-       int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                       WOWLAN_PATTERNS,
+       int ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
                                        IWL_FW_CMD_VER_UNKNOWN);
 
        if (!wowlan->n_patterns)
@@ -921,8 +919,7 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
        wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
                ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
 
-       if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                 WOWLAN_CONFIGURATION, 0) < 6) {
+       if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 6) {
                /* Query the last used seqno and set it */
                int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
 
@@ -1017,8 +1014,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
 
        if (!fw_has_api(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
-               int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                               WOWLAN_TKIP_PARAM,
+               int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TKIP_PARAM,
                                                IWL_FW_CMD_VER_UNKNOWN);
                struct wowlan_key_tkip_data tkip_data = {};
                int size;
@@ -1058,7 +1054,6 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
                };
 
                cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
-                                               IWL_ALWAYS_LONG_GROUP,
                                                WOWLAN_KEK_KCK_MATERIAL,
                                                IWL_FW_CMD_VER_UNKNOWN);
                if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 && cmd_ver != 4 &&
@@ -1089,7 +1084,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
                                        sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
                        /* skip the sta_id at the beginning */
                        _kek_kck_cmd = (void *)
-                               ((u8 *)_kek_kck_cmd) + sizeof(kek_kck_cmd.sta_id);
+                               ((u8 *)_kek_kck_cmd + sizeof(kek_kck_cmd.sta_id));
                }
 
                IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n",
@@ -1489,7 +1484,7 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
                int pktsize = status->wake_packet_bufsize;
                int pktlen = status->wake_packet_length;
                const u8 *pktdata = status->wake_packet;
-               struct ieee80211_hdr *hdr = (void *)pktdata;
+               const struct ieee80211_hdr *hdr = (const void *)pktdata;
                int truncated = pktlen - pktsize;
 
                /* this would be a firmware bug */
@@ -2074,8 +2069,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
        };
        int ret, len;
        u8 notif_ver;
-       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                          WOWLAN_GET_STATUSES,
+       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
                                           IWL_FW_CMD_VER_UNKNOWN);
 
        if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN)
@@ -2182,8 +2176,7 @@ out_free_resp:
 static struct iwl_wowlan_status_data *
 iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, u8 sta_id)
 {
-       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                          OFFLOADS_QUERY_CMD,
+       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, OFFLOADS_QUERY_CMD,
                                           IWL_FW_CMD_VER_UNKNOWN);
        __le32 station_id = cpu_to_le32(sta_id);
        u32 cmd_size = cmd_ver != IWL_FW_CMD_VER_UNKNOWN ? sizeof(station_id) : 0;
@@ -2704,7 +2697,9 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
 
        /* start pseudo D3 */
        rtnl_lock();
+       wiphy_lock(mvm->hw->wiphy);
        err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
+       wiphy_unlock(mvm->hw->wiphy);
        rtnl_unlock();
        if (err > 0)
                err = -EINVAL;
@@ -2760,7 +2755,9 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
        iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
 
        rtnl_lock();
+       wiphy_lock(mvm->hw->wiphy);
        __iwl_mvm_resume(mvm, true);
+       wiphy_unlock(mvm->hw->wiphy);
        rtnl_unlock();
 
        iwl_mvm_resume_tcm(mvm);
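
Both pseudo-D3 debugfs hooks now take the wiphy mutex that the suspend/resume paths expect to hold since cfg80211 moved to per-wiphy locking. The nesting order must mirror cfg80211's own, RTNL outermost, released in reverse:

    rtnl_lock();
    wiphy_lock(mvm->hw->wiphy);     /* wiphy mutex nests inside RTNL */
    /* ... __iwl_mvm_suspend() or __iwl_mvm_resume() ... */
    wiphy_unlock(mvm->hw->wiphy);
    rtnl_unlock();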
index 445c94a..49898fd 100644 (file)
@@ -426,8 +426,7 @@ static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta,
                return -EINVAL;
 
        /* only change from debug set <-> debug unset */
-       if ((amsdu_len && mvmsta->orig_amsdu_len) ||
-           (!!amsdu_len && mvmsta->orig_amsdu_len))
+       if (amsdu_len && mvmsta->orig_amsdu_len)
                return -EBUSY;
 
        if (amsdu_len) {
@@ -1479,7 +1478,7 @@ iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
                .mvm = mvm,
        };
        u16 wait_cmds[] = {
-               iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD, DATA_PATH_GROUP, 0),
+               WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD),
        };
        u32 aid;
        int ret;
@@ -1514,8 +1513,9 @@ iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
                                   wait_cmds, ARRAY_SIZE(wait_cmds),
                                   iwl_mvm_sniffer_apply, &apply);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD,
-                                                  DATA_PATH_GROUP, 0), 0,
+       ret = iwl_mvm_send_cmd_pdu(mvm,
+                                  WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD),
+                                  0,
                                   sizeof(he_mon_cmd), &he_mon_cmd);
 
        /* no need to really wait, we already did anyway */
@@ -1727,8 +1727,7 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
        if (!iwl_mvm_firmware_running(mvm))
                return -EIO;
 
-       hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
-                            DEBUG_GROUP, 0);
+       hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR);
        cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ);
 
        /* Take care of alignment of both the position and the length */
@@ -1758,7 +1757,7 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
                goto out;
        }
 
-       ret = len - copy_to_user(user_buf, (void *)rsp->data + delta, len);
+       ret = len - copy_to_user(user_buf, (u8 *)rsp->data + delta, len);
        *ppos += ret;
 
 out:
@@ -1782,8 +1781,7 @@ static ssize_t iwl_dbgfs_mem_write(struct file *file,
        if (!iwl_mvm_firmware_running(mvm))
                return -EIO;
 
-       hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
-                            DEBUG_GROUP, 0);
+       hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR);
 
        if (*ppos & 0x3 || count < 4) {
                op = DEBUG_MEM_OP_WRITE_BYTES;
index 628aee6..430044b 100644 (file)
@@ -346,8 +346,8 @@ iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
                *format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
                break;
        case NL80211_CHAN_WIDTH_160:
-               cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
-                                               TOF_RANGE_REQ_CMD,
+               cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+                                               WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
                                                IWL_FW_CMD_VER_UNKNOWN);
 
                if (cmd_ver >= 13) {
@@ -548,7 +548,7 @@ static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 {
        struct iwl_tof_range_req_cmd_v5 cmd_v5;
        struct iwl_host_cmd hcmd = {
-               .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+               .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
                .dataflags[0] = IWL_HCMD_DFL_DUP,
                .data[0] = &cmd_v5,
                .len[0] = sizeof(cmd_v5),
@@ -574,7 +574,7 @@ static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 {
        struct iwl_tof_range_req_cmd_v7 cmd_v7;
        struct iwl_host_cmd hcmd = {
-               .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+               .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
                .dataflags[0] = IWL_HCMD_DFL_DUP,
                .data[0] = &cmd_v7,
                .len[0] = sizeof(cmd_v7),
@@ -604,7 +604,7 @@ static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 {
        struct iwl_tof_range_req_cmd_v8 cmd;
        struct iwl_host_cmd hcmd = {
-               .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+               .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
                .dataflags[0] = IWL_HCMD_DFL_DUP,
                .data[0] = &cmd,
                .len[0] = sizeof(cmd),
@@ -630,7 +630,7 @@ static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 {
        struct iwl_tof_range_req_cmd_v9 cmd;
        struct iwl_host_cmd hcmd = {
-               .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+               .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
                .dataflags[0] = IWL_HCMD_DFL_DUP,
                .data[0] = &cmd,
                .len[0] = sizeof(cmd),
@@ -728,7 +728,7 @@ static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
 {
        struct iwl_tof_range_req_cmd_v11 cmd;
        struct iwl_host_cmd hcmd = {
-               .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+               .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
                .dataflags[0] = IWL_HCMD_DFL_DUP,
                .data[0] = &cmd,
                .len[0] = sizeof(cmd),
@@ -799,7 +799,7 @@ static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
 {
        struct iwl_tof_range_req_cmd_v12 cmd;
        struct iwl_host_cmd hcmd = {
-               .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+               .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
                .dataflags[0] = IWL_HCMD_DFL_DUP,
                .data[0] = &cmd,
                .len[0] = sizeof(cmd),
@@ -827,7 +827,7 @@ static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
 {
        struct iwl_tof_range_req_cmd_v13 cmd;
        struct iwl_host_cmd hcmd = {
-               .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+               .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
                .dataflags[0] = IWL_HCMD_DFL_DUP,
                .data[0] = &cmd,
                .len[0] = sizeof(cmd),
@@ -877,8 +877,8 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                return -EBUSY;
 
        if (new_api) {
-               u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
-                                                  TOF_RANGE_REQ_CMD,
+               u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+                                                  WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
                                                   IWL_FW_CMD_VER_UNKNOWN);
 
                switch (cmd_ver) {
@@ -927,8 +927,7 @@ void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
 
        iwl_mvm_ftm_reset(mvm);
 
-       if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD,
-                                                LOCATION_GROUP, 0),
+       if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD),
                                 0, sizeof(cmd), &cmd))
                IWL_ERR(mvm, "failed to abort FTM process\n");
 }
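
Every iwl_cmd_id(cmd, group, 0) -> WIDE_ID(group, cmd) conversion in this series is mechanical: same value, arguments swapped, unused version byte dropped. WIDE_ID simply packs the group into the upper byte of the 16-bit command ID:

    /* from iwl-trans.h */
    #define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))

    u32 cmd_id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD);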
index bda6da7..9729680 100644 (file)
@@ -106,6 +106,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
                          struct ieee80211_vif *vif,
                          struct cfg80211_chan_def *chandef)
 {
+       u32 cmd_id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_CONFIG_CMD);
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        /*
         * The command structure is the same for versions 6, 7 and 8 (only the
@@ -120,8 +121,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
                                    IWL_TOF_RESPONDER_CMD_VALID_STA_ID),
                .sta_id = mvmvif->bcast_sta.sta_id,
        };
-       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
-                                          TOF_RESPONDER_CONFIG_CMD, 6);
+       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 6);
        int err;
        int cmd_size;
 
@@ -161,9 +161,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
 
        memcpy(cmd.bssid, vif->addr, ETH_ALEN);
 
-       return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RESPONDER_CONFIG_CMD,
-                                                   LOCATION_GROUP, 0),
-                                   0, cmd_size, &cmd);
+       return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
 }
 
 static int
@@ -177,8 +175,7 @@ iwl_mvm_ftm_responder_dyn_cfg_v2(struct iwl_mvm *mvm,
        };
        u8 data[IWL_LCI_CIVIC_IE_MAX_SIZE] = {0};
        struct iwl_host_cmd hcmd = {
-               .id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD,
-                                LOCATION_GROUP, 0),
+               .id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
                .data[0] = &cmd,
                .len[0] = sizeof(cmd),
                .data[1] = &data,
@@ -220,8 +217,7 @@ iwl_mvm_ftm_responder_dyn_cfg_v3(struct iwl_mvm *mvm,
 {
        struct iwl_tof_responder_dyn_config_cmd cmd;
        struct iwl_host_cmd hcmd = {
-               .id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD,
-                                LOCATION_GROUP, 0),
+               .id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
                .data[0] = &cmd,
                .len[0] = sizeof(cmd),
                /* may not be able to DMA from stack */
@@ -278,8 +274,9 @@ iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm,
                                  struct ieee80211_ftm_responder_params *params)
 {
        int ret;
-       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
-                                          TOF_RESPONDER_DYN_CONFIG_CMD, 2);
+       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+                                          WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
+                                          2);
 
        switch (cmd_ver) {
        case 2:
@@ -320,8 +317,9 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
                .addr = addr,
                .hltk = hltk,
        };
-       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
-                                          TOF_RESPONDER_DYN_CONFIG_CMD, 2);
+       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+                                          WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
+                                          2);
 
        lockdep_assert_held(&mvm->mutex);
 
index ae589b3..4632d3a 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
 #define MVM_UCODE_ALIVE_TIMEOUT        (HZ)
 #define MVM_UCODE_CALIB_TIMEOUT        (2 * HZ)
 
-#define UCODE_VALID_OK cpu_to_le32(0x1)
-
-#define IWL_PPAG_MASK 3
-#define IWL_PPAG_ETSI_MASK BIT(0)
-
 #define IWL_TAS_US_MCC 0x5553
 #define IWL_TAS_CANADA_MCC 0x4341
 
@@ -79,7 +74,7 @@ static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
        struct iwl_dqa_enable_cmd dqa_cmd = {
                .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
        };
-       u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
+       u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, DQA_ENABLE_CMD);
        int ret;
 
        ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
@@ -126,13 +121,54 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
        u32 lmac_error_event_table, umac_error_table;
        u32 version = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
                                              UCODE_ALIVE_NTFY, 0);
+       u32 i;
 
-       /*
-        * For v5 and above, we can check the version, for older
-        * versions we need to check the size.
-        */
-       if (version == 5 || version == 6) {
-               /* v5 and v6 are compatible (only IMR addition) */
+       if (version == 6) {
+               struct iwl_alive_ntf_v6 *palive;
+
+               if (pkt_len < sizeof(*palive))
+                       return false;
+
+               palive = (void *)pkt->data;
+               mvm->trans->dbg.imr_data.imr_enable =
+                       le32_to_cpu(palive->imr.enabled);
+               mvm->trans->dbg.imr_data.imr_size =
+                       le32_to_cpu(palive->imr.size);
+               mvm->trans->dbg.imr_data.imr2sram_remainbyte =
+                       mvm->trans->dbg.imr_data.imr_size;
+               mvm->trans->dbg.imr_data.imr_base_addr =
+                       palive->imr.base_addr;
+               mvm->trans->dbg.imr_data.imr_curr_addr =
+                       le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr);
+               IWL_DEBUG_FW(mvm, "IMR Enabled: 0x%x size 0x%x Address 0x%016llx\n",
+                            mvm->trans->dbg.imr_data.imr_enable,
+                            mvm->trans->dbg.imr_data.imr_size,
+                            le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr));
+
+               if (!mvm->trans->dbg.imr_data.imr_enable) {
+                       for (i = 0; i < ARRAY_SIZE(mvm->trans->dbg.active_regions); i++) {
+                               struct iwl_ucode_tlv *reg_tlv;
+                               struct iwl_fw_ini_region_tlv *reg;
+
+                               reg_tlv = mvm->trans->dbg.active_regions[i];
+                               if (!reg_tlv)
+                                       continue;
+
+                               reg = (void *)reg_tlv->data;
+                               /*
+                                * We have only one DRAM IMR region, so we
+                                * can break as soon as we find the first
+                                * one.
+                                */
+                               if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
+                                       mvm->trans->dbg.unsupported_region_msk |= BIT(i);
+                                       break;
+                               }
+                       }
+               }
+       }
+
+       if (version >= 5) {
                struct iwl_alive_ntf_v5 *palive;
 
                if (pkt_len < sizeof(*palive))
@@ -249,6 +285,26 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
        return false;
 }
 
+static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm)
+{
+       struct iwl_trans *trans = mvm->trans;
+       enum iwl_device_family device_family = trans->trans_cfg->device_family;
+
+       if (device_family < IWL_DEVICE_FAMILY_8000)
+               return;
+
+       if (device_family <= IWL_DEVICE_FAMILY_9000)
+               IWL_ERR(mvm, "WFPM_ARC1_PD_NOTIFICATION: 0x%x\n",
+                       iwl_read_umac_prph(trans, WFPM_ARC1_PD_NOTIFICATION));
+       else
+               IWL_ERR(mvm, "WFPM_LMAC1_PD_NOTIFICATION: 0x%x\n",
+                       iwl_read_umac_prph(trans, WFPM_LMAC1_PD_NOTIFICATION));
+
+       IWL_ERR(mvm, "HPM_SECONDARY_DEVICE_STATE: 0x%x\n",
+               iwl_read_umac_prph(trans, HPM_SECONDARY_DEVICE_STATE));
+
+}
 static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
                                         enum iwl_ucode_type ucode_type)
 {
@@ -314,6 +370,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
                                iwl_read_prph(trans, SB_CPU_2_STATUS));
                }
 
+               iwl_mvm_print_pd_notification(mvm);
+
                /* LMAC/UMAC PC info */
                if (trans->trans_cfg->device_family >=
                                        IWL_DEVICE_FAMILY_9000) {
@@ -546,8 +604,7 @@ static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
                return 0;
        }
 
-       cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, REGULATORY_AND_NVM_GROUP,
-                                       SAR_OFFSET_MAPPING_TABLE_CMD,
+       cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
                                        IWL_FW_CMD_VER_UNKNOWN);
 
        if (cmd_ver != 2) {
@@ -572,6 +629,7 @@ static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
 
 static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
 {
+       u32 cmd_id = PHY_CONFIGURATION_CMD;
        struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd;
        enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
        struct iwl_phy_specific_cfg phy_filters = {};
@@ -603,8 +661,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
        phy_cfg_cmd.calib_control.flow_trigger =
                mvm->fw->default_calib[ucode_type].flow_trigger;
 
-       cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
-                                       PHY_CONFIGURATION_CMD,
+       cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
                                        IWL_FW_CMD_VER_UNKNOWN);
        if (cmd_ver == 3) {
                iwl_mvm_phy_filter_init(mvm, &phy_filters);
@@ -616,8 +673,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
                       phy_cfg_cmd.phy_cfg);
        cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) :
                                    sizeof(struct iwl_phy_cfg_cmd_v1);
-       return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
-                                   cmd_size, &phy_cfg_cmd);
+       return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &phy_cfg_cmd);
 }
 
 int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm)
@@ -737,7 +793,7 @@ out:
                mvm->nvm_data->bands[0].n_channels = 1;
                mvm->nvm_data->bands[0].n_bitrates = 1;
                mvm->nvm_data->bands[0].bitrates =
-                       (void *)mvm->nvm_data->channels + 1;
+                       (void *)((u8 *)mvm->nvm_data->channels + 1);
                mvm->nvm_data->bands[0].bitrates->hw_value = 10;
        }
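
The bitrates assignment above is deliberately byte-granular: the old spelling did arithmetic on a void *, which GCC accepts as a byte-sized extension, and the new spelling keeps the identical +1-byte offset while saying so in standard C (chans below stands for any typed pointer):

    /* arithmetic on void * is a GNU extension (byte-sized steps);
     * casting through u8 * states the same thing portably */
    void *a = (void *)chans + 1;            /* GNU C: advance 1 byte    */
    void *b = (void *)((u8 *)chans + 1);    /* standard: advance 1 byte */
    /* contrast: chans + 1 on a typed pointer advances sizeof(*chans) */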
 
@@ -760,6 +816,7 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
 #ifdef CONFIG_ACPI
 int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
 {
+       u32 cmd_id = REDUCE_TX_POWER_CMD;
        struct iwl_dev_tx_power_cmd cmd = {
                .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
        };
@@ -767,11 +824,14 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
        int ret;
        u16 len = 0;
        u32 n_subbands;
-       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                          REDUCE_TX_POWER_CMD,
+       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
                                           IWL_FW_CMD_VER_UNKNOWN);
-
-       if (cmd_ver == 6) {
+       if (cmd_ver == 7) {
+               len = sizeof(cmd.v7);
+               n_subbands = IWL_NUM_SUB_BANDS_V2;
+               per_chain = cmd.v7.per_chain[0][0];
+               cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags);
+       } else if (cmd_ver == 6) {
                len = sizeof(cmd.v6);
                n_subbands = IWL_NUM_SUB_BANDS_V2;
                per_chain = cmd.v6.per_chain[0][0];
@@ -805,7 +865,7 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
        iwl_mei_set_power_limit(per_chain);
 
        IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
-       return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+       return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
 }
 
 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
@@ -814,9 +874,12 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
        struct iwl_geo_tx_power_profiles_resp *resp;
        u16 len;
        int ret;
-       struct iwl_host_cmd cmd;
-       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
-                                          PER_CHAIN_LIMIT_OFFSET_CMD,
+       struct iwl_host_cmd cmd = {
+               .id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD),
+               .flags = CMD_WANT_SKB,
+               .data = { &geo_tx_cmd },
+       };
+       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
                                           IWL_FW_CMD_VER_UNKNOWN);
 
        /* the ops field is at the same spot for all versions, so set in v1 */
@@ -838,12 +901,7 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
        if (!iwl_sar_geo_support(&mvm->fwrt))
                return -EOPNOTSUPP;
 
-       cmd = (struct iwl_host_cmd){
-               .id =  WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD),
-               .len = { len, },
-               .flags = CMD_WANT_SKB,
-               .data = { &geo_tx_cmd },
-       };
+       cmd.len[0] = len;
 
        ret = iwl_mvm_send_cmd(mvm, &cmd);
        if (ret) {
@@ -863,14 +921,14 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
 
 static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
 {
+       u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD);
        union iwl_geo_tx_power_profiles_cmd cmd;
        u16 len;
        u32 n_bands;
        u32 n_profiles;
        u32 sk = 0;
        int ret;
-       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
-                                          PER_CHAIN_LIMIT_OFFSET_CMD,
+       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
                                           IWL_FW_CMD_VER_UNKNOWN);
 
        BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, ops) !=
@@ -948,167 +1006,18 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
                            IWL_UCODE_TLV_API_SAR_TABLE_VER))
                cmd.v2.table_revision = cpu_to_le32(sk);
 
-       return iwl_mvm_send_cmd_pdu(mvm,
-                                   WIDE_ID(PHY_OPS_GROUP,
-                                           PER_CHAIN_LIMIT_OFFSET_CMD),
-                                   0, len, &cmd);
-}
-
-static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
-{
-       union acpi_object *wifi_pkg, *data, *flags;
-       int i, j, ret, tbl_rev, num_sub_bands;
-       int idx = 2;
-
-       mvm->fwrt.ppag_flags = 0;
-
-       data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD);
-       if (IS_ERR(data))
-               return PTR_ERR(data);
-
-       /* try to read ppag table rev 2 or 1 (both have the same data size) */
-       wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
-                                        ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
-       if (!IS_ERR(wifi_pkg)) {
-               if (tbl_rev == 1 || tbl_rev == 2) {
-                       num_sub_bands = IWL_NUM_SUB_BANDS_V2;
-                       IWL_DEBUG_RADIO(mvm,
-                                       "Reading PPAG table v2 (tbl_rev=%d)\n",
-                                       tbl_rev);
-                       goto read_table;
-               } else {
-                       ret = -EINVAL;
-                       goto out_free;
-               }
-       }
-
-       /* try to read ppag table revision 0 */
-       wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
-                                        ACPI_PPAG_WIFI_DATA_SIZE_V1, &tbl_rev);
-       if (!IS_ERR(wifi_pkg)) {
-               if (tbl_rev != 0) {
-                       ret = -EINVAL;
-                       goto out_free;
-               }
-               num_sub_bands = IWL_NUM_SUB_BANDS_V1;
-               IWL_DEBUG_RADIO(mvm, "Reading PPAG table v1 (tbl_rev=0)\n");
-               goto read_table;
-       }
-       ret = PTR_ERR(wifi_pkg);
-       goto out_free;
-
-read_table:
-       mvm->fwrt.ppag_ver = tbl_rev;
-       flags = &wifi_pkg->package.elements[1];
-
-       if (flags->type != ACPI_TYPE_INTEGER) {
-               ret = -EINVAL;
-               goto out_free;
-       }
-
-       mvm->fwrt.ppag_flags = flags->integer.value & IWL_PPAG_MASK;
-
-       if (!mvm->fwrt.ppag_flags) {
-               ret = 0;
-               goto out_free;
-       }
-
-       /*
-        * read, verify gain values and save them into the PPAG table.
-        * first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the
-        * following sub-bands to High-Band (5GHz).
-        */
-       for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
-               for (j = 0; j < num_sub_bands; j++) {
-                       union acpi_object *ent;
-
-                       ent = &wifi_pkg->package.elements[idx++];
-                       if (ent->type != ACPI_TYPE_INTEGER) {
-                               ret = -EINVAL;
-                               goto out_free;
-                       }
-
-                       mvm->fwrt.ppag_chains[i].subbands[j] = ent->integer.value;
-
-                       if ((j == 0 &&
-                            (mvm->fwrt.ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB ||
-                             mvm->fwrt.ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) ||
-                           (j != 0 &&
-                            (mvm->fwrt.ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB ||
-                             mvm->fwrt.ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) {
-                               mvm->fwrt.ppag_flags = 0;
-                               ret = -EINVAL;
-                               goto out_free;
-                       }
-               }
-       }
-
-       ret = 0;
-out_free:
-       kfree(data);
-       return ret;
+       return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
 }
 
 int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
 {
        union iwl_ppag_table_cmd cmd;
-       u8 cmd_ver;
-       int i, j, ret, num_sub_bands, cmd_size;
-       s8 *gain;
-
-       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
-               IWL_DEBUG_RADIO(mvm,
-                               "PPAG capability not supported by FW, command not sent.\n");
-               return 0;
-       }
-       if (!mvm->fwrt.ppag_flags) {
-               IWL_DEBUG_RADIO(mvm, "PPAG not enabled, command not sent.\n");
-               return 0;
-       }
+       int ret, cmd_size;
 
-       /* The 'flags' field is the same in v1 and in v2 so we can just
-        * use v1 to access it.
-        */
-       cmd.v1.flags = cpu_to_le32(mvm->fwrt.ppag_flags);
-       cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
-                                       PER_PLATFORM_ANT_GAIN_CMD,
-                                       IWL_FW_CMD_VER_UNKNOWN);
-       if (cmd_ver == 1) {
-               num_sub_bands = IWL_NUM_SUB_BANDS_V1;
-               gain = cmd.v1.gain[0];
-               cmd_size = sizeof(cmd.v1);
-               if (mvm->fwrt.ppag_ver == 1 || mvm->fwrt.ppag_ver == 2) {
-                       IWL_DEBUG_RADIO(mvm,
-                                       "PPAG table rev is %d but FW supports v1, sending truncated table\n",
-                                       mvm->fwrt.ppag_ver);
-                       cmd.v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
-               }
-       } else if (cmd_ver == 2 || cmd_ver == 3) {
-               num_sub_bands = IWL_NUM_SUB_BANDS_V2;
-               gain = cmd.v2.gain[0];
-               cmd_size = sizeof(cmd.v2);
-               if (mvm->fwrt.ppag_ver == 0) {
-                       IWL_DEBUG_RADIO(mvm,
-                                       "PPAG table is v1 but FW supports v2, sending padded table\n");
-               } else if (cmd_ver == 2 && mvm->fwrt.ppag_ver == 2) {
-                       IWL_DEBUG_RADIO(mvm,
-                                       "PPAG table is v3 but FW supports v2, sending partial bitmap.\n");
-                       cmd.v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
-               }
-       } else {
-               IWL_DEBUG_RADIO(mvm, "Unsupported PPAG command version\n");
-               return 0;
-       }
+       ret = iwl_read_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
+       if (ret < 0)
+               return ret;
 
-       for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
-               for (j = 0; j < num_sub_bands; j++) {
-                       gain[i * num_sub_bands + j] =
-                               mvm->fwrt.ppag_chains[i].subbands[j];
-                       IWL_DEBUG_RADIO(mvm,
-                                       "PPAG table: chain[%d] band[%d]: gain = %d\n",
-                                       i, j, gain[i * num_sub_bands + j]);
-               }
-       }
        IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
        ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
                                                PER_PLATFORM_ANT_GAIN_CMD),
@@ -1120,40 +1029,11 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
        return ret;
 }
 
-static const struct dmi_system_id dmi_ppag_approved_list[] = {
-       { .ident = "HP",
-         .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "HP"),
-               },
-       },
-       { .ident = "SAMSUNG",
-         .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
-               },
-       },
-       { .ident = "MSFT",
-         .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
-               },
-       },
-       { .ident = "ASUS",
-         .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."),
-               },
-       },
-       {}
-};
-
 static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
 {
        /* no need to read the table, done in INIT stage */
-       if (!dmi_check_system(dmi_ppag_approved_list)) {
-               IWL_DEBUG_RADIO(mvm,
-                               "System vendor '%s' is not in the approved list, disabling PPAG.\n",
-                               dmi_get_system_info(DMI_SYS_VENDOR));
-               mvm->fwrt.ppag_flags = 0;
+       if (!iwl_acpi_is_ppag_approved(&mvm->fwrt))
                return 0;
-       }
 
        return iwl_mvm_ppag_send_cmd(mvm);
 }
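
The hunks above move all PPAG table parsing out of mvm/fw.c into the shared
fw/acpi code. A minimal sketch of the resulting call flow, using only the
helpers visible in the diff (a hypothetical composite caller, for
illustration):

    static int example_ppag_flow(struct iwl_mvm *mvm)
    {
            int ret;

            /* 1) parse the BIOS table into mvm->fwrt (now in fw/acpi) */
            ret = iwl_acpi_get_ppag_table(&mvm->fwrt);
            if (ret < 0)
                    return ret;     /* table invalid or unavailable */

            /* 2) the vendor allow-list check also moved to fw/acpi */
            if (!iwl_acpi_is_ppag_approved(&mvm->fwrt))
                    return 0;       /* PPAG quietly disabled */

            /* 3) build and send PER_PLATFORM_ANT_GAIN_CMD */
            return iwl_mvm_ppag_send_cmd(mvm);
    }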
@@ -1205,11 +1085,12 @@ static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigne
 
 static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
 {
+       u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
        int ret;
-       struct iwl_tas_config_cmd_v3 cmd = {};
-       int cmd_size;
+       union iwl_tas_config_cmd cmd = {};
+       int cmd_size, fw_ver;
 
-       BUILD_BUG_ON(ARRAY_SIZE(cmd.block_list_array) <
+       BUILD_BUG_ON(ARRAY_SIZE(cmd.v3.block_list_array) <
                     APCI_WTAS_BLACK_LIST_MAX);
 
        if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
@@ -1217,7 +1098,10 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
                return;
        }
 
-       ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd);
+       fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+                                      IWL_FW_CMD_VER_UNKNOWN);
+
+       ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd, fw_ver);
        if (ret < 0) {
                IWL_DEBUG_RADIO(mvm,
                                "TAS table invalid or unavailable. (%d)\n",
@@ -1232,25 +1116,24 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
                IWL_DEBUG_RADIO(mvm,
                                "System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
                                dmi_get_system_info(DMI_SYS_VENDOR));
-               if ((!iwl_mvm_add_to_tas_block_list(cmd.block_list_array,
-                                                   &cmd.block_list_size, IWL_TAS_US_MCC)) ||
-                   (!iwl_mvm_add_to_tas_block_list(cmd.block_list_array,
-                                                   &cmd.block_list_size, IWL_TAS_CANADA_MCC))) {
+               if ((!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
+                                                   &cmd.v4.block_list_size,
+                                                       IWL_TAS_US_MCC)) ||
+                   (!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
+                                                   &cmd.v4.block_list_size,
+                                                       IWL_TAS_CANADA_MCC))) {
                        IWL_DEBUG_RADIO(mvm,
                                        "Unable to add US/Canada to TAS block list, disabling TAS\n");
                        return;
                }
        }
 
-       cmd_size = iwl_fw_lookup_cmd_ver(mvm->fw, REGULATORY_AND_NVM_GROUP,
-                                        TAS_CONFIG,
-                                        IWL_FW_CMD_VER_UNKNOWN) < 3 ?
+       /* v4 is the same size as v3, so no need to differentiate here */
+       cmd_size = fw_ver < 3 ?
                sizeof(struct iwl_tas_config_cmd_v2) :
                sizeof(struct iwl_tas_config_cmd_v3);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
-                                               TAS_CONFIG),
-                                  0, cmd_size, &cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
        if (ret < 0)
                IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
 }
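
iwl_mvm_tas_init() above now keys the TAS_CONFIG payload length off the
firmware's command version; per the comment in the hunk, v4 reuses the v3
layout, so fw_ver < 3 is the only split that matters for the length. A
minimal sketch of that selection, assuming only what the diff shows:

    static int example_tas_cmd_size(struct iwl_mvm *mvm)
    {
            u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
            int fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
                                               IWL_FW_CMD_VER_UNKNOWN);

            return fw_ver < 3 ? sizeof(struct iwl_tas_config_cmd_v2) :
                                sizeof(struct iwl_tas_config_cmd_v3);
    }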
@@ -1283,7 +1166,7 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
 {
        int ret;
        u32 value;
-       struct iwl_lari_config_change_cmd_v5 cmd = {};
+       struct iwl_lari_config_change_cmd_v6 cmd = {};
 
        cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);
 
@@ -1310,25 +1193,43 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
        if (!ret)
                cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);
 
+       ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
+                                  DSM_FUNC_FORCE_DISABLE_CHANNELS,
+                                  &iwl_guid, &value);
+       if (!ret)
+               cmd.force_disable_channels_bitmap = cpu_to_le32(value);
+
        if (cmd.config_bitmap ||
            cmd.oem_uhb_allow_bitmap ||
            cmd.oem_11ax_allow_bitmap ||
            cmd.oem_unii4_allow_bitmap ||
-           cmd.chan_state_active_bitmap) {
+           cmd.chan_state_active_bitmap ||
+           cmd.force_disable_channels_bitmap) {
                size_t cmd_size;
                u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
-                                                  REGULATORY_AND_NVM_GROUP,
-                                                  LARI_CONFIG_CHANGE, 1);
-               if (cmd_ver == 5)
+                                                  WIDE_ID(REGULATORY_AND_NVM_GROUP,
+                                                          LARI_CONFIG_CHANGE),
+                                                  1);
+               switch (cmd_ver) {
+               case 6:
+                       cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
+                       break;
+               case 5:
                        cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5);
-               else if (cmd_ver == 4)
+                       break;
+               case 4:
                        cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
-               else if (cmd_ver == 3)
+                       break;
+               case 3:
                        cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
-               else if (cmd_ver == 2)
+                       break;
+               case 2:
                        cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
-               else
+                       break;
+               default:
                        cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
+                       break;
+               }
 
                IWL_DEBUG_RADIO(mvm,
                                "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
@@ -1340,8 +1241,9 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
                                le32_to_cpu(cmd.chan_state_active_bitmap),
                                cmd_ver);
                IWL_DEBUG_RADIO(mvm,
-                               "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x\n",
-                               le32_to_cpu(cmd.oem_uhb_allow_bitmap));
+                               "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
+                               le32_to_cpu(cmd.oem_uhb_allow_bitmap),
+                               le32_to_cpu(cmd.force_disable_channels_bitmap));
                ret = iwl_mvm_send_cmd_pdu(mvm,
                                           WIDE_ID(REGULATORY_AND_NVM_GROUP,
                                                   LARI_CONFIG_CHANGE),
@@ -1358,7 +1260,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
        int ret;
 
        /* read PPAG table */
-       ret = iwl_mvm_get_ppag_table(mvm);
+       ret = iwl_acpi_get_ppag_table(&mvm->fwrt);
        if (ret < 0) {
                IWL_DEBUG_RADIO(mvm,
                                "PPAG BIOS table invalid or unavailable. (%d)\n",
@@ -1641,9 +1543,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
         * internal aux station for all aux activities that don't
         * requires a dedicated data queue.
         */
-       if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                 ADD_STA,
-                                 0) < 12) {
+       if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
                 /*
                  * In old version the aux station uses mac id like other
                  * station and not lmac id
@@ -1658,8 +1558,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        while (!sband && i < NUM_NL80211_BANDS)
                sband = mvm->hw->wiphy->bands[i++];
 
-       if (WARN_ON_ONCE(!sband))
+       if (WARN_ON_ONCE(!sband)) {
+               ret = -ENODEV;
                goto error;
+       }
 
        chan = &sband->channels[0];
 
@@ -1800,9 +1702,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
        for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++)
                RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
 
-       if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                 ADD_STA,
-                                 0) < 12) {
+       if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
                /*
                 * Add auxiliary station for scanning.
                 * Newer versions of this command implies that the fw uses
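
A recurring change across these hunks: iwl_fw_lookup_cmd_ver() now takes a
single command id instead of separate group/command arguments. Legacy
(LONG_GROUP) opcodes are passed bare, while grouped commands are wrapped in
WIDE_ID(), which packs the group above the opcode in the command-id
encoding. Both forms, as they appear in the diff:

    u8 add_sta_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0);
    u8 rlc_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
                                       WIDE_ID(DATA_PATH_GROUP,
                                               RLC_CONFIG_CMD), 0);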
index fd7d4ab..5aa4520 100644 (file)
@@ -821,10 +821,7 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
 u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx)
 {
        u16 flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx);
-       bool is_new_rate = iwl_fw_lookup_cmd_ver(fw,
-                                                LONG_GROUP,
-                                                BEACON_TEMPLATE_CMD,
-                                                0) > 10;
+       bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, BEACON_TEMPLATE_CMD, 0) > 10;
 
        if (rate_idx <= IWL_FIRST_CCK_RATE)
                flags |= is_new_rate ? IWL_MAC_BEACON_CCK
@@ -960,8 +957,7 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
        WARN_ON(channel == 0);
        if (cfg80211_channel_is_psc(ctx->def.chan) &&
            !IWL_MVM_DISABLE_AP_FILS) {
-               flags |= iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                              BEACON_TEMPLATE_CMD,
+               flags |= iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD,
                                               0) > 10 ?
                        IWL_MAC_BEACON_FILS :
                        IWL_MAC_BEACON_FILS_V1;
@@ -1458,8 +1454,9 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
        struct sk_buff *skb;
        u8 *data;
        u32 size = le32_to_cpu(sb->byte_count);
-       int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PROT_OFFLOAD_GROUP,
-                                       STORED_BEACON_NTF, 0);
+       int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+                                       WIDE_ID(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF),
+                                       0);
 
        if (size == 0)
                return;
@@ -1602,6 +1599,18 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
                RCU_INIT_POINTER(mvm->csa_vif, NULL);
                return;
        case NL80211_IFTYPE_STATION:
+               /*
+                * if we don't know about an ongoing channel switch,
+                * make sure FW cancels it
+                */
+               if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+                                           CHANNEL_SWITCH_ERROR_NOTIF,
+                                           0) && !vif->csa_active) {
+                       IWL_DEBUG_INFO(mvm, "Channel Switch was canceled\n");
+                       iwl_mvm_cancel_channel_switch(mvm, vif, mac_id);
+                       break;
+               }
+
                iwl_mvm_csa_client_absent(mvm, vif);
                cancel_delayed_work(&mvmvif->csa_work);
                ieee80211_chswitch_done(vif, true);
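
The channel-switch hunks here and below gate the new behavior on whether the
firmware advertises CHANNEL_SWITCH_ERROR_NOTIF at all; a nonzero
notification version means the FW owns the switch timing. A sketch of that
feature test, mirroring the lookup used in the diff (helper name invented):

    static bool example_fw_times_channel_switch(struct iwl_mvm *mvm)
    {
            return iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
                                           CHANNEL_SWITCH_ERROR_NOTIF,
                                           0) > 0;
    }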
@@ -1615,6 +1624,31 @@ out_unlock:
        rcu_read_unlock();
 }
 
+void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
+                                       struct iwl_rx_cmd_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_channel_switch_error_notif *notif = (void *)pkt->data;
+       struct ieee80211_vif *vif;
+       u32 id = le32_to_cpu(notif->mac_id);
+       u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask);
+
+       rcu_read_lock();
+       vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
+       if (!vif) {
+               rcu_read_unlock();
+               return;
+       }
+
+       IWL_DEBUG_INFO(mvm, "FW reports CSA error: mac_id=%u, csa_err_mask=%u\n",
+                      id, csa_err_mask);
+       if (csa_err_mask & (CS_ERR_COUNT_ERROR |
+                           CS_ERR_LONG_DELAY_AFTER_CS |
+                           CS_ERR_TX_BLOCK_TIMER_EXPIRED))
+               ieee80211_channel_switch_disconnect(vif, true);
+       rcu_read_unlock();
+}
+
 void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
                                 struct iwl_rx_cmd_buffer *rxb)
 {
index 709a3df..784d912 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -374,28 +374,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                hw->wiphy->n_cipher_suites++;
        }
 
-       /* currently FW API supports only one optional cipher scheme */
-       if (mvm->fw->cs[0].cipher) {
-               const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0];
-               struct ieee80211_cipher_scheme *cs = &mvm->cs[0];
-
-               mvm->hw->n_cipher_schemes = 1;
-
-               cs->cipher = le32_to_cpu(fwcs->cipher);
-               cs->iftype = BIT(NL80211_IFTYPE_STATION);
-               cs->hdr_len = fwcs->hdr_len;
-               cs->pn_len = fwcs->pn_len;
-               cs->pn_off = fwcs->pn_off;
-               cs->key_idx_off = fwcs->key_idx_off;
-               cs->key_idx_mask = fwcs->key_idx_mask;
-               cs->key_idx_shift = fwcs->key_idx_shift;
-               cs->mic_len = fwcs->mic_len;
-
-               mvm->hw->cipher_schemes = mvm->cs;
-               mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher;
-               hw->wiphy->n_cipher_suites++;
-       }
-
        if (fw_has_capa(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) {
                wiphy_ext_feature_set(hw->wiphy,
@@ -553,8 +531,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                        IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
                hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
 
-       if (iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
-                                 WOWLAN_KEK_KCK_MATERIAL,
+       if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL,
                                  IWL_FW_CMD_VER_UNKNOWN) == 3)
                hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
 
@@ -567,9 +544,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        }
 
        if (iwl_mvm_is_oce_supported(mvm)) {
-               u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
-                                                   IWL_ALWAYS_LONG_GROUP,
-                                                   SCAN_REQ_UMAC, 0);
+               u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, 0);
 
                wiphy_ext_feature_set(hw->wiphy,
                        NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
@@ -1154,7 +1129,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
 
        /* async_handlers_wk is now blocked */
 
-       if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA, 0) < 12)
+       if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12)
                iwl_mvm_rm_aux_sta(mvm);
 
        iwl_mvm_stop_device(mvm);
@@ -1246,6 +1221,7 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                s16 tx_power)
 {
+       u32 cmd_id = REDUCE_TX_POWER_CMD;
        int len;
        struct iwl_dev_tx_power_cmd cmd = {
                .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
@@ -1253,14 +1229,15 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                        cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
                .common.pwr_restriction = cpu_to_le16(8 * tx_power),
        };
-       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                          REDUCE_TX_POWER_CMD,
+       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
                                           IWL_FW_CMD_VER_UNKNOWN);
 
        if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
                cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
 
-       if (cmd_ver == 6)
+       if (cmd_ver == 7)
+               len = sizeof(cmd.v7);
+       else if (cmd_ver == 6)
                len = sizeof(cmd.v6);
        else if (fw_has_api(&mvm->fw->ucode_capa,
                            IWL_UCODE_TLV_API_REDUCE_TX_POWER))
@@ -1274,7 +1251,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        /* all structs have the same common part, add it */
        len += sizeof(cmd.common);
 
-       return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+       return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
 }
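
iwl_mvm_set_tx_power() above shows the versioned-union pattern used
throughout the driver: every command layout starts with the same common
header, and only that header plus the per-version tail is sent. A
self-contained toy model of the length computation (struct names invented;
the real definitions live in the fw/api headers):

    #include <stddef.h>

    struct common_hdr { unsigned int set_mode, mac_id; };
    struct tail_v6 { short per_chain[16]; };
    struct tail_v7 { short per_chain[16]; unsigned int flags; };

    /* pick the per-version tail, then add the shared header */
    static size_t example_cmd_len(int cmd_ver)
    {
            size_t len = (cmd_ver == 7) ? sizeof(struct tail_v7)
                                        : sizeof(struct tail_v6);

            return sizeof(struct common_hdr) + len;
    }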
 
 static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
@@ -1335,6 +1312,15 @@ static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
                .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
        };
 
+       /*
+        * In the new flow, since the FW is in charge of the timing, if the
+        * driver has canceled the channel switch it will receive the
+        * CHANNEL_SWITCH_START_NOTIF notification from the FW and cancel it then
+        */
+       if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+                                   CHANNEL_SWITCH_ERROR_NOTIF, 0))
+               return;
+
        IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id);
 
        mutex_lock(&mvm->mutex);
@@ -1845,11 +1831,108 @@ static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
        return res;
 }
 
+static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm,
+                             struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss,
+                             u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit)
+{
+       int i;
+
+       /*
+        * FW currently supports only nss == MAX_HE_SUPP_NSS
+        *
+        * If nss > MAX: we can ignore values we don't support
+        * If nss < MAX: we can set zeros in other streams
+       if (nss > MAX_HE_SUPP_NSS) {
+               IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
+                        MAX_HE_SUPP_NSS);
+               nss = MAX_HE_SUPP_NSS;
+       }
+
+       for (i = 0; i < nss; i++) {
+               u8 ru_index_tmp = ru_index_bitmap << 1;
+               u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE;
+               u8 bw;
+
+               for (bw = 0;
+                    bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+                    bw++) {
+                       ru_index_tmp >>= 1;
+
+                       if (!(ru_index_tmp & 1))
+                               continue;
+
+                       high_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
+                       ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+                       low_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
+                       ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+
+                       pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
+                       pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
+               }
+       }
+}
+
+static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm,
+                                           struct ieee80211_sta *sta,
+                                           struct iwl_he_pkt_ext_v2 *pkt_ext)
+{
+       u8 nss = (sta->he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1;
+       u8 *ppe = &sta->he_cap.ppe_thres[0];
+       u8 ru_index_bitmap =
+               u8_get_bits(*ppe,
+                           IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
+       /* Starting after PPE header */
+       u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE;
+
+       iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit);
+}
+
+static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext,
+                                                    u8 nominal_padding,
+                                                    u32 *flags)
+{
+       int low_th = -1;
+       int high_th = -1;
+       int i;
+
+       switch (nominal_padding) {
+       case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US:
+               low_th = IWL_HE_PKT_EXT_NONE;
+               high_th = IWL_HE_PKT_EXT_NONE;
+               break;
+       case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US:
+               low_th = IWL_HE_PKT_EXT_BPSK;
+               high_th = IWL_HE_PKT_EXT_NONE;
+               break;
+       case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US:
+               low_th = IWL_HE_PKT_EXT_NONE;
+               high_th = IWL_HE_PKT_EXT_BPSK;
+               break;
+       }
+
+       /* Set the PPE thresholds accordingly */
+       if (low_th >= 0 && high_th >= 0) {
+               for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
+                       u8 bw;
+
+                       for (bw = 0;
+                            bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+                            bw++) {
+                               pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
+                               pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
+                       }
+               }
+
+               *flags |= STA_CTXT_HE_PACKET_EXT;
+       }
+}
+
 static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
                               struct ieee80211_vif *vif, u8 sta_id)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
+       struct iwl_he_sta_context_cmd_v3 sta_ctxt_cmd = {
                .sta_id = sta_id,
                .tid_limit = IWL_MAX_TID_COUNT,
                .bss_color = vif->bss_conf.he_bss_color.color,
@@ -1857,16 +1940,39 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
                .frame_time_rts_th =
                        cpu_to_le16(vif->bss_conf.frame_time_rts_th),
        };
-       int size = fw_has_api(&mvm->fw->ucode_capa,
-                             IWL_UCODE_TLV_API_MBSSID_HE) ?
-                  sizeof(sta_ctxt_cmd) :
-                  sizeof(struct iwl_he_sta_context_cmd_v1);
+       struct iwl_he_sta_context_cmd_v2 sta_ctxt_cmd_v2 = {};
+       u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, STA_HE_CTXT_CMD);
+       u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 2);
+       int size;
        struct ieee80211_sta *sta;
        u32 flags;
        int i;
        const struct ieee80211_sta_he_cap *own_he_cap = NULL;
        struct ieee80211_chanctx_conf *chanctx_conf;
        const struct ieee80211_supported_band *sband;
+       void *cmd;
+
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_MBSSID_HE))
+               ver = 1;
+
+       switch (ver) {
+       case 1:
+               /* same layout as v2 except some data at the end */
+               cmd = &sta_ctxt_cmd_v2;
+               size = sizeof(struct iwl_he_sta_context_cmd_v1);
+               break;
+       case 2:
+               cmd = &sta_ctxt_cmd_v2;
+               size = sizeof(struct iwl_he_sta_context_cmd_v2);
+               break;
+       case 3:
+               cmd = &sta_ctxt_cmd;
+               size = sizeof(struct iwl_he_sta_context_cmd_v3);
+               break;
+       default:
+               IWL_ERR(mvm, "bad STA_HE_CTXT_CMD version %d\n", ver);
+               return;
+       }
 
        rcu_read_lock();
 
@@ -1931,97 +2037,25 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
         * Initialize the PPE thresholds to "None" (7), as described in Table
         * 9-262ac of 80211.ax/D3.0.
         */
-       memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext));
+       memset(&sta_ctxt_cmd.pkt_ext, IWL_HE_PKT_EXT_NONE,
+              sizeof(sta_ctxt_cmd.pkt_ext));
 
        /* If PPE Thresholds exist, parse them into a FW-familiar format. */
        if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
-           IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
-               u8 nss = (sta->he_cap.ppe_thres[0] &
-                         IEEE80211_PPE_THRES_NSS_MASK) + 1;
-               u8 ru_index_bitmap =
-                       (sta->he_cap.ppe_thres[0] &
-                        IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
-                       IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
-               u8 *ppe = &sta->he_cap.ppe_thres[0];
-               u8 ppe_pos_bit = 7; /* Starting after PPE header */
-
-               /*
-                * FW currently supports only nss == MAX_HE_SUPP_NSS
-                *
-                * If nss > MAX: we can ignore values we don't support
-                * If nss < MAX: we can set zeros in other streams
-                */
-               if (nss > MAX_HE_SUPP_NSS) {
-                       IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
-                                MAX_HE_SUPP_NSS);
-                       nss = MAX_HE_SUPP_NSS;
-               }
-
-               for (i = 0; i < nss; i++) {
-                       u8 ru_index_tmp = ru_index_bitmap << 1;
-                       u8 bw;
-
-                       for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) {
-                               ru_index_tmp >>= 1;
-                               if (!(ru_index_tmp & 1))
-                                       continue;
-
-                               sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] =
-                                       iwl_mvm_he_get_ppe_val(ppe,
-                                                              ppe_pos_bit);
-                               ppe_pos_bit +=
-                                       IEEE80211_PPE_THRES_INFO_PPET_SIZE;
-                               sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] =
-                                       iwl_mvm_he_get_ppe_val(ppe,
-                                                              ppe_pos_bit);
-                               ppe_pos_bit +=
-                                       IEEE80211_PPE_THRES_INFO_PPET_SIZE;
-                       }
-               }
-
+               IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+               iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta,
+                                               &sta_ctxt_cmd.pkt_ext);
                flags |= STA_CTXT_HE_PACKET_EXT;
-       } else if (u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
-                              IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)
-                  != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) {
-               int low_th = -1;
-               int high_th = -1;
-
-               /* Take the PPE thresholds from the nominal padding info */
-               switch (u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
-                                   IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)) {
-               case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US:
-                       low_th = IWL_HE_PKT_EXT_NONE;
-                       high_th = IWL_HE_PKT_EXT_NONE;
-                       break;
-               case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US:
-                       low_th = IWL_HE_PKT_EXT_BPSK;
-                       high_th = IWL_HE_PKT_EXT_NONE;
-                       break;
-               case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US:
-                       low_th = IWL_HE_PKT_EXT_NONE;
-                       high_th = IWL_HE_PKT_EXT_BPSK;
-                       break;
-               }
-
-               /* Set the PPE thresholds accordingly */
-               if (low_th >= 0 && high_th >= 0) {
-                       struct iwl_he_pkt_ext *pkt_ext =
-                               (struct iwl_he_pkt_ext *)&sta_ctxt_cmd.pkt_ext;
-
-                       for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
-                               u8 bw;
-
-                               for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX;
-                                    bw++) {
-                                       pkt_ext->pkt_ext_qam_th[i][bw][0] =
-                                               low_th;
-                                       pkt_ext->pkt_ext_qam_th[i][bw][1] =
-                                               high_th;
-                               }
-                       }
-
-                       flags |= STA_CTXT_HE_PACKET_EXT;
-               }
+       /* PPE thresholds don't exist - set the API PPE values
+        * according to the Common Nominal Packet Padding field. */
+       } else {
+               u8 nominal_padding =
+                       u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
+                                   IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
+               if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED)
+                       iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext,
+                                                                nominal_padding,
+                                                                &flags);
        }
 
        if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
@@ -2084,9 +2118,46 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
 
        sta_ctxt_cmd.flags = cpu_to_le32(flags);
 
-       if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
-                                                DATA_PATH_GROUP, 0),
-                                0, size, &sta_ctxt_cmd))
+       if (ver < 3) {
+               /* fields before pkt_ext */
+               BUILD_BUG_ON(offsetof(typeof(sta_ctxt_cmd), pkt_ext) !=
+                            offsetof(typeof(sta_ctxt_cmd_v2), pkt_ext));
+               memcpy(&sta_ctxt_cmd_v2, &sta_ctxt_cmd,
+                      offsetof(typeof(sta_ctxt_cmd), pkt_ext));
+
+               /* pkt_ext */
+               for (i = 0;
+                    i < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th);
+                    i++) {
+                       u8 bw;
+
+                       for (bw = 0;
+                            bw < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i]);
+                            bw++) {
+                               BUILD_BUG_ON(sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]) !=
+                                            sizeof(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw]));
+
+                               memcpy(&sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw],
+                                      &sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw],
+                                      sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]));
+                       }
+               }
+
+               /* fields after pkt_ext */
+               BUILD_BUG_ON(sizeof(sta_ctxt_cmd) -
+                            offsetofend(typeof(sta_ctxt_cmd), pkt_ext) !=
+                            sizeof(sta_ctxt_cmd_v2) -
+                            offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext));
+               memcpy((u8 *)&sta_ctxt_cmd_v2 +
+                               offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext),
+                      (u8 *)&sta_ctxt_cmd +
+                               offsetofend(typeof(sta_ctxt_cmd), pkt_ext),
+                      sizeof(sta_ctxt_cmd) -
+                               offsetofend(typeof(sta_ctxt_cmd), pkt_ext));
+               sta_ctxt_cmd_v2.reserved3 = 0;
+       }
+
+       if (iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, size, cmd))
                IWL_ERR(mvm, "Failed to config FW to work HE!\n");
 }
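
The ver < 3 branch above down-converts the v3 command to the v2 layout in
three pieces - the fields before pkt_ext, pkt_ext itself element by element
(its inner array dimensions differ), and the tail - with BUILD_BUG_ONs
pinning the layout assumptions. A self-contained toy of the same
offsetof-based pattern, truncating the larger member (struct names
invented):

    #include <stddef.h>
    #include <string.h>

    struct cmd_v3 { int a; char ext[8]; int b; };  /* newer, bigger ext */
    struct cmd_v2 { int a; char ext[4]; int b; };  /* older layout */

    static void example_downconvert(struct cmd_v2 *o, const struct cmd_v3 *n)
    {
            /* fields before the differently-sized member */
            memcpy(o, n, offsetof(struct cmd_v3, ext));
            /* the member itself, truncated to the old size */
            memcpy(o->ext, n->ext, sizeof(o->ext));
            /* fields after it */
            memcpy((char *)o + offsetof(struct cmd_v2, b),
                   (const char *)n + offsetof(struct cmd_v3, b),
                   sizeof(*o) - offsetof(struct cmd_v2, b));
    }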
 
@@ -2301,11 +2372,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                /*
                 * We received a beacon from the associated AP so
                 * remove the session protection.
-                * A firmware with the new API will remove it automatically.
                 */
-               if (!fw_has_capa(&mvm->fw->ucode_capa,
-                                IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
-                       iwl_mvm_stop_session_protection(mvm, vif);
+               iwl_mvm_stop_session_protection(mvm, vif);
 
                iwl_mvm_sf_update(mvm, vif, false);
                WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
@@ -2949,7 +3017,7 @@ static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm,
 
        if (he_cap) {
                /* we know that ours is writable */
-               struct ieee80211_sta_he_cap *he = (void *)he_cap;
+               struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;
 
                he->he_cap_elem.phy_cap_info[0] |=
                        IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
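
The (void *)(uintptr_t) change here, and in the identical ops.c hunk
further down, avoids a direct const-discarding pointer cast by
round-tripping through an integer type. A self-contained sketch of the
idiom (types invented):

    #include <stdint.h>

    struct caps { int x; };

    static void example_write_through_const(const struct caps *ccaps)
    {
            /* we know the underlying object is writable */
            struct caps *caps = (struct caps *)(uintptr_t)ccaps;

            caps->x |= 1;
    }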
@@ -3413,12 +3481,7 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
                /* support HW crypto on TX */
                return 0;
        default:
-               /* currently FW supports only one optional cipher scheme */
-               if (hw->n_cipher_schemes &&
-                   hw->cipher_schemes->cipher == key->cipher)
-                       key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
-               else
-                       return -EOPNOTSUPP;
+               return -EOPNOTSUPP;
        }
 
        switch (cmd) {
@@ -3801,8 +3864,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
                if (fw_has_capa(&mvm->fw->ucode_capa,
                                IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
                        /* Use aux roc framework (HS20) */
-                       if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                                 ADD_STA, 0) >= 12) {
+                       if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12) {
                                u32 lmac_id;
 
                                lmac_id = iwl_mvm_get_lmac_id(mvm->fw,
@@ -4605,6 +4667,15 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
 
                break;
        case NL80211_IFTYPE_STATION:
+               /*
+                * In the new flow, the FW is in charge of timing the switch, so
+                * there is no need for all of this
+                */
+               if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+                                           CHANNEL_SWITCH_ERROR_NOTIF,
+                                           0))
+                       break;
+
                /*
                 * We haven't configured the firmware to be associated yet since
                 * we don't know the dtim period. In this case, the firmware can't
@@ -4676,6 +4747,14 @@ static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
                .cs_mode = chsw->block_tx,
        };
 
+       /*
+        * In the new flow, the FW is in charge of timing the switch, so there
+        * is no need for all of this
+        */
+       if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+                                   CHANNEL_SWITCH_ERROR_NOTIF, 0))
+               return;
+
        if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY))
                return;
 
index d78f407..c6bc85d 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -1064,7 +1064,6 @@ struct iwl_mvm {
 
 
        u32 ciphers[IWL_MVM_NUM_CIPHERS];
-       struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
 
        struct cfg80211_ftm_responder_stats ftm_resp_stats;
        struct {
@@ -1086,7 +1085,6 @@ struct iwl_mvm {
        } cmd_ver;
 
        struct ieee80211_vif *nan_vif;
-#define IWL_MAX_BAID   32
        struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID];
 
        /*
@@ -1106,6 +1104,8 @@ struct iwl_mvm {
 
        unsigned long last_6ghz_passive_scan_jiffies;
        unsigned long last_reset_or_resume_time_jiffies;
+
+       bool sta_remove_requires_queue_remove;
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -1671,6 +1671,8 @@ void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
                                 struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
                                        struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
+                                       struct iwl_rx_cmd_buffer *rxb);
 /* Bindings */
 int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -1932,10 +1934,6 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
 
 void iwl_mvm_stop_device(struct iwl_mvm *mvm);
 
-/* Re-configure the SCD for a queue that has already been configured */
-int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
-                        int tid, int frame_limit, u16 ssn);
-
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
@@ -2085,6 +2083,8 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
 int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm,
                            struct iwl_rfi_lut_entry *rfi_table);
 struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm);
+void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm,
+                                     struct iwl_rx_cmd_buffer *rxb);
 
 static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band)
 {
@@ -2159,8 +2159,7 @@ iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
 
 static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw)
 {
-       u8 ver = iwl_fw_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP,
-                                      SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+       u8 ver = iwl_fw_lookup_cmd_ver(fw, SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
                                       IWL_FW_CMD_VER_UNKNOWN);
        return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ?
                IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2;
index 4188051..c7dabc6 100644 (file)
@@ -47,8 +47,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
        struct iwl_proto_offload_cmd_common *common;
        u32 enabled = 0, size;
        u32 capa_flags = mvm->fw->ucode_capa.flags;
-       int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                       PROT_OFFLOAD_CONFIG_CMD, 0);
+       int ver = iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 0);
 
 #if IS_ENABLED(CONFIG_IPV6)
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
index 1f8b979..b2f33eb 100644 (file)
@@ -24,6 +24,7 @@
 #include "iwl-prph.h"
 #include "rs.h"
 #include "fw/api/scan.h"
+#include "fw/api/rfi.h"
 #include "time-event.h"
 #include "fw-api.h"
 #include "fw/acpi.h"
@@ -32,6 +33,7 @@
 #define DRV_DESCRIPTION        "The new Intel(R) wireless AGN driver for Linux"
 MODULE_DESCRIPTION(DRV_DESCRIPTION);
 MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IWLWIFI);
 
 static const struct iwl_op_mode_ops iwl_mvm_ops;
 static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
@@ -191,7 +193,7 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
 
        if (he_cap) {
                /* we know that ours is writable */
-               struct ieee80211_sta_he_cap *he = (void *)he_cap;
+               struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;
 
                WARN_ON(!he->has_he);
                WARN_ON(!(he->he_cap_elem.phy_cap_info[0] &
@@ -235,7 +237,8 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
         */
        mvm->fw_static_smps_request =
                req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE);
-       ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+       ieee80211_iterate_interfaces(mvm->hw,
+                                    IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
                                     iwl_mvm_intf_dual_chain_req, NULL);
 }
 
@@ -382,6 +385,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
        RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF,
                       iwl_mvm_channel_switch_start_notif,
                       RX_HANDLER_SYNC, struct iwl_channel_switch_start_notif),
+       RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF,
+                      iwl_mvm_channel_switch_error_notif,
+                      RX_HANDLER_ASYNC_UNLOCKED,
+                      struct iwl_channel_switch_error_notif),
        RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
                       iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
                       struct iwl_datapath_monitor_notif),
@@ -390,6 +397,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
                       iwl_mvm_rx_thermal_dual_chain_req,
                       RX_HANDLER_ASYNC_LOCKED,
                       struct iwl_thermal_dual_chain_request),
+
+       RX_HANDLER_GRP(SYSTEM_GROUP, RFI_DEACTIVATE_NOTIF,
+                      iwl_rfi_deactivate_notif_handler, RX_HANDLER_ASYNC_UNLOCKED,
+                      struct iwl_rfi_deactivate_notif),
 };
 #undef RX_HANDLER
 #undef RX_HANDLER_GRP
@@ -443,7 +454,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
        HCMD_NAME(POWER_TABLE_CMD),
        HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
        HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
-       HCMD_NAME(DC2DC_CONFIG_CMD),
        HCMD_NAME(NVM_ACCESS_CMD),
        HCMD_NAME(BEACON_NOTIFICATION),
        HCMD_NAME(BEACON_TEMPLATE_CMD),
@@ -499,6 +509,7 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
        HCMD_NAME(RFI_CONFIG_CMD),
        HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
        HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD),
+       HCMD_NAME(RFI_DEACTIVATE_NOTIF),
 };
 
 /* Please keep this array *SORTED* by hex value.
@@ -535,6 +546,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
        HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
        HCMD_NAME(TLC_MNG_CONFIG_CMD),
        HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
+       HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
        HCMD_NAME(MONITOR_NOTIF),
        HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
        HCMD_NAME(STA_PM_NOTIF),
@@ -633,13 +645,11 @@ unlock:
        mutex_unlock(&mvm->mutex);
 }
 
-static int iwl_mvm_fwrt_dump_start(void *ctx)
+static void iwl_mvm_fwrt_dump_start(void *ctx)
 {
        struct iwl_mvm *mvm = ctx;
 
        mutex_lock(&mvm->mutex);
-
-       return 0;
 }
 
 static void iwl_mvm_fwrt_dump_end(void *ctx)
@@ -1076,12 +1086,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        if (!hw)
                return NULL;
 
-       hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+       hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
 
        if (cfg->max_tx_agg_size)
                hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
        else
-               hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+               hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
 
        op_mode = hw->priv;
 
@@ -1244,6 +1254,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa,
                                                   IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE);
 
+       trans_cfg.queue_alloc_cmd_ver =
+               iwl_fw_lookup_cmd_ver(mvm->fw,
+                                     WIDE_ID(DATA_PATH_GROUP,
+                                             SCD_QUEUE_CONFIG_CMD),
+                                     0);
+       mvm->sta_remove_requires_queue_remove =
+               trans_cfg.queue_alloc_cmd_ver > 0;
+
        /* Configure transport layer */
        iwl_trans_configure(mvm->trans, &trans_cfg);
 
index 9af40b0..a3cefbc 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2017 Intel Deutschland GmbH
  */
@@ -158,8 +158,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
        iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
 
        /* we only support RLC command version 2 */
-       if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
-                                 RLC_CONFIG_CMD, 0) < 2)
+       if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) < 2)
                iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info,
                                             chains_static, chains_dynamic);
 }
@@ -172,8 +171,7 @@ static int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm,
                .phy_id = cpu_to_le32(ctxt->id),
        };
 
-       if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
-                                 RLC_CONFIG_CMD, 0) < 2)
+       if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) < 2)
                return 0;
 
        BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_DRIVER_FORCE !=
@@ -209,8 +207,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
                                  u32 action)
 {
        int ret;
-       int ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
-                                       PHY_CONTEXT_CMD, 1);
+       int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1);
 
        if (ver == 3 || ver == 4) {
                struct iwl_phy_context_cmd cmd = {};
@@ -301,8 +298,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
-                                 RLC_CONFIG_CMD, 0) >= 2 &&
+       if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) >= 2 &&
            ctxt->channel == chandef->chan &&
            ctxt->width == chandef->width &&
            ctxt->center_freq1 == chandef->center_freq1)
@@ -349,18 +345,31 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
         * otherwise we might not be able to reuse this phy.
         */
        if (ctxt->ref == 0) {
-               struct ieee80211_channel *chan;
+               struct ieee80211_channel *chan = NULL;
                struct cfg80211_chan_def chandef;
-               struct ieee80211_supported_band *sband = NULL;
-               enum nl80211_band band = NL80211_BAND_2GHZ;
+               struct ieee80211_supported_band *sband;
+               enum nl80211_band band;
+               int channel;
 
-               while (!sband && band < NUM_NL80211_BANDS)
-                       sband = mvm->hw->wiphy->bands[band++];
+               for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+                       sband = mvm->hw->wiphy->bands[band];
 
-               if (WARN_ON(!sband))
-                       return;
+                       if (!sband)
+                               continue;
+
+                       for (channel = 0; channel < sband->n_channels; channel++)
+                               if (!(sband->channels[channel].flags &
+                                               IEEE80211_CHAN_DISABLED)) {
+                                       chan = &sband->channels[channel];
+                                       break;
+                               }
 
-               chan = &sband->channels[0];
+                       if (chan)
+                               break;
+               }
+
+               if (WARN_ON(!chan))
+                       return;
 
                cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
                iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1);
index 3d0166d..c862bd2 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2021 Intel Corporation
  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
index f054ce7..bb77bc9 100644 (file)
@@ -125,12 +125,19 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
        if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != resp_size))
                return ERR_PTR(-EIO);
 
-       resp = kzalloc(resp_size, GFP_KERNEL);
+       resp = kmemdup(cmd.resp_pkt->data, resp_size, GFP_KERNEL);
        if (!resp)
                return ERR_PTR(-ENOMEM);
 
-       memcpy(resp, cmd.resp_pkt->data, resp_size);
-
        iwl_free_resp(&cmd);
        return resp;
 }
+
+void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm,
+                                     struct iwl_rx_cmd_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_rfi_deactivate_notif *notif = (void *)pkt->data;
+
+       IWL_INFO(mvm, "RFIm is deactivated, reason = %d\n", notif->reason);
+}
index 66808c5..9830d26 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
  */
 #include "rs.h"
 #include "fw-api.h"
@@ -97,7 +97,10 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
 
        if (he_cap->has_he &&
            (he_cap->he_cap_elem.phy_cap_info[3] &
-            IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK))
+            IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK &&
+            sband->iftype_data &&
+            sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[3] &
+            IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK))
                flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;
 
        return flags;
@@ -420,7 +423,7 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        struct ieee80211_hw *hw = mvm->hw;
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
-       u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
+       u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD);
        struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
        u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
        struct iwl_tlc_config_cmd_v4 cfg_cmd = {
@@ -449,8 +452,22 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
         */
        sta->max_amsdu_len = max_amsdu_len;
 
-       cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
-                                       TLC_MNG_CONFIG_CMD, 0);
+       cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+                                       WIDE_ID(DATA_PATH_GROUP,
+                                               TLC_MNG_CONFIG_CMD),
+                                       0);
+       IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
+                      cfg_cmd.sta_id, cfg_cmd.max_ch_width, cfg_cmd.mode);
+       IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, chains=0x%X, ch_wid_supp=%d, flags=0x%X\n",
+                      cfg_cmd.chains, cfg_cmd.sgi_ch_width_supp, cfg_cmd.flags);
+       IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, mpdu_len=%d, no_ht_rate=0x%X, tx_op=%d\n",
+                      cfg_cmd.max_mpdu_len, cfg_cmd.non_ht_rates, cfg_cmd.max_tx_op);
+       IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][0]=0x%X, ht_rate[1][0]=0x%X\n",
+                      cfg_cmd.ht_rates[0][0], cfg_cmd.ht_rates[1][0]);
+       IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][1]=0x%X, ht_rate[1][1]=0x%X\n",
+                      cfg_cmd.ht_rates[0][1], cfg_cmd.ht_rates[1][1]);
+       IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][2]=0x%X, ht_rate[1][2]=0x%X\n",
+                      cfg_cmd.ht_rates[0][2], cfg_cmd.ht_rates[1][2]);
        if (cmd_ver == 4) {
                ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC,
                                           sizeof(cfg_cmd), &cfg_cmd);
@@ -474,8 +491,9 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                u16 cmd_size = sizeof(cfg_cmd_v3);
 
                /* In old versions of the API the struct is 4 bytes smaller */
-               if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
-                                         TLC_MNG_CONFIG_CMD, 0) < 3)
+               if (iwl_fw_lookup_cmd_ver(mvm->fw,
+                                         WIDE_ID(DATA_PATH_GROUP,
+                                                 TLC_MNG_CONFIG_CMD), 0) < 3)
                        cmd_size -= 4;
 
                ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size,
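
The recurring mechanical change in this file (and across the series) swaps iwl_cmd_id() for WIDE_ID(). Both pack the command group into the upper byte of the 16-bit command ID; the dropped third argument of iwl_cmd_id() was a version byte that was zero at every call site touched here. For reference, the macro is simply:

#define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))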
index f4d02f9..6211461 100644 (file)
@@ -454,8 +454,6 @@ static const u16 expected_tpt_mimo2_160MHz[4][IWL_RATE_COUNT] = {
        {0, 0, 0, 0, 971, 0, 1925, 2861, 3779, 5574, 7304, 8147, 8976, 10592, 11640},
 };
 
-#define MCS_INDEX_PER_STREAM   (8)
-
 static const char *rs_pretty_lq_type(enum iwl_table_type type)
 {
        static const char * const lq_types[] = {
index 64446a1..78198da 100644 (file)
@@ -83,8 +83,8 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
        fraglen = len - hdrlen;
 
        if (fraglen) {
-               int offset = (void *)hdr + hdrlen -
-                            rxb_addr(rxb) + rxb_offset(rxb);
+               int offset = (u8 *)hdr + hdrlen -
+                            (u8 *)rxb_addr(rxb) + rxb_offset(rxb);
 
                skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
                                fraglen, rxb->truesize);
@@ -640,7 +640,7 @@ static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        u16 vif_id = mvmvif->id;
 
-       if (WARN_ONCE(vif_id > MAC_INDEX_AUX, "invalid vif id: %d", vif_id))
+       if (WARN_ONCE(vif_id >= MAC_INDEX_AUX, "invalid vif id: %d", vif_id))
                return;
 
        if (vif->type != NL80211_IFTYPE_STATION)
index 5f3128f..2c43a99 100644 (file)
@@ -217,8 +217,8 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
        fraglen = len - headlen;
 
        if (fraglen) {
-               int offset = (void *)hdr + headlen + pad_len -
-                            rxb_addr(rxb) + rxb_offset(rxb);
+               int offset = (u8 *)hdr + headlen + pad_len -
+                            (u8 *)rxb_addr(rxb) + rxb_offset(rxb);
 
                skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
                                fraglen, rxb->truesize);
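
This hunk and the rx.c one above apply the same portability fix: pointer differences are now computed on u8 * rather than void *. Arithmetic on void * is a GNU C extension (it steps in bytes); the casts keep the byte granularity while staying within standard C. A minimal illustration, assuming any byte buffer:

	u8 buf[64];
	void *p = buf;
	/* GNU C accepts p + 1 (advances one byte); ISO C forbids it */
	u8 *q = (u8 *)p + 1;                  /* portable, explicitly one byte */
	ptrdiff_t off = (u8 *)q - (u8 *)p;    /* == 1 */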
index 5f92a09..a407705 100644 (file)
@@ -20,7 +20,6 @@
 #define IWL_SCAN_DWELL_FRAGMENTED      44
 #define IWL_SCAN_DWELL_EXTENDED                90
 #define IWL_SCAN_NUM_OF_FRAGS          3
-#define IWL_SCAN_LAST_2_4_CHN          14
 
 /* adaptive dwell max budget time [TU] for full scan */
 #define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
@@ -98,6 +97,7 @@ struct iwl_mvm_scan_params {
        u32 n_6ghz_params;
        bool scan_6ghz;
        bool enable_6ghz_passive;
+       bool respect_p2p_go, respect_p2p_go_hb;
 };
 
 static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
@@ -169,17 +169,6 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
                return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
 }
 
-static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
-                                           struct ieee80211_vif *vif)
-{
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       int *global_cnt = data;
-
-       if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
-           mvmvif->phy_ctxt->id < NUM_PHY_CTX)
-               *global_cnt += 1;
-}
-
 static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
 {
        return mvm->tcm.result.global_load;
@@ -191,26 +180,31 @@ iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
        return mvm->tcm.result.band_load[band];
 }
 
-struct iwl_is_dcm_with_go_iterator_data {
+struct iwl_mvm_scan_iter_data {
+       u32 global_cnt;
        struct ieee80211_vif *current_vif;
        bool is_dcm_with_p2p_go;
 };
 
-static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac,
-                                           struct ieee80211_vif *vif)
+static void iwl_mvm_scan_iterator(void *_data, u8 *mac,
+                                 struct ieee80211_vif *vif)
 {
-       struct iwl_is_dcm_with_go_iterator_data *data = _data;
-       struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_mvm_vif *curr_mvmvif =
-               iwl_mvm_vif_from_mac80211(data->current_vif);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm_scan_iter_data *data = _data;
+       struct iwl_mvm_vif *curr_mvmvif;
 
-       /* exclude the given vif */
-       if (vif == data->current_vif)
+       if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
+           mvmvif->phy_ctxt->id < NUM_PHY_CTX)
+               data->global_cnt += 1;
+
+       if (!data->current_vif || vif == data->current_vif)
                return;
 
+       curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif);
+
        if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
-           other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
-           other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
+           mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
+           mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
                data->is_dcm_with_p2p_go = true;
 }
 
@@ -220,13 +214,18 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
                                         enum iwl_mvm_traffic_load load,
                                         bool low_latency)
 {
-       int global_cnt = 0;
+       struct iwl_mvm_scan_iter_data data = {
+               .current_vif = vif,
+               .is_dcm_with_p2p_go = false,
+               .global_cnt = 0,
+       };
 
        ieee80211_iterate_active_interfaces_atomic(mvm->hw,
-                                           IEEE80211_IFACE_ITER_NORMAL,
-                                           iwl_mvm_scan_condition_iterator,
-                                           &global_cnt);
-       if (!global_cnt)
+                                                  IEEE80211_IFACE_ITER_NORMAL,
+                                                  iwl_mvm_scan_iterator,
+                                                  &data);
+
+       if (!data.global_cnt)
                return IWL_SCAN_TYPE_UNASSOC;
 
        if (fw_has_api(&mvm->fw->ucode_capa,
@@ -235,23 +234,14 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
                    (!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE))
                        return IWL_SCAN_TYPE_FRAGMENTED;
 
-               /* in case of DCM with GO where BSS DTIM interval < 220msec
+               /*
+                * in case of DCM with GO where BSS DTIM interval < 220msec
                 * set all scan requests as fast-balance scan
-                * */
+                */
                if (vif && vif->type == NL80211_IFTYPE_STATION &&
-                   vif->bss_conf.dtim_period < 220) {
-                       struct iwl_is_dcm_with_go_iterator_data data = {
-                               .current_vif = vif,
-                               .is_dcm_with_p2p_go = false,
-                       };
-
-                       ieee80211_iterate_active_interfaces_atomic(mvm->hw,
-                                               IEEE80211_IFACE_ITER_NORMAL,
-                                               iwl_mvm_is_dcm_with_go_iterator,
-                                               &data);
-                       if (data.is_dcm_with_p2p_go)
-                               return IWL_SCAN_TYPE_FAST_BALANCE;
-               }
+                   vif->bss_conf.dtim_period < 220 &&
+                   data.is_dcm_with_p2p_go)
+                       return IWL_SCAN_TYPE_FAST_BALANCE;
        }
 
        if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
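
With the two iterators merged, _iwl_mvm_get_scan_type() now resolves in a single pass over the active interfaces. A condensed sketch of the resulting decision order (the helper and its boolean parameters are hypothetical; the enum values are the driver's, and the real code additionally gates on firmware capability and vif type):

static enum iwl_mvm_scan_type pick_scan_type(bool any_active_iface,
					     bool fragmented_ok,
					     bool sta_short_dtim_with_p2p_go,
					     bool busy_or_low_latency)
{
	if (!any_active_iface)
		return IWL_SCAN_TYPE_UNASSOC;
	if (fragmented_ok)
		return IWL_SCAN_TYPE_FRAGMENTED;
	if (sta_short_dtim_with_p2p_go)
		return IWL_SCAN_TYPE_FAST_BALANCE;
	return busy_or_low_latency ? IWL_SCAN_TYPE_MILD : IWL_SCAN_TYPE_WILD;
}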
@@ -651,9 +641,7 @@ static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
                                                           NL80211_BAND_2GHZ,
                                                           no_cck);
 
-       if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                 ADD_STA,
-                                 0) < 12) {
+       if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
                tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
                tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
 
@@ -1090,8 +1078,7 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
        memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
 
        /* This function should not be called when using ADD_STA ver >=12 */
-       WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                          ADD_STA, 0) >= 12);
+       WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12);
 
        cfg->bcast_sta_id = mvm->aux_sta.sta_id;
        cfg->channel_flags = channel_flags;
@@ -1142,8 +1129,7 @@ static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config,
        memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
 
        /* This function should not be called when using ADD_STA ver >=12 */
-       WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                          ADD_STA, 0) >= 12);
+       WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12);
 
        cfg->bcast_sta_id = mvm->aux_sta.sta_id;
        cfg->channel_flags = channel_flags;
@@ -1156,7 +1142,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
        void *cfg;
        int ret, cmd_size;
        struct iwl_host_cmd cmd = {
-               .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+               .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
        };
        enum iwl_mvm_scan_type type;
        enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET;
@@ -1247,7 +1233,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
 {
        struct iwl_scan_config cfg;
        struct iwl_host_cmd cmd = {
-               .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+               .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
                .len[0] = sizeof(cfg),
                .data[0] = &cfg,
                .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
@@ -1258,11 +1244,9 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
 
        memset(&cfg, 0, sizeof(cfg));
 
-       if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                 ADD_STA, 0) < 12) {
+       if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
                cfg.bcast_sta_id = mvm->aux_sta.sta_id;
-       } else if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                        SCAN_CFG_CMD, 0) < 5) {
+       } else if (iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_CFG_CMD, 0) < 5) {
                /*
                 * Fw doesn't use this sta anymore. Deprecated on SCAN_CFG_CMD
                 * version 5.
@@ -1662,7 +1646,7 @@ iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm,
        }
 }
 
-static int
+static void
 iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
                                    struct iwl_mvm_scan_params *params,
                                     struct iwl_scan_probe_params_v4 *pp)
@@ -1731,31 +1715,40 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
 
        pp->short_ssid_num = idex_s;
        pp->bssid_num = idex_b;
-       return 0;
 }
 
 /* TODO: this function can be merged with iwl_mvm_scan_umac_fill_ch_p_v6 */
-static void
-iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
+static u32
+iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm *mvm,
+                                    struct iwl_mvm_scan_params *params,
                                     u32 n_channels,
                                     struct iwl_scan_probe_params_v4 *pp,
                                     struct iwl_scan_channel_params_v6 *cp,
                                     enum nl80211_iftype vif_type)
 {
-       struct iwl_scan_channel_cfg_umac *channel_cfg = cp->channel_config;
        int i;
        struct cfg80211_scan_6ghz_params *scan_6ghz_params =
                params->scan_6ghz_params;
+       u32 ch_cnt;
 
-       for (i = 0; i < params->n_channels; i++) {
+       for (i = 0, ch_cnt = 0; i < params->n_channels; i++) {
                struct iwl_scan_channel_cfg_umac *cfg =
-                       &cp->channel_config[i];
+                       &cp->channel_config[ch_cnt];
 
                u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
                u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries;
                bool force_passive, found = false, allow_passive = true,
                     unsolicited_probe_on_chan = false, psc_no_listen = false;
 
+               /*
+                * Avoid performing passive scan on non PSC channels unless the
+                * scan is specifically a passive scan, i.e., no SSIDs
+                * configured in the scan command.
+                */
+               if (!cfg80211_channel_is_psc(params->channels[i]) &&
+                   !params->n_6ghz_params && params->n_ssids)
+                       continue;
+
                cfg->v1.channel_num = params->channels[i]->hw_value;
                cfg->v2.band = 2;
                cfg->v2.iter_count = 1;
@@ -1875,8 +1868,16 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
                else
                        flags |= bssid_bitmap | (s_ssid_bitmap << 16);
 
-               channel_cfg[i].flags |= cpu_to_le32(flags);
+               cfg->flags |= cpu_to_le32(flags);
+               ch_cnt++;
        }
+
+       if (params->n_channels > ch_cnt)
+               IWL_DEBUG_SCAN(mvm,
+                              "6GHz: reducing number of channels (%u->%u)\n",
+                              params->n_channels, ch_cnt);
+
+       return ch_cnt;
 }
 
 static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
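
For context, the PSC test in the new skip logic is cfg80211's inline helper: 6 GHz preferred-scanning channels are the 20 MHz channels numbered 5, 21, 37, and so on (every sixteenth). As defined in include/net/cfg80211.h at the time:

static inline bool cfg80211_channel_is_psc(struct ieee80211_channel *chan)
{
	if (chan->band != NL80211_BAND_6GHZ)
		return false;

	return ieee80211_frequency_to_channel(chan->center_freq) % 16 == 5;
}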
@@ -1893,9 +1894,25 @@ static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
                        IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
 
        /* set fragmented ebs for fragmented scan on HB channels */
-       if (iwl_mvm_is_scan_fragmented(params->hb_type))
+       if ((!iwl_mvm_is_cdb_supported(mvm) &&
+            iwl_mvm_is_scan_fragmented(params->type)) ||
+           (iwl_mvm_is_cdb_supported(mvm) &&
+            iwl_mvm_is_scan_fragmented(params->hb_type)))
                flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
 
+       /*
+        * Force EBS when the scan is fragmented and P2P GO operation needs
+        * to be taken into consideration during the scan.
+        */
+       if ((!iwl_mvm_is_cdb_supported(mvm) &&
+            iwl_mvm_is_scan_fragmented(params->type) && params->respect_p2p_go) ||
+           (iwl_mvm_is_cdb_supported(mvm) &&
+            iwl_mvm_is_scan_fragmented(params->hb_type) &&
+            params->respect_p2p_go_hb)) {
+               IWL_DEBUG_SCAN(mvm, "Respect P2P GO. Force EBS\n");
+               flags |= IWL_SCAN_CHANNEL_FLAG_FORCE_EBS;
+       }
+
        return flags;
 }
 
@@ -2046,6 +2063,26 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
        return flags;
 }
 
+static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm,
+                                  struct iwl_mvm_scan_params *params,
+                                  struct ieee80211_vif *vif, int type)
+{
+       u8 flags = 0;
+
+       if (iwl_mvm_is_cdb_supported(mvm)) {
+               if (params->respect_p2p_go)
+                       flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB;
+               if (params->respect_p2p_go_hb)
+                       flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
+       } else {
+               if (params->respect_p2p_go)
+                       flags = IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB |
+                               IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
+       }
+
+       return flags;
+}
+
 static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
                                   struct iwl_mvm_scan_params *params,
                                   struct ieee80211_vif *vif)
@@ -2164,7 +2201,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
        struct iwl_scan_umac_chan_param *chan_param;
        void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
-       void *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) *
+       void *sec_part = (u8 *)cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) *
                mvm->fw->ucode_capa.n_scan_channels;
        struct iwl_scan_req_umac_tail_v2 *tail_v2 =
                (struct iwl_scan_req_umac_tail_v2 *)sec_part;
@@ -2248,13 +2285,17 @@ iwl_mvm_scan_umac_fill_general_p_v11(struct iwl_mvm *mvm,
                                     struct iwl_mvm_scan_params *params,
                                     struct ieee80211_vif *vif,
                                     struct iwl_scan_general_params_v11 *gp,
-                                    u16 gen_flags)
+                                    u16 gen_flags, u8 gen_flags2)
 {
        struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
 
        iwl_mvm_scan_umac_dwell_v11(mvm, gp, params);
 
+       IWL_DEBUG_SCAN(mvm, "Gerenal: flags=0x%x, flags2=0x%x\n",
+                      gen_flags, gen_flags2);
+
        gp->flags = cpu_to_le16(gen_flags);
+       gp->flags2 = gen_flags2;
 
        if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
                gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
@@ -2358,7 +2399,7 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
        iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif,
                                             &scan_p->general_params,
-                                            gen_flags);
+                                            gen_flags, 0);
 
        ret = iwl_mvm_fill_scan_sched_params(params,
                                             scan_p->periodic_params.schedule,
@@ -2384,6 +2425,7 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
        struct iwl_scan_probe_params_v4 *pb = &scan_p->probe_params;
        int ret;
        u16 gen_flags;
+       u8 gen_flags2;
        u32 bitmap_ssid = 0;
 
        mvm->scan_uid_status[uid] = type;
@@ -2392,9 +2434,15 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
        cmd->uid = cpu_to_le32(uid);
 
        gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
+
+       if (version >= 15)
+               gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type);
+       else
+               gen_flags2 = 0;
+
        iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif,
                                             &scan_p->general_params,
-                                            gen_flags);
+                                            gen_flags, gen_flags2);
 
        ret = iwl_mvm_fill_scan_sched_params(params,
                                             scan_p->periodic_params.schedule,
@@ -2417,14 +2465,16 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
        cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
        cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
 
-       ret = iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb);
-       if (ret)
-               return ret;
+       iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb);
+
+       cp->count = iwl_mvm_umac_scan_cfg_channels_v6_6g(mvm, params,
+                                                        params->n_channels,
+                                                        pb, cp, vif->type);
+       if (!cp->count) {
+               mvm->scan_uid_status[uid] = 0;
+               return -EINVAL;
+       }
 
-       iwl_mvm_umac_scan_cfg_channels_v6_6g(params,
-                                            params->n_channels,
-                                            pb, cp, vif->type);
-       cp->count = params->n_channels;
        if (!params->n_ssids ||
            (params->n_ssids == 1 && !params->ssids[0].ssid_len))
                cp->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER;
@@ -2588,10 +2638,9 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
        if (uid < 0)
                return uid;
 
-       hcmd->id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
+       hcmd->id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_REQ_UMAC);
 
-       scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
-                                        SCAN_REQ_UMAC,
+       scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
                                         IWL_FW_CMD_VER_UNKNOWN);
 
        for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
@@ -2611,6 +2660,85 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
        return uid;
 }
 
+struct iwl_mvm_scan_respect_p2p_go_iter_data {
+       struct ieee80211_vif *current_vif;
+       bool p2p_go;
+       enum nl80211_band band;
+};
+
+static void iwl_mvm_scan_respect_p2p_go_iter(void *_data, u8 *mac,
+                                            struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_scan_respect_p2p_go_iter_data *data = _data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       /* exclude the given vif */
+       if (vif == data->current_vif)
+               return;
+
+       if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+           mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX &&
+           (data->band == NUM_NL80211_BANDS ||
+            mvmvif->phy_ctxt->channel->band == data->band))
+               data->p2p_go = true;
+}
+
+static bool _iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
+                                       struct ieee80211_vif *vif,
+                                       bool low_latency,
+                                       enum nl80211_band band)
+{
+       struct iwl_mvm_scan_respect_p2p_go_iter_data data = {
+               .current_vif = vif,
+               .p2p_go = false,
+               .band = band,
+       };
+
+       if (!low_latency)
+               return false;
+
+       ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+                                                  IEEE80211_IFACE_ITER_NORMAL,
+                                                  iwl_mvm_scan_respect_p2p_go_iter,
+                                                  &data);
+
+       return data.p2p_go;
+}
+
+static bool iwl_mvm_get_respect_p2p_go_band(struct iwl_mvm *mvm,
+                                           struct ieee80211_vif *vif,
+                                           enum nl80211_band band)
+{
+       bool low_latency = iwl_mvm_low_latency_band(mvm, band);
+
+       return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency, band);
+}
+
+static bool iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
+                                      struct ieee80211_vif *vif)
+{
+       bool low_latency = iwl_mvm_low_latency(mvm);
+
+       return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency,
+                                          NUM_NL80211_BANDS);
+}
+
+static void iwl_mvm_fill_respect_p2p_go(struct iwl_mvm *mvm,
+                                       struct iwl_mvm_scan_params *params,
+                                       struct ieee80211_vif *vif)
+{
+       if (iwl_mvm_is_cdb_supported(mvm)) {
+               params->respect_p2p_go =
+                       iwl_mvm_get_respect_p2p_go_band(mvm, vif,
+                                                       NL80211_BAND_2GHZ);
+               params->respect_p2p_go_hb =
+                       iwl_mvm_get_respect_p2p_go_band(mvm, vif,
+                                                       NL80211_BAND_5GHZ);
+       } else {
+               params->respect_p2p_go = iwl_mvm_get_respect_p2p_go(mvm, vif);
+       }
+}
+
 int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                           struct cfg80211_scan_request *req,
                           struct ieee80211_scan_ies *ies)
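
Putting the three helpers together, the net effect of iwl_mvm_fill_respect_p2p_go() can be summarized as follows (sketch):

/*
 * CDB HW:   respect_p2p_go    = a P2P GO runs on another iface and the
 *                               2.4 GHz band is low-latency
 *           respect_p2p_go_hb = the same test against the 5 GHz band
 * non-CDB:  respect_p2p_go    = a P2P GO runs on another iface and any
 *                               band is low-latency
 *
 * iwl_mvm_scan_umac_flags2() then maps these to the LB/HB FLAGS2 bits,
 * the single non-CDB answer fanning out to both bits.
 */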
@@ -2662,6 +2790,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        params.scan_6ghz_params = req->scan_6ghz_params;
        params.scan_6ghz = req->scan_6ghz;
        iwl_mvm_fill_scan_type(mvm, &params, vif);
+       iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);
 
        if (req->duration)
                params.iter_notif = true;
@@ -2753,6 +2882,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
        params.scan_plans = req->scan_plans;
 
        iwl_mvm_fill_scan_type(mvm, &params, vif);
+       iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);
 
        /* In theory, LMAC scans can handle a 32-bit delay, but since
         * waiting for over 18 hours to start the scan is a bit silly
@@ -2922,8 +3052,7 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
        IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
 
        ret = iwl_mvm_send_cmd_pdu(mvm,
-                                  iwl_cmd_id(SCAN_ABORT_UMAC,
-                                             IWL_ALWAYS_LONG_GROUP, 0),
+                                  WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
                                   0, sizeof(cmd), &cmd);
        if (!ret)
                mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
@@ -2978,8 +3107,7 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver)
 int iwl_mvm_scan_size(struct iwl_mvm *mvm)
 {
        int base_size, tail_size;
-       u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
-                                           SCAN_REQ_UMAC,
+       u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
                                            IWL_FW_CMD_VER_UNKNOWN);
 
        base_size = iwl_scan_req_umac_get_size(scan_ver);
index feab0bf..c7f9d38 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2015, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2015, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -87,6 +87,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        }
 
        switch (sta->bandwidth) {
+       case IEEE80211_STA_RX_BW_320:
        case IEEE80211_STA_RX_BW_160:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
                fallthrough;
@@ -316,7 +317,7 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
 }
 
 static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                              u16 *queueptr, u8 tid, u8 flags)
+                              u16 *queueptr, u8 tid)
 {
        int queue = *queueptr;
        struct iwl_scd_txq_cfg_cmd cmd = {
@@ -325,11 +326,28 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        };
        int ret;
 
+       lockdep_assert_held(&mvm->mutex);
+
        if (iwl_mvm_has_new_tx_api(mvm)) {
+               if (mvm->sta_remove_requires_queue_remove) {
+                       u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
+                                            SCD_QUEUE_CONFIG_CMD);
+                       struct iwl_scd_queue_cfg_cmd remove_cmd = {
+                               .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
+                               .u.remove.queue = cpu_to_le32(queue),
+                       };
+
+                       ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
+                                                  sizeof(remove_cmd),
+                                                  &remove_cmd);
+               } else {
+                       ret = 0;
+               }
+
                iwl_trans_txq_free(mvm->trans, queue);
                *queueptr = IWL_MVM_INVALID_QUEUE;
 
-               return 0;
+               return ret;
        }
 
        if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
@@ -373,7 +391,7 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        mvm->queue_info[queue].reserved = false;
 
        iwl_trans_txq_disable(mvm->trans, queue, false);
-       ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
+       ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
                                   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
 
        if (ret)
@@ -512,7 +530,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
                iwl_mvm_invalidate_sta_queue(mvm, queue,
                                             disable_agg_tids, false);
 
-       ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0);
+       ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid);
        if (ret) {
                IWL_ERR(mvm,
                        "Failed to free inactive queue %d (ret=%d)\n",
@@ -596,6 +614,39 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
        return queue;
 }
 
+/* Re-configure the SCD for a queue that has already been configured */
+static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
+                               int sta_id, int tid, int frame_limit, u16 ssn)
+{
+       struct iwl_scd_txq_cfg_cmd cmd = {
+               .scd_queue = queue,
+               .action = SCD_CFG_ENABLE_QUEUE,
+               .window = frame_limit,
+               .sta_id = sta_id,
+               .ssn = cpu_to_le16(ssn),
+               .tx_fifo = fifo,
+               .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+                             queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
+               .tid = tid,
+       };
+       int ret;
+
+       if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+               return -EINVAL;
+
+       if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
+                "Trying to reconfig unallocated queue %d\n", queue))
+               return -ENXIO;
+
+       IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
+
+       ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+       WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
+                 queue, fifo, ret);
+
+       return ret;
+}
+
 /*
  * If a given queue has a higher AC than the TID stream that is being compared
  * to, the queue needs to be redirected to the lower AC. This function does that
@@ -716,21 +767,40 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
 static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
                                   u8 sta_id, u8 tid, unsigned int timeout)
 {
-       int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
-                               mvm->trans->cfg->min_256_ba_txq_size);
+       int queue, size;
 
        if (tid == IWL_MAX_TID_COUNT) {
                tid = IWL_MGMT_TID;
                size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
                             mvm->trans->cfg->min_txq_size);
+       } else {
+               struct ieee80211_sta *sta;
+
+               rcu_read_lock();
+               sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+               /* this queue isn't used for traffic (cab_queue) */
+               if (IS_ERR_OR_NULL(sta)) {
+                       size = IWL_MGMT_QUEUE_SIZE;
+               } else if (sta->he_cap.has_he) {
+                       /* support for 256 ba size */
+                       size = IWL_DEFAULT_QUEUE_SIZE_HE;
+               } else {
+                       size = IWL_DEFAULT_QUEUE_SIZE;
+               }
+
+               rcu_read_unlock();
        }
 
-       do {
-               __le16 enable = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE);
+       /* cap at the number of byte-count (bc) table entries allowed */
+       size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16));
 
-               queue = iwl_trans_txq_alloc(mvm->trans, enable,
-                                           sta_id, tid, SCD_QUEUE_CFG,
-                                           size, timeout);
+       /* size needs to be a power of 2 for calculating the read/write pointers */
+       size = rounddown_pow_of_two(size);
+
+       do {
+               queue = iwl_trans_txq_alloc(mvm->trans, 0, BIT(sta_id),
+                                           tid, size, timeout);
 
                if (queue < 0)
                        IWL_DEBUG_TX_QUEUES(mvm,
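
A worked example of the new size computation, with assumed numbers:

/*
 * Assume an HE peer (size = IWL_DEFAULT_QUEUE_SIZE_HE) and a byte-count
 * table of bc_tbl_size = 1024 bytes -> 1024 / sizeof(u16) = 512 entries.
 * min() caps the queue at 512 entries, and rounddown_pow_of_two(512) == 512.
 * A non-power-of-two cap such as 600 would instead round down to 512,
 * keeping the read/write pointer masking valid.
 */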
@@ -1019,12 +1089,12 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
         * Remove the ones that did.
         */
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
-               u16 tid_bitmap;
+               u16 q_tid_bitmap;
 
                mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
                mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
 
-               tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+               q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;
 
                /*
                 * We need to take into account a situation in which a TXQ was
@@ -1037,7 +1107,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
                 * Mark this queue in the right bitmap, we'll send the command
                 * to the firmware later.
                 */
-               if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
+               if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
                        set_bit(queue, changetid_queues);
 
                IWL_DEBUG_TX_QUEUES(mvm,
@@ -1337,7 +1407,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 
 out_err:
        queue_tmp = queue;
-       iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0);
+       iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid);
 
        return ret;
 }
@@ -1516,8 +1586,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
        memset(&cmd, 0, sizeof(cmd));
        cmd.sta_id = sta->sta_id;
 
-       if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA,
-                                 0) >= 12 &&
+       if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12 &&
            sta->type == IWL_STA_AUX_ACTIVITY)
                cmd.mac_id_n_color = cpu_to_le32(mac_id);
        else
@@ -1784,8 +1853,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
                if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
                        continue;
 
-               iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i,
-                                   0);
+               iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i);
                mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
        }
 
@@ -1993,7 +2061,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
        if (ret) {
                if (!iwl_mvm_has_new_tx_api(mvm))
                        iwl_mvm_disable_txq(mvm, NULL, queue,
-                                           IWL_MAX_TID_COUNT, 0);
+                                           IWL_MAX_TID_COUNT);
                return ret;
        }
 
@@ -2065,7 +2133,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
                return -EINVAL;
 
-       iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
+       iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT);
        ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
        if (ret)
                IWL_WARN(mvm, "Failed sending remove station\n");
@@ -2082,7 +2150,7 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
        if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
                return -EINVAL;
 
-       iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
+       iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT);
        ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
        if (ret)
                IWL_WARN(mvm, "Failed sending remove station\n");
@@ -2199,7 +2267,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
        }
 
        queue = *queueptr;
-       iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0);
+       iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT);
        if (iwl_mvm_has_new_tx_api(mvm))
                return;
 
@@ -2434,7 +2502,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);
 
-       iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0);
+       iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0);
 
        ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
        if (ret)
@@ -2443,8 +2511,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        return ret;
 }
 
-#define IWL_MAX_RX_BA_SESSIONS 16
-
 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
 {
        struct iwl_mvm_delba_data notif = {
@@ -2526,18 +2592,126 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
        }
 }
 
+static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm,
+                                 struct iwl_mvm_sta *mvm_sta,
+                                 bool start, int tid, u16 ssn,
+                                 u16 buf_size)
+{
+       struct iwl_mvm_add_sta_cmd cmd = {
+               .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
+               .sta_id = mvm_sta->sta_id,
+               .add_modify = STA_MODE_MODIFY,
+       };
+       u32 status;
+       int ret;
+
+       if (start) {
+               cmd.add_immediate_ba_tid = tid;
+               cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+               cmd.rx_ba_window = cpu_to_le16(buf_size);
+               cmd.modify_mask = STA_MODIFY_ADD_BA_TID;
+       } else {
+               cmd.remove_immediate_ba_tid = tid;
+               cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;
+       }
+
+       status = ADD_STA_SUCCESS;
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+                                         iwl_mvm_add_sta_cmd_size(mvm),
+                                         &cmd, &status);
+       if (ret)
+               return ret;
+
+       switch (status & IWL_ADD_STA_STATUS_MASK) {
+       case ADD_STA_SUCCESS:
+               IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
+                            start ? "start" : "stopp");
+               if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&
+                           !(status & IWL_ADD_STA_BAID_VALID_MASK)))
+                       return -EINVAL;
+               return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);
+       case ADD_STA_IMMEDIATE_BA_FAILURE:
+               IWL_WARN(mvm, "RX BA Session refused by fw\n");
+               return -ENOSPC;
+       default:
+               IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
+                       start ? "start" : "stopp", status);
+               return -EIO;
+       }
+}
+
+static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
+                                 struct iwl_mvm_sta *mvm_sta,
+                                 bool start, int tid, u16 ssn,
+                                 u16 buf_size, int baid)
+{
+       struct iwl_rx_baid_cfg_cmd cmd = {
+               .action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
+                                 cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
+       };
+       u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
+       int ret;
+
+       BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
+
+       if (start) {
+               cmd.alloc.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
+               cmd.alloc.tid = tid;
+               cmd.alloc.ssn = cpu_to_le16(ssn);
+               cmd.alloc.win_size = cpu_to_le16(buf_size);
+               baid = -EIO;
+       } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
+               cmd.remove_v1.baid = cpu_to_le32(baid);
+               BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
+       } else {
+               cmd.remove.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
+               cmd.remove.tid = cpu_to_le32(tid);
+       }
+
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
+                                         &cmd, &baid);
+       if (ret)
+               return ret;
+
+       if (!start) {
+               /* ignore firmware baid on remove */
+               baid = 0;
+       }
+
+       IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
+                    start ? "start" : "stopp");
+
+       if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map))
+               return -EINVAL;
+
+       return baid;
+}
+
+static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta,
+                             bool start, int tid, u16 ssn, u16 buf_size,
+                             int baid)
+{
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT))
+               return iwl_mvm_fw_baid_op_cmd(mvm, mvm_sta, start,
+                                             tid, ssn, buf_size, baid);
+
+       return iwl_mvm_fw_baid_op_sta(mvm, mvm_sta, start,
+                                     tid, ssn, buf_size);
+}
+
 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
 {
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-       struct iwl_mvm_add_sta_cmd cmd = {};
        struct iwl_mvm_baid_data *baid_data = NULL;
-       int ret;
-       u32 status;
+       int ret, baid;
+       u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID :
+                                                              IWL_MAX_BAID_OLD;
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
+       if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) {
                IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
                return -ENOSPC;
        }
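
The dispatch helper lets the rest of iwl_mvm_sta_rx_agg() treat both firmware generations uniformly: a negative return value is an errno, anything else is the BAID. A sketch of the caller's contract:

	baid = iwl_mvm_fw_baid_op(mvm, mvm_sta, true, tid, ssn, buf_size, -1);
	if (baid < 0)
		return baid;	/* -ENOSPC if the FW refused, -EIO/-EINVAL otherwise */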
@@ -2583,59 +2757,29 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                        reorder_buf_size / sizeof(baid_data->entries[0]);
        }
 
-       cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
-       cmd.sta_id = mvm_sta->sta_id;
-       cmd.add_modify = STA_MODE_MODIFY;
-       if (start) {
-               cmd.add_immediate_ba_tid = (u8) tid;
-               cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
-               cmd.rx_ba_window = cpu_to_le16(buf_size);
+       if (iwl_mvm_has_new_rx_api(mvm) && !start) {
+               baid = mvm_sta->tid_to_baid[tid];
        } else {
-               cmd.remove_immediate_ba_tid = (u8) tid;
+               /* we don't really need it in this case */
+               baid = -1;
        }
-       cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
-                                 STA_MODIFY_REMOVE_BA_TID;
 
-       status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
-                                         iwl_mvm_add_sta_cmd_size(mvm),
-                                         &cmd, &status);
-       if (ret)
-               goto out_free;
+       /* Don't send command to remove (start=0) BAID during restart */
+       if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+               baid = iwl_mvm_fw_baid_op(mvm, mvm_sta, start, tid, ssn, buf_size,
+                                         baid);
 
-       switch (status & IWL_ADD_STA_STATUS_MASK) {
-       case ADD_STA_SUCCESS:
-               IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
-                            start ? "start" : "stopp");
-               break;
-       case ADD_STA_IMMEDIATE_BA_FAILURE:
-               IWL_WARN(mvm, "RX BA Session refused by fw\n");
-               ret = -ENOSPC;
-               break;
-       default:
-               ret = -EIO;
-               IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
-                       start ? "start" : "stopp", status);
-               break;
-       }
-
-       if (ret)
+       if (baid < 0) {
+               ret = baid;
                goto out_free;
+       }
 
        if (start) {
-               u8 baid;
-
                mvm->rx_ba_sessions++;
 
                if (!iwl_mvm_has_new_rx_api(mvm))
                        return 0;
 
-               if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
-                       ret = -EINVAL;
-                       goto out_free;
-               }
-               baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
-                           IWL_ADD_STA_BAID_SHIFT);
                baid_data->baid = baid;
                baid_data->timeout = timeout;
                baid_data->last_rx = jiffies;
@@ -2663,7 +2807,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
                rcu_assign_pointer(mvm->baid_map[baid], baid_data);
        } else  {
-               u8 baid = mvm_sta->tid_to_baid[tid];
+               baid = mvm_sta->tid_to_baid[tid];
 
                if (mvm->rx_ba_sessions > 0)
                        /* check that restart flow didn't zero the counter */
@@ -3238,8 +3382,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
        int i, size;
        bool new_api = fw_has_api(&mvm->fw->ucode_capa,
                                  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
-       int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                           ADD_STA_KEY,
+       int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
                                            new_api ? 2 : 1);
 
        if (sta_id == IWL_MVM_INVALID_STA)
@@ -3939,7 +4082,7 @@ void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
 
-       if (!WARN_ON(!mvmsta))
+       if (mvmsta)
                iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
 
        rcu_read_unlock();
@@ -3998,3 +4141,21 @@ out:
        iwl_mvm_dealloc_int_sta(mvm, sta);
        return ret;
 }
+
+void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
+                                  struct ieee80211_vif *vif,
+                                  u32 mac_id)
+{
+       struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
+               .mac_id = cpu_to_le32(mac_id),
+       };
+       int ret;
+
+       ret = iwl_mvm_send_cmd_pdu(mvm,
+                                  WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
+                                  CMD_ASYNC,
+                                  sizeof(cancel_channel_switch_cmd),
+                                  &cancel_channel_switch_cmd);
+       if (ret)
+               IWL_ERR(mvm, "Failed to cancel the channel switch\n");
+}
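
No caller of the new helper appears in the hunks shown; a plausible call site (an assumption, not confirmed by this diff) is the mac80211 abort_channel_switch path:

/* Hypothetical caller sketch: */
static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_cancel_channel_switch(mvm, vif, mvmvif->id);
}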
index e34b82b..f1a4fc3 100644 (file)
@@ -548,4 +548,7 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
 int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                         struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
                         u8 *key, u32 key_len);
+void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
+                                  struct ieee80211_vif *vif,
+                                  u32 mac_id);
 #endif /* __sta_h__ */
index ab06dcd..6edf2b7 100644 (file)
@@ -97,8 +97,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
                /* In newer version of this command an aux station is added only
                 * in cases of dedicated tx queue and need to be removed in end
                 * of use */
-               if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-                                         ADD_STA, 0) >= 12)
+               if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12)
                        iwl_mvm_rm_aux_sta(mvm);
        }
 
@@ -658,8 +657,8 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
        };
        int ret;
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
-                                                  MAC_CONF_GROUP, 0),
+       ret = iwl_mvm_send_cmd_pdu(mvm,
+                                  WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
                                   0, sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm,
@@ -923,8 +922,8 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
        }
 
        cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
-       return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
-                                                   MAC_CONF_GROUP, 0),
+       return iwl_mvm_send_cmd_pdu(mvm,
+                                   WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
                                    0, sizeof(cmd), &cmd);
 }
 
@@ -1162,8 +1161,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-       const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF,
-                                        MAC_CONF_GROUP, 0) };
+       const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
        struct iwl_notification_wait wait_notif;
        struct iwl_mvm_session_prot_cmd cmd = {
                .id_and_color =
@@ -1201,8 +1199,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
 
        if (!wait_for_notif) {
                if (iwl_mvm_send_cmd_pdu(mvm,
-                                        iwl_cmd_id(SESSION_PROTECTION_CMD,
-                                                   MAC_CONF_GROUP, 0),
+                                        WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
                                         0, sizeof(cmd), &cmd)) {
                        IWL_ERR(mvm,
                                "Couldn't send the SESSION_PROTECTION_CMD\n");
@@ -1219,8 +1216,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
                                   iwl_mvm_session_prot_notif, NULL);
 
        if (iwl_mvm_send_cmd_pdu(mvm,
-                                iwl_cmd_id(SESSION_PROTECTION_CMD,
-                                           MAC_CONF_GROUP, 0),
+                                WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
                                 0, sizeof(cmd), &cmd)) {
                IWL_ERR(mvm,
                        "Couldn't send the SESSION_PROTECTION_CMD\n");
index 398390c..69cf3a3 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2019-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2021 Intel Corporation
  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2015-2016 Intel Deutschland GmbH
  */
@@ -160,6 +160,11 @@ void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
        notif = (struct ct_kill_notif *)pkt->data;
        IWL_DEBUG_TEMP(mvm, "CT Kill notification temperature = %d\n",
                       notif->temperature);
+       if (iwl_fw_lookup_notif_ver(mvm->fw, PHY_OPS_GROUP,
+                                   CT_KILL_NOTIFICATION, 0) > 1)
+               IWL_DEBUG_TEMP(mvm,
+                              "CT kill notification DTS bitmap = 0x%x, Scheme = %d\n",
+                              notif->dts, notif->scheme);
 
        iwl_mvm_enter_ctkill(mvm);
 }
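
The extra print implies the version-2 notification carries two new fields; a layout consistent with the accesses above (a sketch, with field names taken from the code):

struct ct_kill_notif {
	__le16 temperature;	/* temperature that tripped CT-kill */
	u8 dts;			/* bitmap of DTS sensors that tripped */
	u8 scheme;		/* CT-kill scheme applied by the firmware */
} __packed; /* CT_KILL_NOTIFICATION_API_S_VER_2 (assumed name) */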
@@ -240,8 +245,8 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
         * a response. For older versions we send the command and wait for a
         * notification (no command TLV for previous versions).
         */
-       cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
-                                       CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
+       cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+                                       WIDE_ID(PHY_OPS_GROUP, CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
                                        IWL_FW_CMD_VER_UNKNOWN);
        if (cmd_ver == 1)
                return iwl_mvm_send_temp_cmd(mvm, true, temp);
index 9213f85..7763037 100644 (file)
@@ -318,15 +318,14 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
 
        /* info->control is only relevant for non HW rate control */
        if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
-               struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
                /* HT rate doesn't make sense for a non data frame */
                WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS &&
                          !ieee80211_is_data(fc),
                          "Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n",
                          info->control.rates[0].flags,
                          info->control.rates[0].idx,
-                         le16_to_cpu(fc), sta ? mvmsta->sta_state : -1);
+                         le16_to_cpu(fc),
+                         sta ? iwl_mvm_sta_from_mac80211(sta)->sta_state : -1);
 
                rate_idx = info->control.rates[0].idx;
        }
@@ -351,7 +350,7 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
        is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE);
 
        /* Set CCK or OFDM flag */
-       if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 8) {
+       if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) {
                if (!is_cck)
                        rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
                else
@@ -654,7 +653,8 @@ static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
        struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
        int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt;
        struct iwl_probe_resp_data *resp_data;
-       u8 *ie, *pos;
+       const u8 *ie;
+       u8 *pos;
        u8 match[] = {
                (WLAN_OUI_WFA >> 16) & 0xff,
                (WLAN_OUI_WFA >> 8) & 0xff,
@@ -671,10 +671,10 @@ static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
        if (!resp_data->notif.noa_active)
                goto out;
 
-       ie = (u8 *)cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
-                                         mgmt->u.probe_resp.variable,
-                                         skb->len - base_len,
-                                         match, 4, 2);
+       ie = cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
+                                   mgmt->u.probe_resp.variable,
+                                   skb->len - base_len,
+                                   match, 4, 2);
        if (!ie) {
                IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n");
                goto out;
@@ -1602,8 +1602,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                        seq_ctl = le16_to_cpu(hdr->seq_ctrl);
 
                if (unlikely(!seq_ctl)) {
-                       struct ieee80211_hdr *hdr = (void *)skb->data;
-
                        /*
                         * If it is an NDP, we can't update next_reclaim since
                         * its sequence control is 0. Note that for that same
index 1f3e90e..bc94773 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */
@@ -169,8 +169,7 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
 
 u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
 {
-       if (iwl_fw_lookup_cmd_ver(fw, LONG_GROUP,
-                                 TX_CMD, 0) > 8)
+       if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8)
                /* In the new rate legacy rates are indexed:
                 * 0 - 3 for CCK and 0 - 7 for OFDM.
                 */
@@ -241,38 +240,6 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
        return last_idx;
 }
 
-int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
-                        int tid, int frame_limit, u16 ssn)
-{
-       struct iwl_scd_txq_cfg_cmd cmd = {
-               .scd_queue = queue,
-               .action = SCD_CFG_ENABLE_QUEUE,
-               .window = frame_limit,
-               .sta_id = sta_id,
-               .ssn = cpu_to_le16(ssn),
-               .tx_fifo = fifo,
-               .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
-                             queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
-               .tid = tid,
-       };
-       int ret;
-
-       if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
-               return -EINVAL;
-
-       if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
-                "Trying to reconfig unallocated queue %d\n", queue))
-               return -ENXIO;
-
-       IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
-
-       ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
-       WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
-                 queue, fifo, ret);
-
-       return ret;
-}
-
 /**
  * iwl_mvm_send_lq_cmd() - Send link quality command
  * @mvm: Driver data.
@@ -480,8 +447,7 @@ void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
                cmd.low_latency_tx = 1;
        }
 
-       if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(LOW_LATENCY_CMD,
-                                                MAC_CONF_GROUP, 0),
+       if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD),
                                 0, sizeof(cmd), &cmd))
                IWL_ERR(mvm, "Failed to send low latency command\n");
 }
index 85a6da7..75fd386 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
  */
 #include "iwl-trans.h"
 #include "iwl-fh.h"
@@ -125,6 +125,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
        control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
        control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT;
 
+       if (trans->trans_cfg->imr_enabled)
+               control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN;
+
        /* initialize RX default queue */
        prph_sc_ctrl->rbd_cfg.free_rbd_addr =
                cpu_to_le64(trans_pcie->rxq->bd_dma);
index 5178e85..b16d4ae 100644 (file)
@@ -491,18 +491,21 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 /* So devices */
        {IWL_PCI_DEVICE(0x2725, PCI_ANY_ID, iwl_so_trans_cfg)},
        {IWL_PCI_DEVICE(0x2726, PCI_ANY_ID, iwl_snj_trans_cfg)},
-       {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+       {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)},
        {IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)},
        {IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+       {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)},
        {IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+       {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)},
 
 /* Ma devices */
        {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)},
        {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
-       {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_ma_trans_cfg)},
 
 /* Bz devices */
        {IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
+       {IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
+       {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
 #endif /* CONFIG_IWLMVM */
 
        {0}
@@ -668,8 +671,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
        IWL_DEV_INFO(0x2726, 0x1652, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name),
        IWL_DEV_INFO(0x2726, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name),
        IWL_DEV_INFO(0x2726, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name),
-       IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name),
-       IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name),
+       IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
+       IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
 
        /* SO with GF2 */
        IWL_DEV_INFO(0x2726, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
@@ -682,6 +685,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
        IWL_DEV_INFO(0x7A70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
        IWL_DEV_INFO(0x7AF0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
        IWL_DEV_INFO(0x7AF0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
+       IWL_DEV_INFO(0x7F70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
+       IWL_DEV_INFO(0x7F70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
 
        /* MA with GF2 */
        IWL_DEV_INFO(0x7E40, 0x1671, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675s_name),
@@ -1301,7 +1306,30 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
                      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
                      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
                      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
-                     iwlax210_2ax_cfg_so_jf_b0, iwl9462_name)
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
+
+/* MsP */
+/* For now we use the same FW as MR, but this will change in the future. */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+                     IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+                     iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+                     IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+                     iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+                     IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+                     iwl_cfg_ma_a0_ms_a0, iwl_ax204_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+                     IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+                     iwl_cfg_snj_a0_ms_a0, iwl_ax204_name)
 
 #endif /* CONFIG_IWLMVM */
 };
index a43e56c..f7e4f86 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2003-2015, 2018-2021 Intel Corporation
+ * Copyright (C) 2003-2015, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -103,6 +103,18 @@ struct iwl_rx_completion_desc {
        u8 reserved2[25];
 } __packed;
 
+/**
+ * struct iwl_rx_completion_desc_bz - Bz completion descriptor
+ * @rbid: unique tag of the received buffer
+ * @flags: flags (bit 0: fragmented, all other bits: reserved)
+ * @reserved: reserved
+ */
+struct iwl_rx_completion_desc_bz {
+       __le16 rbid;
+       u8 flags;
+       u8 reserved[1];
+} __packed;
+
 /**
  * struct iwl_rxq - Rx queue
  * @id: queue index
@@ -133,11 +145,7 @@ struct iwl_rxq {
        int id;
        void *bd;
        dma_addr_t bd_dma;
-       union {
-               void *used_bd;
-               __le32 *bd_32;
-               struct iwl_rx_completion_desc *cd;
-       };
+       void *used_bd;
        dma_addr_t used_bd_dma;
        u32 read;
        u32 write;
@@ -261,6 +269,20 @@ enum iwl_pcie_fw_reset_state {
        FW_RESET_ERROR,
 };
 
+/**
+ * enum iwl_pcie_imr_status - IMR DMA transfer state
+ * @IMR_D2S_IDLE: no IMR DMA transfer in progress (default state)
+ * @IMR_D2S_REQUESTED: IMR DMA transfer requested
+ * @IMR_D2S_COMPLETED: IMR DMA transfer completed
+ * @IMR_D2S_ERROR: IMR DMA transfer error
+ */
+enum iwl_pcie_imr_status {
+       IMR_D2S_IDLE,
+       IMR_D2S_REQUESTED,
+       IMR_D2S_COMPLETED,
+       IMR_D2S_ERROR,
+};
+
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
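The IMR state machine above is driven from both process and interrupt context later in this merge: iwl_trans_pcie_copy_imr() marks the transfer IMR_D2S_REQUESTED, starts the DMA and sleeps on imr_waitq; the interrupt handlers then advance the state to IMR_D2S_COMPLETED (or IMR_D2S_ERROR on a firmware SW error) and wake the waiter. A condensed sketch of that request/complete handshake, paraphrased from the hunks below rather than a new API:

	/* requester (process context, see iwl_trans_pcie_copy_imr() below) */
	trans_pcie->imr_status = IMR_D2S_REQUESTED;
	iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt);
	ret = wait_event_timeout(trans_pcie->imr_waitq,
				 trans_pcie->imr_status != IMR_D2S_REQUESTED,
				 5 * HZ);

	/* completer (interrupt context, see the rx.c/trans.c hunks below) */
	if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
		trans_pcie->imr_status = IMR_D2S_COMPLETED; /* or IMR_D2S_ERROR */
		wake_up(&trans_pcie->imr_waitq);
	}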
@@ -319,6 +341,8 @@ enum iwl_pcie_fw_reset_state {
  * @alloc_page_lock: spinlock for the page allocator
  * @alloc_page: allocated page to still use parts of
  * @alloc_page_used: how much of the allocated page was already used (bytes)
+ * @imr_status: IMR DMA transfer state machine (&enum iwl_pcie_imr_status)
+ * @imr_waitq: wait queue for IMR DMA completion
  * @rf_name: name/version of the CRF, if any
  */
 struct iwl_trans_pcie {
@@ -363,7 +387,7 @@ struct iwl_trans_pcie {
 
        /* PCI bus related data */
        struct pci_dev *pci_dev;
-       void __iomem *hw_base;
+       u8 __iomem *hw_base;
 
        bool ucode_write_complete;
        bool sx_complete;
@@ -414,7 +438,8 @@ struct iwl_trans_pcie {
        bool fw_reset_handshake;
        enum iwl_pcie_fw_reset_state fw_reset_state;
        wait_queue_head_t fw_reset_waitq;
-
+       enum iwl_pcie_imr_status imr_status;
+       wait_queue_head_t imr_waitq;
        char rf_name[32];
 };
 
@@ -809,4 +834,9 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
                               struct iwl_host_cmd *cmd);
 int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                          struct iwl_host_cmd *cmd);
+void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
+                               u32 dst_addr, u64 src_addr, u32 byte_cnt);
+int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
+                           u32 dst_addr, u64 src_addr, u32 byte_cnt);
+
 #endif /* __iwl_trans_int_pcie_h__ */
index 8247014..68a4572 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2003-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -190,11 +190,14 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
        }
 
        rxq->write_actual = round_down(rxq->write, 8);
-       if (trans->trans_cfg->mq_rx_supported)
+       if (!trans->trans_cfg->mq_rx_supported)
+               iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+       else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+               iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
+                           HBUS_TARG_WRPTR_RX_Q(rxq->id));
+       else
                iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
                            rxq->write_actual);
-       else
-               iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
 }
 
 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
@@ -652,23 +655,30 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
        iwl_pcie_rx_allocator(trans_pcie->trans);
 }
 
-static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
+static int iwl_pcie_free_bd_size(struct iwl_trans *trans)
 {
-       struct iwl_rx_transfer_desc *rx_td;
+       if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+               return sizeof(struct iwl_rx_transfer_desc);
 
-       if (use_rx_td)
-               return sizeof(*rx_td);
-       else
-               return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) :
-                       sizeof(__le32);
+       return trans->trans_cfg->mq_rx_supported ?
+                       sizeof(__le64) : sizeof(__le32);
+}
+
+static int iwl_pcie_used_bd_size(struct iwl_trans *trans)
+{
+       if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+               return sizeof(struct iwl_rx_completion_desc_bz);
+
+       if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+               return sizeof(struct iwl_rx_completion_desc);
+
+       return sizeof(__le32);
 }
 
 static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
                                  struct iwl_rxq *rxq)
 {
-       bool use_rx_td = (trans->trans_cfg->device_family >=
-                         IWL_DEVICE_FAMILY_AX210);
-       int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+       int free_size = iwl_pcie_free_bd_size(trans);
 
        if (rxq->bd)
                dma_free_coherent(trans->dev,
@@ -682,8 +692,8 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
 
        if (rxq->used_bd)
                dma_free_coherent(trans->dev,
-                                 (use_rx_td ? sizeof(*rxq->cd) :
-                                  sizeof(__le32)) * rxq->queue_size,
+                                 iwl_pcie_used_bd_size(trans) *
+                                       rxq->queue_size,
                                  rxq->used_bd, rxq->used_bd_dma);
        rxq->used_bd_dma = 0;
        rxq->used_bd = NULL;
@@ -707,7 +717,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
        else
                rxq->queue_size = RX_QUEUE_SIZE;
 
-       free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+       free_size = iwl_pcie_free_bd_size(trans);
 
        /*
         * Allocate the circular buffer of Read Buffer Descriptors
@@ -720,14 +730,15 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
 
        if (trans->trans_cfg->mq_rx_supported) {
                rxq->used_bd = dma_alloc_coherent(dev,
-                                                 (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
+                                                 iwl_pcie_used_bd_size(trans) *
+                                                       rxq->queue_size,
                                                  &rxq->used_bd_dma,
                                                  GFP_KERNEL);
                if (!rxq->used_bd)
                        goto err;
        }
 
-       rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
+       rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
        rxq->rb_stts_dma =
                trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
 
@@ -1307,9 +1318,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                             "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
                             rxq->id, offset,
                             iwl_get_cmd_string(trans,
-                                               iwl_cmd_id(pkt->hdr.cmd,
-                                                          pkt->hdr.group_id,
-                                                          0)),
+                                               WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
                             pkt->hdr.group_id, pkt->hdr.cmd,
                             le16_to_cpu(pkt->hdr.sequence));
 
@@ -1319,7 +1328,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
 
                /* check that what the device tells us made sense */
-               if (offset > max_len)
+               if (len < sizeof(*pkt) || offset > max_len)
                        break;
 
                trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
@@ -1419,6 +1428,7 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
        u16 vid;
 
        BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
+       BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4);
 
        if (!trans->trans_cfg->mq_rx_supported) {
                rxb = rxq->queue[i];
@@ -1426,11 +1436,20 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
                return rxb;
        }
 
-       if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
-               vid = le16_to_cpu(rxq->cd[i].rbid);
-               *join = rxq->cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
+       if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+               struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;
+
+               vid = le16_to_cpu(cd[i].rbid);
+               *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
+       } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+               struct iwl_rx_completion_desc *cd = rxq->used_bd;
+
+               vid = le16_to_cpu(cd[i].rbid);
+               *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
        } else {
-               vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
+               __le32 *cd = rxq->used_bd;
+
+               vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */
        }
 
        if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
@@ -1608,10 +1627,13 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
        if (WARN_ON(entry->entry >= trans->num_rx_queues))
                return IRQ_NONE;
 
-       if (WARN_ONCE(!rxq,
-                     "[%d] Got MSI-X interrupt before we have Rx queues",
-                     entry->entry))
+       if (!rxq) {
+               if (net_ratelimit())
+                       IWL_ERR(trans,
+                               "[%d] Got MSI-X interrupt before we have Rx queues\n",
+                               entry->entry);
                return IRQ_NONE;
+       }
 
        lock_map_acquire(&trans->sync_cmd_lockdep_map);
        IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);
@@ -1954,7 +1976,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
                                CSR_INT, CSR_INT_BIT_RX_PERIODIC);
                }
                /* Sending RX interrupt requires many steps to be done in the
-                * the device:
+                * device:
                 * 1- write interrupt to current index in ICT table.
                 * 2- dma RX frame.
                 * 3- update RX shared data to indicate last write index.
@@ -1998,6 +2020,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
                /* Wake up uCode load routine, now that load is complete */
                trans_pcie->ucode_write_complete = true;
                wake_up(&trans_pcie->ucode_write_waitq);
+               /* Wake up IMR write routine, now that write to SRAM is complete */
+               if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+                       trans_pcie->imr_status = IMR_D2S_COMPLETED;
+                       wake_up(&trans_pcie->imr_waitq);
+               }
        }
 
        if (inta & ~handled) {
@@ -2211,7 +2238,17 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
        }
 
        /* This "Tx" DMA channel is used only for loading uCode */
-       if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
+       if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM &&
+           trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+               IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n");
+               isr_stats->tx++;
+
+               /* Wake up IMR routine once write to SRAM is complete;
+                * the IMR_D2S_REQUESTED state was already checked above.
+                */
+               trans_pcie->imr_status = IMR_D2S_COMPLETED;
+               wake_up(&trans_pcie->imr_waitq);
+       } else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
                IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
                isr_stats->tx++;
                /*
@@ -2220,6 +2257,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
                 */
                trans_pcie->ucode_write_complete = true;
                wake_up(&trans_pcie->ucode_write_waitq);
+
+               /* Wake up IMR routine once write to SRAM is complete */
+               if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+                       trans_pcie->imr_status = IMR_D2S_COMPLETED;
+                       wake_up(&trans_pcie->imr_waitq);
+               }
        }
 
        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
@@ -2234,7 +2277,10 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
                        inta_fh);
                isr_stats->sw++;
                /* during FW reset flow report errors from there */
-               if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
+               if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+                       trans_pcie->imr_status = IMR_D2S_ERROR;
+                       wake_up(&trans_pcie->imr_waitq);
+               } else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
                        trans_pcie->fw_reset_state = FW_RESET_ERROR;
                        wake_up(&trans_pcie->fw_reset_waitq);
                } else {
index ef14584..8be3c3c 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2007-2015, 2018-2020 Intel Corporation
+ * Copyright (C) 2007-2015, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -745,7 +745,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
                        iwl_set_bits_prph(trans, LMPM_CHICK,
                                          LMPM_CHICK_EXTENDED_ADDR_SPACE);
 
-               memcpy(v_addr, (u8 *)section->data + offset, copy_size);
+               memcpy(v_addr, (const u8 *)section->data + offset, copy_size);
                ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
                                                   copy_size);
 
@@ -1112,7 +1112,7 @@ static const struct iwl_causes_list causes_list_pre_bz[] = {
 };
 
 static const struct iwl_causes_list causes_list_bz[] = {
-       {MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ,      CSR_MSIX_HW_INT_MASK_AD, 0x29},
+       {MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ,      CSR_MSIX_HW_INT_MASK_AD, 0x15},
 };
 
 static void iwl_pcie_map_list(struct iwl_trans *trans,
@@ -1948,6 +1948,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
        trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
        trans->txqs.page_offs = trans_cfg->cb_data_offs;
        trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
+       trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
 
        if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
                trans_pcie->n_no_reclaim_cmds = 0;
@@ -2863,7 +2864,7 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
 {
        struct iwl_trans *trans = file->private_data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
+       u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
        struct cont_rec *data = &trans_pcie->fw_mon_data;
        u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
        ssize_t size, bytes_copied = 0;
@@ -3468,7 +3469,8 @@ static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
        .d3_suspend = iwl_trans_pcie_d3_suspend,                        \
        .d3_resume = iwl_trans_pcie_d3_resume,                          \
        .interrupts = iwl_trans_pci_interrupts,                         \
-       .sync_nmi = iwl_trans_pcie_sync_nmi                             \
+       .sync_nmi = iwl_trans_pcie_sync_nmi,                            \
+       .imr_dma_data = iwl_trans_pcie_copy_imr                         \
 
 static const struct iwl_trans_ops trans_ops_pcie = {
        IWL_TRANS_COMMON_OPS,
@@ -3553,6 +3555,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        mutex_init(&trans_pcie->mutex);
        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
        init_waitqueue_head(&trans_pcie->fw_reset_waitq);
+       init_waitqueue_head(&trans_pcie->imr_waitq);
 
        trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
                                                   WQ_HIGHPRI | WQ_UNBOUND, 1);
@@ -3681,3 +3684,41 @@ out_free_trans:
        iwl_trans_free(trans);
        return ERR_PTR(ret);
 }
+
+void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
+                               u32 dst_addr, u64 src_addr, u32 byte_cnt)
+{
+       iwl_write_prph(trans, IMR_UREG_CHICK,
+                      iwl_read_prph(trans, IMR_UREG_CHICK) |
+                      IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK);
+       iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr);
+       iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB,
+                      (u32)(src_addr & 0xFFFFFFFF));
+       iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB,
+                      iwl_get_dma_hi_addr(src_addr));
+       iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt);
+       iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL,
+                      IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS |
+                      IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS |
+                      IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK);
+}
+
+int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
+                           u32 dst_addr, u64 src_addr, u32 byte_cnt)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int ret = -1;
+
+       trans_pcie->imr_status = IMR_D2S_REQUESTED;
+       iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt);
+       ret = wait_event_timeout(trans_pcie->imr_waitq,
+                                trans_pcie->imr_status !=
+                                IMR_D2S_REQUESTED, 5 * HZ);
+       if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) {
+               IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n");
+               iwl_trans_pcie_dump_regs(trans);
+               return -ETIMEDOUT;
+       }
+       trans_pcie->imr_status = IMR_D2S_IDLE;
+       return 0;
+}
index 34bde8c..c72a84d 100644 (file)
@@ -213,7 +213,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
 
        /* map the remaining (adjusted) nocopy/dup fragments */
        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
-               const void *data = cmddata[i];
+               void *data = (void *)(uintptr_t)cmddata[i];
 
                if (!cmdlen[i])
                        continue;
@@ -222,7 +222,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
                        continue;
                if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
                        data = dup_buf;
-               phys_addr = dma_map_single(trans->dev, (void *)data,
+               phys_addr = dma_map_single(trans->dev, data,
                                           cmdlen[i], DMA_TO_DEVICE);
                if (dma_mapping_error(trans->dev, phys_addr)) {
                        idx = -ENOMEM;
index 4f6c187..3546c52 100644 (file)
@@ -154,7 +154,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
        void *tfd;
        u32 num_tbs;
 
-       tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
+       tfd = (u8 *)txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
 
        if (reset)
                memset(tfd, 0, trans->txqs.tfd.size);
@@ -540,7 +540,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
                                          trans->cfg->min_txq_size);
                else
                        slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
-                                         trans->cfg->min_256_ba_txq_size);
+                                         trans->cfg->min_ba_txq_size);
                trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
                ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
                                    cmd_queue);
@@ -594,7 +594,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
                                          trans->cfg->min_txq_size);
                else
                        slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
-                                         trans->cfg->min_256_ba_txq_size);
+                                         trans->cfg->min_ba_txq_size);
                ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
                                   cmd_queue);
                if (ret) {
@@ -877,7 +877,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
        if (configure_scd) {
                iwl_scd_txq_set_inactive(trans, txq_id);
 
-               iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
+               iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
                                    ARRAY_SIZE(zero_val));
        }
 
@@ -1114,7 +1114,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 
        /* map the remaining (adjusted) nocopy/dup fragments */
        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
-               const void *data = cmddata[i];
+               void *data = (void *)(uintptr_t)cmddata[i];
 
                if (!cmdlen[i])
                        continue;
@@ -1123,7 +1123,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                        continue;
                if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
                        data = dup_buf;
-               phys_addr = dma_map_single(trans->dev, (void *)data,
+               phys_addr = dma_map_single(trans->dev, data,
                                           cmdlen[i], DMA_TO_DEVICE);
                if (dma_mapping_error(trans->dev, phys_addr)) {
                        iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
@@ -1201,7 +1201,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
        cmd = txq->entries[cmd_index].cmd;
        meta = &txq->entries[cmd_index].meta;
        group_id = cmd->hdr.group_id;
-       cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
+       cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
 
        iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
 
index 0730657..726185d 100644 (file)
@@ -1,13 +1,15 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2020-2021 Intel Corporation
+ * Copyright (C) 2020-2022 Intel Corporation
  */
 #include <net/tso.h>
 #include <linux/tcp.h>
 
 #include "iwl-debug.h"
 #include "iwl-io.h"
+#include "fw/api/commands.h"
 #include "fw/api/tx.h"
+#include "fw/api/datapath.h"
 #include "queue/tx.h"
 #include "iwl-fh.h"
 #include "iwl-scd.h"
@@ -41,13 +43,13 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
        num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
 
        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
-               struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
+               struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
 
                /* Starting from AX210, the HW expects bytes */
                WARN_ON(trans->txqs.bc_table_dword);
                WARN_ON(len > 0x3FFF);
                bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
-               scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
+               scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
        } else {
                struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
 
@@ -189,7 +191,7 @@ static struct page *get_workaround_page(struct iwl_trans *trans,
                return NULL;
 
        /* set the chaining pointer to the previous page if there */
-       *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+       *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
        *page_ptr = ret;
 
        return ret;
@@ -314,7 +316,7 @@ alloc:
                return NULL;
        p->pos = page_address(p->page);
        /* set the chaining pointer to NULL */
-       *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+       *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
 out:
        *page_ptr = p->page;
        get_page(p->page);
@@ -963,7 +965,7 @@ void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
        while (next) {
                struct page *tmp = next;
 
-               next = *(void **)(page_address(next) + PAGE_SIZE -
+               next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
                                  sizeof(void *));
                __free_page(tmp);
        }
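These helpers chain the workaround/TSO pages by stashing a pointer to the previous page in the final sizeof(void *) bytes of each page; the free loop above then walks that list. A minimal standalone sketch of the same technique (plain C, with malloc() standing in for the page allocator; PAGE_SIZE and all names here are illustrative):

	#include <stdlib.h>

	#define PAGE_SIZE 4096

	/* the chain pointer lives in the last sizeof(void *) bytes */
	static void **chain_slot(void *page)
	{
		return (void **)((char *)page + PAGE_SIZE - sizeof(void *));
	}

	static void *alloc_chained(void **head)
	{
		void *page = malloc(PAGE_SIZE);

		if (!page)
			return NULL;
		*chain_slot(page) = *head; /* link to the previous page */
		*head = page;
		return page;
	}

	static void free_chain(void *head)
	{
		while (head) {
			void *next = *chain_slot(head);

			free(head);
			head = next;
		}
	}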
@@ -1083,9 +1085,8 @@ error:
        return -ENOMEM;
 }
 
-static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
-                                struct iwl_txq **intxq, int size,
-                                unsigned int timeout)
+static struct iwl_txq *
+iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
 {
        size_t bc_tbl_size, bc_tbl_entries;
        struct iwl_txq *txq;
@@ -1097,18 +1098,18 @@ static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
        bc_tbl_entries = bc_tbl_size / sizeof(u16);
 
        if (WARN_ON(size > bc_tbl_entries))
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
 
        txq = kzalloc(sizeof(*txq), GFP_KERNEL);
        if (!txq)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
                                          &txq->bc_tbl.dma);
        if (!txq->bc_tbl.addr) {
                IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
                kfree(txq);
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
        }
 
        ret = iwl_txq_alloc(trans, txq, size, false);
@@ -1124,12 +1125,11 @@ static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
 
        txq->wd_timeout = msecs_to_jiffies(timeout);
 
-       *intxq = txq;
-       return 0;
+       return txq;
 
 error:
        iwl_txq_gen2_free_memory(trans, txq);
-       return ret;
+       return ERR_PTR(ret);
 }
 
 static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
@@ -1186,30 +1186,61 @@ error_free_resp:
        return ret;
 }
 
-int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid,
-                     int cmd_id, int size, unsigned int timeout)
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
+                     u8 tid, int size, unsigned int timeout)
 {
-       struct iwl_txq *txq = NULL;
-       struct iwl_tx_queue_cfg_cmd cmd = {
-               .flags = flags,
-               .sta_id = sta_id,
-               .tid = tid,
-       };
+       struct iwl_txq *txq;
+       union {
+               struct iwl_tx_queue_cfg_cmd old;
+               struct iwl_scd_queue_cfg_cmd new;
+       } cmd;
        struct iwl_host_cmd hcmd = {
-               .id = cmd_id,
-               .len = { sizeof(cmd) },
-               .data = { &cmd, },
                .flags = CMD_WANT_SKB,
        };
        int ret;
 
-       ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout);
-       if (ret)
-               return ret;
+       if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
+           trans->hw_rev_step == SILICON_A_STEP)
+               size = 4096;
+
+       txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
+       if (IS_ERR(txq))
+               return PTR_ERR(txq);
 
-       cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
-       cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
-       cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+       if (trans->txqs.queue_alloc_cmd_ver == 0) {
+               memset(&cmd.old, 0, sizeof(cmd.old));
+               cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
+               cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
+               cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+               cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
+               cmd.old.tid = tid;
+
+               if (hweight32(sta_mask) != 1) {
+                       ret = -EINVAL;
+                       goto error;
+               }
+               cmd.old.sta_id = ffs(sta_mask) - 1;
+
+               hcmd.id = SCD_QUEUE_CFG;
+               hcmd.len[0] = sizeof(cmd.old);
+               hcmd.data[0] = &cmd.old;
+       } else if (trans->txqs.queue_alloc_cmd_ver == 3) {
+               memset(&cmd.new, 0, sizeof(cmd.new));
+               cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
+               cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
+               cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
+               cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+               cmd.new.u.add.flags = cpu_to_le32(flags);
+               cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
+               cmd.new.u.add.tid = tid;
+
+               hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
+               hcmd.len[0] = sizeof(cmd.new);
+               hcmd.data[0] = &cmd.new;
+       } else {
+               ret = -EOPNOTSUPP;
+               goto error;
+       }
 
        ret = iwl_trans_send_cmd(trans, &hcmd);
        if (ret)
@@ -1307,10 +1338,10 @@ static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
        dma_addr_t hi_len;
 
        if (trans->trans_cfg->use_tfh) {
-               struct iwl_tfh_tfd *tfd = _tfd;
-               struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+               struct iwl_tfh_tfd *tfh_tfd = _tfd;
+               struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
 
-               return (dma_addr_t)(le64_to_cpu(tb->addr));
+               return (dma_addr_t)(le64_to_cpu(tfh_tb->addr));
        }
 
        tfd = _tfd;
index 20efc62..eca53bf 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2020-2021 Intel Corporation
+ * Copyright (C) 2020-2022 Intel Corporation
  */
 #ifndef __iwl_trans_queue_tx_h__
 #define __iwl_trans_queue_tx_h__
@@ -41,7 +41,7 @@ static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
        if (trans->trans_cfg->use_tfh)
                idx = iwl_txq_get_cmd_index(txq, idx);
 
-       return txq->tfds + trans->txqs.tfd.size * idx;
+       return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;
 }
 
 int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
@@ -112,10 +112,9 @@ void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
                            struct iwl_cmd_meta *meta,
                            struct iwl_tfh_tfd *tfd);
 
-int iwl_txq_dyn_alloc(struct iwl_trans *trans,
-                     __le16 flags, u8 sta_id, u8 tid,
-                     int cmd_id, int size,
-                     unsigned int timeout);
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
+                     u32 sta_mask, u8 tid,
+                     int size, unsigned int timeout);
 
 int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
                    struct iwl_device_tx_cmd *dev_cmd, int txq_id);
@@ -137,9 +136,9 @@ static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
        struct iwl_tfd *tfd;
 
        if (trans->trans_cfg->use_tfh) {
-               struct iwl_tfh_tfd *tfd = _tfd;
+               struct iwl_tfh_tfd *tfh_tfd = _tfd;
 
-               return le16_to_cpu(tfd->num_tbs) & 0x1f;
+               return le16_to_cpu(tfh_tfd->num_tbs) & 0x1f;
        }
 
        tfd = (struct iwl_tfd *)_tfd;
@@ -153,10 +152,10 @@ static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
        struct iwl_tfd_tb *tb;
 
        if (trans->trans_cfg->use_tfh) {
-               struct iwl_tfh_tfd *tfd = _tfd;
-               struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+               struct iwl_tfh_tfd *tfh_tfd = _tfd;
+               struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
 
-               return le16_to_cpu(tb->tb_len);
+               return le16_to_cpu(tfh_tb->tb_len);
        }
 
        tfd = (struct iwl_tfd *)_tfd;
index fc5725f..28bfa7b 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (c) 2008, Jouni Malinen <j@w1.fi>
  * Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
  * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2020 Intel Corporation
+ * Copyright (C) 2018 - 2022 Intel Corporation
  */
 
 /*
@@ -173,9 +173,23 @@ static const struct ieee80211_regdomain hwsim_world_regdom_custom_02 = {
        }
 };
 
+static const struct ieee80211_regdomain hwsim_world_regdom_custom_03 = {
+       .n_reg_rules = 6,
+       .alpha2 =  "99",
+       .reg_rules = {
+               REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0),
+               REG_RULE(2484 - 10, 2484 + 10, 40, 0, 20, 0),
+               REG_RULE(5150 - 10, 5240 + 10, 40, 0, 30, 0),
+               REG_RULE(5745 - 10, 5825 + 10, 40, 0, 30, 0),
+               REG_RULE(5855 - 10, 5925 + 10, 40, 0, 33, 0),
+               REG_RULE(5955 - 10, 7125 + 10, 320, 0, 33, 0),
+       }
+};
+
 static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = {
        &hwsim_world_regdom_custom_01,
        &hwsim_world_regdom_custom_02,
+       &hwsim_world_regdom_custom_03,
 };
 
 struct hwsim_vif_priv {
@@ -475,16 +489,16 @@ static const struct ieee80211_sta_s1g_cap hwsim_s1g_cap = {
                     0 },
 };
 
-static void hwsim_init_s1g_channels(struct ieee80211_channel *channels)
+static void hwsim_init_s1g_channels(struct ieee80211_channel *chans)
 {
        int ch, freq;
 
        for (ch = 0; ch < NUM_S1G_CHANS_US; ch++) {
                freq = 902000 + (ch + 1) * 500;
-               channels[ch].band = NL80211_BAND_S1GHZ;
-               channels[ch].center_freq = KHZ_TO_MHZ(freq);
-               channels[ch].freq_offset = freq % 1000;
-               channels[ch].hw_value = ch + 1;
+               chans[ch].band = NL80211_BAND_S1GHZ;
+               chans[ch].center_freq = KHZ_TO_MHZ(freq);
+               chans[ch].freq_offset = freq % 1000;
+               chans[ch].hw_value = ch + 1;
        }
 }
 
@@ -503,6 +517,8 @@ static const struct ieee80211_rate hwsim_rates[] = {
        { .bitrate = 540 }
 };
 
+#define DEFAULT_RX_RSSI -50
+
 static const u32 hwsim_ciphers[] = {
        WLAN_CIPHER_SUITE_WEP40,
        WLAN_CIPHER_SUITE_WEP104,
@@ -652,6 +668,7 @@ struct mac80211_hwsim_data {
                      ARRAY_SIZE(hwsim_channels_6ghz)];
 
        struct ieee80211_channel *channel;
+       enum nl80211_chan_width bw;
        u64 beacon_int  /* beacon interval in us */;
        unsigned int rx_filter;
        bool started, idle, scanning;
@@ -690,6 +707,9 @@ struct mac80211_hwsim_data {
        u64 rx_bytes;
        u64 tx_dropped;
        u64 tx_failed;
+
+       /* RSSI in rx status of the receiver */
+       int rx_rssi;
 };
 
 static const struct rhashtable_params hwsim_rht_params = {
@@ -803,6 +823,40 @@ extern int hwsim_tx_virtio(struct mac80211_hwsim_data *data,
 #define hwsim_virtio_enabled false
 #endif
 
+static int hwsim_get_chanwidth(enum nl80211_chan_width bw)
+{
+       switch (bw) {
+       case NL80211_CHAN_WIDTH_20_NOHT:
+       case NL80211_CHAN_WIDTH_20:
+               return 20;
+       case NL80211_CHAN_WIDTH_40:
+               return 40;
+       case NL80211_CHAN_WIDTH_80:
+               return 80;
+       case NL80211_CHAN_WIDTH_80P80:
+       case NL80211_CHAN_WIDTH_160:
+               return 160;
+       case NL80211_CHAN_WIDTH_320:
+               return 320;
+       case NL80211_CHAN_WIDTH_5:
+               return 5;
+       case NL80211_CHAN_WIDTH_10:
+               return 10;
+       case NL80211_CHAN_WIDTH_1:
+               return 1;
+       case NL80211_CHAN_WIDTH_2:
+               return 2;
+       case NL80211_CHAN_WIDTH_4:
+               return 4;
+       case NL80211_CHAN_WIDTH_8:
+               return 8;
+       case NL80211_CHAN_WIDTH_16:
+               return 16;
+       }
+
+       return INT_MAX;
+}
+
 static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
                                    struct sk_buff *skb,
                                    struct ieee80211_channel *chan);
@@ -964,6 +1018,29 @@ DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_group,
                         hwsim_fops_group_read, hwsim_fops_group_write,
                         "%llx\n");
 
+static int hwsim_fops_rx_rssi_read(void *dat, u64 *val)
+{
+       struct mac80211_hwsim_data *data = dat;
+       *val = data->rx_rssi;
+       return 0;
+}
+
+static int hwsim_fops_rx_rssi_write(void *dat, u64 val)
+{
+       struct mac80211_hwsim_data *data = dat;
+       int rssi = (int)val;
+
+       if (rssi >= 0 || rssi < -100)
+               return -EINVAL;
+
+       data->rx_rssi = rssi;
+       return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_rx_rssi,
+                        hwsim_fops_rx_rssi_read, hwsim_fops_rx_rssi_write,
+                        "%lld\n");
+
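DEFINE_DEBUGFS_ATTRIBUTE() above only builds the fops; the per-radio registration hunk is not part of this excerpt. A hedged sketch of how such an attribute is typically wired up in hwsim (the file name, mode and data->debugfs parent are assumptions here, mirroring the driver's existing attributes):

	/* Hypothetical registration at radio-creation time; not shown in
	 * this excerpt, names and mode are illustrative only. */
	debugfs_create_file("rx_rssi", 0666, data->debugfs, data,
			    &hwsim_fops_rx_rssi);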
 static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
                                        struct net_device *dev)
 {
@@ -1482,8 +1559,8 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
                rx_status.bw = RATE_INFO_BW_20;
        if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
                rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;
-       /* TODO: simulate real signal strength (and optional packet loss) */
-       rx_status.signal = -50;
+       /* TODO: simulate optional packet loss */
+       rx_status.signal = data->rx_rssi;
        if (info->control.vif)
                rx_status.signal += info->control.vif->bss_conf.txpower;
 
@@ -1595,7 +1672,8 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
        struct ieee80211_chanctx_conf *chanctx_conf;
        struct ieee80211_channel *channel;
        bool ack;
-       u32 _portid;
+       enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT;
+       u32 _portid, i;
 
        if (WARN_ON(skb->len < 10)) {
                /* Should not happen; just a sanity check for addr1 use */
@@ -1605,14 +1683,17 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
 
        if (!data->use_chanctx) {
                channel = data->channel;
+               confbw = data->bw;
        } else if (txi->hw_queue == 4) {
                channel = data->tmp_chan;
        } else {
                chanctx_conf = rcu_dereference(txi->control.vif->chanctx_conf);
-               if (chanctx_conf)
+               if (chanctx_conf) {
                        channel = chanctx_conf->def.chan;
-               else
+                       confbw = chanctx_conf->def.width;
+               } else {
                        channel = NULL;
+               }
        }
 
        if (WARN(!channel, "TX w/o channel - queue = %d\n", txi->hw_queue)) {
@@ -1636,6 +1717,25 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
                                       txi->control.rates,
                                       ARRAY_SIZE(txi->control.rates));
 
+       for (i = 0; i < ARRAY_SIZE(txi->control.rates); i++) {
+               u16 rflags = txi->control.rates[i].flags;
+               /* initialize to data->bw for 5/10 MHz handling */
+               enum nl80211_chan_width bw = data->bw;
+
+               if (txi->control.rates[i].idx == -1)
+                       break;
+
+               if (rflags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+                       bw = NL80211_CHAN_WIDTH_40;
+               else if (rflags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+                       bw = NL80211_CHAN_WIDTH_80;
+               else if (rflags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+                       bw = NL80211_CHAN_WIDTH_160;
+
+               if (WARN_ON(hwsim_get_chanwidth(bw) > hwsim_get_chanwidth(confbw)))
+                       return;
+       }
+
        if (skb->len >= 24 + 8 &&
            ieee80211_is_probe_resp(hdr->frame_control)) {
                /* fake header transmission time */
@@ -1935,6 +2035,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
                }
 
                data->channel = conf->chandef.chan;
+               data->bw = conf->chandef.width;
 
                for (idx = 0; idx < ARRAY_SIZE(data->survey_data); idx++) {
                        if (data->survey_data[idx].channel &&
@@ -1946,6 +2047,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
                }
        } else {
                data->channel = conf->chandef.chan;
+               data->bw = conf->chandef.width;
        }
        mutex_unlock(&data->mutex);
 
@@ -2077,12 +2179,49 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
                wiphy_dbg(hw->wiphy, "  TX Power: %d dBm\n", info->txpower);
 }
 
+static void
+mac80211_hwsim_sta_rc_update(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_sta *sta,
+                            u32 changed)
+{
+       struct mac80211_hwsim_data *data = hw->priv;
+       u32 bw = U32_MAX;
+       enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT;
+
+       switch (sta->bandwidth) {
+#define C(_bw) case IEEE80211_STA_RX_BW_##_bw: bw = _bw; break
+       C(20);
+       C(40);
+       C(80);
+       C(160);
+       C(320);
+#undef C
+       }
+
+       if (!data->use_chanctx) {
+               confbw = data->bw;
+       } else {
+               struct ieee80211_chanctx_conf *chanctx_conf =
+                       rcu_dereference(vif->chanctx_conf);
+
+               if (!WARN_ON(!chanctx_conf))
+                       confbw = chanctx_conf->def.width;
+       }
+
+       WARN(bw > hwsim_get_chanwidth(confbw),
+            "intf %pM: bad STA %pM bandwidth %d MHz (%d) > channel config %d MHz (%d)\n",
+            vif->addr, sta->addr, bw, sta->bandwidth,
+            hwsim_get_chanwidth(confbw), confbw);
+}
+
 static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw,
                                  struct ieee80211_vif *vif,
                                  struct ieee80211_sta *sta)
 {
        hwsim_check_magic(vif);
        hwsim_set_sta_magic(sta);
+       mac80211_hwsim_sta_rc_update(hw, vif, sta, 0);
 
        return 0;
 }
@@ -2658,6 +2797,7 @@ static int mac80211_hwsim_tx_last_beacon(struct ieee80211_hw *hw)
        .sta_add = mac80211_hwsim_sta_add,                      \
        .sta_remove = mac80211_hwsim_sta_remove,                \
        .sta_notify = mac80211_hwsim_sta_notify,                \
+       .sta_rc_update = mac80211_hwsim_sta_rc_update,          \
        .set_tim = mac80211_hwsim_set_tim,                      \
        .conf_tx = mac80211_hwsim_conf_tx,                      \
        .get_survey = mac80211_hwsim_get_survey,                \
@@ -2809,7 +2949,7 @@ out_err:
        nlmsg_free(mcast_skb);
 }
 
-static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = {
+static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = {
        {
                .types_mask = BIT(NL80211_IFTYPE_STATION) |
                              BIT(NL80211_IFTYPE_AP),
@@ -2855,6 +2995,66 @@ static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = {
                                .tx_mcs_80p80 = cpu_to_le16(0xffff),
                        },
                },
+               .eht_cap = {
+                       .has_eht = true,
+                       .eht_cap_elem = {
+                               .mac_cap_info[0] =
+                                       IEEE80211_EHT_MAC_CAP0_NSEP_PRIO_ACCESS |
+                                       IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+                                       IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+                               .phy_cap_info[0] =
+                                       IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+                                       IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+                                       IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+                                       IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+                                       IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE,
+                               .phy_cap_info[3] =
+                                       IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+                                       IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+                                       IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+                                       IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+                                       IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+                                       IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+                                       IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+                               .phy_cap_info[4] =
+                                       IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+                                       IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+                                       IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+                                       IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+                                       IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+                               .phy_cap_info[5] =
+                                       IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+                                       IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+                                       IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+                                       IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+                                       IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+                                       IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+                               .phy_cap_info[6] =
+                                       IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+                                       IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK,
+                               .phy_cap_info[7] =
+                                       IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW,
+                       },
+
+                       /* For all MCS and bandwidth, set 8 NSS for both Tx and
+                        * Rx
+                        */
+                       .eht_mcs_nss_supp = {
+                               /*
+                                * Since B0, B1, B2 and B3 are not set in
+                                * the supported channel width set field in the
+                                * HE PHY capabilities information field, the
+                                * device is a 20 MHz only device on the 2.4 GHz band.
+                                */
+                               .only_20mhz = {
+                                       .rx_tx_mcs7_max_nss = 0x88,
+                                       .rx_tx_mcs9_max_nss = 0x88,
+                                       .rx_tx_mcs11_max_nss = 0x88,
+                                       .rx_tx_mcs13_max_nss = 0x88,
+                               },
+                       },
+                       /* PPE threshold information is not supported */
+               },
        },
 #ifdef CONFIG_MAC80211_MESH
        {
@@ -2897,7 +3097,7 @@ static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = {
 #endif
 };
 
-static const struct ieee80211_sband_iftype_data he_capa_5ghz[] = {
+static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = {
        {
                /* TODO: should we support other types, e.g., P2P? */
                .types_mask = BIT(NL80211_IFTYPE_STATION) |
@@ -2948,6 +3148,81 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz[] = {
                                .tx_mcs_80p80 = cpu_to_le16(0xfffa),
                        },
                },
+               .eht_cap = {
+                       .has_eht = true,
+                       .eht_cap_elem = {
+                               .mac_cap_info[0] =
+                                       IEEE80211_EHT_MAC_CAP0_NSEP_PRIO_ACCESS |
+                                       IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+                                       IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+                               .phy_cap_info[0] =
+                                       IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+                                       IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+                                       IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+                                       IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+                                       IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE |
+                                       IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK,
+                               .phy_cap_info[1] =
+                                       IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK |
+                                       IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK,
+                               .phy_cap_info[2] =
+                                       IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK |
+                                       IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK,
+                               .phy_cap_info[3] =
+                                       IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+                                       IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+                                       IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+                                       IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+                                       IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+                                       IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+                                       IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+                               .phy_cap_info[4] =
+                                       IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+                                       IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+                                       IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+                                       IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+                                       IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+                               .phy_cap_info[5] =
+                                       IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+                                       IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+                                       IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+                                       IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+                                       IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+                                       IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+                               .phy_cap_info[6] =
+                                       IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+                                       IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK,
+                               .phy_cap_info[7] =
+                                       IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW |
+                                       IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+                                       IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+                                       IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+                                       IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ,
+                       },
+
+                       /* For all MCS and bandwidth, set 8 NSS for both Tx and
+                        * Rx
+                        */
+                       .eht_mcs_nss_supp = {
+                               /*
+                                * As B1 and B2 are set in the supported
+                                * channel width set field in the HE PHY
+                                * capabilities information field, include all
+                                * the following MCS/NSS.
+                                */
+                               .bw._80 = {
+                                       .rx_tx_mcs9_max_nss = 0x88,
+                                       .rx_tx_mcs11_max_nss = 0x88,
+                                       .rx_tx_mcs13_max_nss = 0x88,
+                               },
+                               .bw._160 = {
+                                       .rx_tx_mcs9_max_nss = 0x88,
+                                       .rx_tx_mcs11_max_nss = 0x88,
+                                       .rx_tx_mcs13_max_nss = 0x88,
+                               },
+                       },
+                       /* PPE threshold information is not supported */
+               },
        },
 #ifdef CONFIG_MAC80211_MESH
        {
@@ -2995,7 +3270,7 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz[] = {
 #endif
 };
 
-static const struct ieee80211_sband_iftype_data he_capa_6ghz[] = {
+static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = {
        {
                /* TODO: should we support other types, e.g., P2P?*/
                .types_mask = BIT(NL80211_IFTYPE_STATION) |
@@ -3055,6 +3330,93 @@ static const struct ieee80211_sband_iftype_data he_capa_6ghz[] = {
                                .tx_mcs_80p80 = cpu_to_le16(0xfffa),
                        },
                },
+               .eht_cap = {
+                       .has_eht = true,
+                       .eht_cap_elem = {
+                               .mac_cap_info[0] =
+                                       IEEE80211_EHT_MAC_CAP0_NSEP_PRIO_ACCESS |
+                                       IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+                                       IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+                               .phy_cap_info[0] =
+                                       IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ |
+                                       IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+                                       IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+                                       IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+                                       IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+                                       IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE |
+                                       IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK,
+                               .phy_cap_info[1] =
+                                       IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK |
+                                       IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK |
+                                       IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK,
+                               .phy_cap_info[2] =
+                                       IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK |
+                                       IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK |
+                                       IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK,
+                               .phy_cap_info[3] =
+                                       IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+                                       IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+                                       IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+                                       IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+                                       IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+                                       IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+                                       IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+                               .phy_cap_info[4] =
+                                       IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+                                       IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+                                       IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+                                       IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+                                       IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+                               .phy_cap_info[5] =
+                                       IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+                                       IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+                                       IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+                                       IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+                                       IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+                                       IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+                               .phy_cap_info[6] =
+                                       IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+                                       IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK |
+                                       IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP,
+                               .phy_cap_info[7] =
+                                       IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW |
+                                       IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+                                       IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+                                       IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
+                                       IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+                                       IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
+                                       IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ,
+                       },
+
+                       /* For all MCS and bandwidth, set 8 NSS for both Tx and
+                        * Rx
+                        */
+                       .eht_mcs_nss_supp = {
+                               /*
+                                * As B1 and B2 are set in the supported
+                                * channel width set field in the HE PHY
+                                * capabilities information field, and 320MHz
+                                * in 6GHz is supported, include all the
+                                * following MCS/NSS.
+                                */
+                               .bw._80 = {
+                                       .rx_tx_mcs9_max_nss = 0x88,
+                                       .rx_tx_mcs11_max_nss = 0x88,
+                                       .rx_tx_mcs13_max_nss = 0x88,
+                               },
+                               .bw._160 = {
+                                       .rx_tx_mcs9_max_nss = 0x88,
+                                       .rx_tx_mcs11_max_nss = 0x88,
+                                       .rx_tx_mcs13_max_nss = 0x88,
+                               },
+                               .bw._320 = {
+                                       .rx_tx_mcs9_max_nss = 0x88,
+                                       .rx_tx_mcs11_max_nss = 0x88,
+                                       .rx_tx_mcs13_max_nss = 0x88,
+                               },
+                       },
+                       /* PPE threshold information is not supported */
+               },
        },
 #ifdef CONFIG_MAC80211_MESH
        {
@@ -3111,22 +3473,22 @@ static const struct ieee80211_sband_iftype_data he_capa_6ghz[] = {
 #endif
 };
 
-static void mac80211_hwsim_he_capab(struct ieee80211_supported_band *sband)
+static void mac80211_hwsim_sband_capab(struct ieee80211_supported_band *sband)
 {
        u16 n_iftype_data;
 
        if (sband->band == NL80211_BAND_2GHZ) {
-               n_iftype_data = ARRAY_SIZE(he_capa_2ghz);
+               n_iftype_data = ARRAY_SIZE(sband_capa_2ghz);
                sband->iftype_data =
-                       (struct ieee80211_sband_iftype_data *)he_capa_2ghz;
+                       (struct ieee80211_sband_iftype_data *)sband_capa_2ghz;
        } else if (sband->band == NL80211_BAND_5GHZ) {
-               n_iftype_data = ARRAY_SIZE(he_capa_5ghz);
+               n_iftype_data = ARRAY_SIZE(sband_capa_5ghz);
                sband->iftype_data =
-                       (struct ieee80211_sband_iftype_data *)he_capa_5ghz;
+                       (struct ieee80211_sband_iftype_data *)sband_capa_5ghz;
        } else if (sband->band == NL80211_BAND_6GHZ) {
-               n_iftype_data = ARRAY_SIZE(he_capa_6ghz);
+               n_iftype_data = ARRAY_SIZE(sband_capa_6ghz);
                sband->iftype_data =
-                       (struct ieee80211_sband_iftype_data *)he_capa_6ghz;
+                       (struct ieee80211_sband_iftype_data *)sband_capa_6ghz;
        } else {
                return;
        }
@@ -3318,6 +3680,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                hw->wiphy->n_cipher_suites = param->n_ciphers;
        }
 
+       data->rx_rssi = DEFAULT_RX_RSSI;
+
        INIT_DELAYED_WORK(&data->roc_start, hw_roc_start);
        INIT_DELAYED_WORK(&data->roc_done, hw_roc_done);
        INIT_DELAYED_WORK(&data->hw_scan, hw_scan_work);
@@ -3449,7 +3813,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                        sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
                }
 
-               mac80211_hwsim_he_capab(sband);
+               mac80211_hwsim_sband_capab(sband);
 
                hw->wiphy->bands[band] = sband;
        }
@@ -3509,6 +3873,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
        debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps);
        debugfs_create_file("group", 0666, data->debugfs, data,
                            &hwsim_fops_group);
+       debugfs_create_file("rx_rssi", 0666, data->debugfs, data,
+                           &hwsim_fops_rx_rssi);
        if (!data->use_chanctx)
                debugfs_create_file("dfs_simulate_radar", 0222,
                                    data->debugfs,
index 9f24b07..c34d30f 100644 (file)
@@ -147,7 +147,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
        dev->stats.rx_packets++;
 
        skb->protocol = eth_type_trans(skb, dev);
-       netif_rx_any_context(skb);
+       netif_rx(skb);
 
        ret = 0;
 done:
@@ -262,7 +262,7 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
        dev->stats.rx_packets++;
 
        skb->protocol = eth_type_trans(skb, priv->dev);
-       netif_rx_any_context(skb);
+       netif_rx(skb);
 
        ret = 0;
 
index 18e8977..630e167 100644 (file)
@@ -389,7 +389,7 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
 {
        const u8 *vendor_ie;
        const u8 *wmm_ie;
-       u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02};
+       static const u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02};
 
        vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
                                            WLAN_OUI_TYPE_MICROSOFT_WMM,
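
A small cleanup that recurs in this series (the ba_range array in
mt76_connac_mcu gets the same treatment further below): without static const,
the array initializer is rebuilt on the stack on every call, while static
const keeps a single read-only copy in .rodata. A sketch of the difference,
assuming kernel types:

#include <linux/types.h>

void oui_on_stack(void)
{
        u8 oui[] = { 0x00, 0x50, 0xf2, 0x02 }; /* copied to the stack per call */

        (void)oui;
}

void oui_in_rodata(void)
{
        static const u8 oui[] = { 0x00, 0x50, 0xf2, 0x02 }; /* one shared copy */

        (void)oui;
}
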
index 245ff64..4e49ed2 100644 (file)
@@ -350,7 +350,7 @@ int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
                skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
 
        /* Forward multicast/broadcast packet to upper layer*/
-       netif_rx_any_context(skb);
+       netif_rx(skb);
        return 0;
 }
 
index d583fa6..d5edb1e 100644 (file)
@@ -488,7 +488,7 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
            (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
                skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
 
-       netif_rx_any_context(skb);
+       netif_rx(skb);
        return 0;
 }
 
index a4bb281..5b53d00 100644 (file)
@@ -932,6 +932,36 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
 }
 EXPORT_SYMBOL(mt76_wcid_key_setup);
 
+static int
+mt76_rx_signal(struct mt76_rx_status *status)
+{
+       s8 *chain_signal = status->chain_signal;
+       int signal = -128;
+       u8 chains;
+
+       for (chains = status->chains; chains; chains >>= 1, chain_signal++) {
+               int cur, diff;
+
+               cur = *chain_signal;
+               if (!(chains & BIT(0)) || cur > 0)
+                       continue;
+
+               if (cur > signal)
+                       swap(cur, signal);
+
+               diff = signal - cur;
+               if (diff == 0)
+                       signal += 3;
+               else if (diff <= 2)
+                       signal += 2;
+               else if (diff <= 6)
+                       signal += 1;
+       }
+
+       return signal;
+}
+
 static void
 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
                struct ieee80211_hw **hw,
@@ -960,6 +990,9 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
        status->ampdu_reference = mstat.ampdu_ref;
        status->device_timestamp = mstat.timestamp;
        status->mactime = mstat.timestamp;
+       status->signal = mt76_rx_signal(&mstat);
+       if (status->signal <= -128)
+               status->flag |= RX_FLAG_NO_SIGNAL_VAL;
 
        if (ieee80211_is_beacon(hdr->frame_control) ||
            ieee80211_is_probe_resp(hdr->frame_control))
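
The new mt76_rx_signal() helper above replaces the per-driver "take the max
chain RSSI" logic removed later in this series with an approximate power sum
in dB: the combined signal gains 3, 2 or 1 dB when the weaker chain is within
0, 2 or 6 dB of the stronger one. A standalone sketch of the same arithmetic
(hypothetical harness, not driver code):

#include <stdio.h>

static int combine_rssi(const signed char *rssi, int n)
{
        int signal = -128; /* sentinel: no valid reading yet */

        for (int i = 0; i < n; i++) {
                int cur = rssi[i];

                if (cur > 0) /* positive RSSI means no valid reading */
                        continue;
                if (cur > signal) { /* keep the stronger value in signal */
                        int tmp = signal; signal = cur; cur = tmp;
                }
                int diff = signal - cur;
                if (diff == 0)
                        signal += 3; /* equal powers: doubling is +3 dB */
                else if (diff <= 2)
                        signal += 2;
                else if (diff <= 6)
                        signal += 1;
        }
        return signal;
}

int main(void)
{
        signed char chains[] = { -60, -60 };

        printf("%d\n", combine_rssi(chains, 2)); /* -57: two equal chains */
        return 0;
}
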
@@ -1626,7 +1659,7 @@ enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
                return MT_DFS_STATE_DISABLED;
        }
 
-       if (phy->chandef.chan->dfs_state != NL80211_DFS_AVAILABLE)
+       if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
                return MT_DFS_STATE_CAC;
 
        return MT_DFS_STATE_ACTIVE;
index 5e10fe1..882fb5d 100644 (file)
@@ -19,7 +19,7 @@
 
 #define MT_MCU_RING_SIZE       32
 #define MT_RX_BUF_SIZE         2048
-#define MT_SKB_HEAD_LEN                128
+#define MT_SKB_HEAD_LEN                256
 
 #define MT_MAX_NON_AQL_PKT     16
 #define MT_TXQ_FREE_THR                32
@@ -1274,13 +1274,21 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
                         struct mt76_sta_stats *stats);
 int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
+int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
+                          u16 val, u16 offset, void *buf, size_t len);
 int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
                         u8 req_type, u16 val, u16 offset,
                         void *buf, size_t len);
 void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
                     const u16 offset, const u32 val);
-int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
-              bool ext);
+void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
+                    void *data, int len);
+u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr);
+void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
+                u32 addr, u32 val);
+int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
+                struct mt76_bus_ops *ops);
+int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
 int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
 int mt76u_alloc_queues(struct mt76_dev *dev);
 void mt76u_stop_tx(struct mt76_dev *dev);
index 415ea17..37b092e 100644 (file)
@@ -76,7 +76,7 @@ void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
        __le32 *end = (__le32 *)&skb->data[skb->len];
        enum rx_pkt_type type;
 
-       type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+       type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
 
        if (q == MT_RXQ_MCU) {
                if (type == PKT_TYPE_RX_EVENT)
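
A mechanical conversion that repeats throughout the mt76 hunks below:
FIELD_GET(mask, le32_to_cpu(x)) becomes le32_get_bits(x, mask), which takes
the __le32 directly and folds the byte-order conversion into the field
extraction. A sketch of the equivalence, reusing the field name from the hunk
above (kernel context assumed):

#include <linux/bitfield.h>

/* Both return the same value; the second form avoids the explicit
 * le32_to_cpu() and keeps the sparse endianness annotations intact.
 */
static u32 pkt_type_before(__le32 rxd0)
{
        return FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd0));
}

static u32 pkt_type_after(__le32 rxd0)
{
        return le32_get_bits(rxd0, MT_RXD0_PKT_TYPE);
}
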
index a272d64..17713c8 100644 (file)
@@ -643,11 +643,6 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
                status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
                                          dev->rssi_offset[1];
 
-               status->signal = status->chain_signal[0];
-               if (status->chains & BIT(1))
-                       status->signal = max(status->signal,
-                                            status->chain_signal[1]);
-
                if (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0) == 1)
                        status->bw = RATE_INFO_BW_40;
 
@@ -1135,7 +1130,7 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
        }
 
        rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
-       rs_idx = !((u32)(FIELD_GET(MT_TXS1_F0_TIMESTAMP, le32_to_cpu(txs_data[1])) -
+       rs_idx = !((u32)(le32_get_bits(txs_data[1], MT_TXS1_F0_TIMESTAMP) -
                         rate_set_tsf) < 1000000);
        rs_idx ^= rate_set_tsf & BIT(0);
        rs = &sta->rateset[rs_idx];
@@ -1249,14 +1244,11 @@ void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
        struct mt7603_sta *msta = NULL;
        struct mt76_wcid *wcid;
        __le32 *txs_data = data;
-       u32 txs;
        u8 wcidx;
        u8 pid;
 
-       txs = le32_to_cpu(txs_data[4]);
-       pid = FIELD_GET(MT_TXS4_PID, txs);
-       txs = le32_to_cpu(txs_data[3]);
-       wcidx = FIELD_GET(MT_TXS3_WCID, txs);
+       pid = le32_get_bits(txs_data[4], MT_TXS4_PID);
+       wcidx = le32_get_bits(txs_data[3], MT_TXS3_WCID);
 
        if (pid == MT_PACKET_ID_NO_ACK)
                return;
index ca7efca..c26b45a 100644 (file)
@@ -443,11 +443,16 @@ mt7615_ext_mac_addr_read(struct file *file, char __user *userbuf,
                         size_t count, loff_t *ppos)
 {
        struct mt7615_dev *dev = file->private_data;
-       char buf[32 * ((ETH_ALEN * 3) + 4) + 1];
+       u32 len = 32 * ((ETH_ALEN * 3) + 4) + 1;
        u8 addr[ETH_ALEN];
+       char *buf;
        int ofs = 0;
        int i;
 
+       buf = kzalloc(len, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
        for (i = 0; i < 32; i++) {
                if (!(dev->muar_mask & BIT(i)))
                        continue;
@@ -458,10 +463,13 @@ mt7615_ext_mac_addr_read(struct file *file, char __user *userbuf,
                put_unaligned_le32(mt76_rr(dev, MT_WF_RMAC_MAR0), addr);
                put_unaligned_le16((mt76_rr(dev, MT_WF_RMAC_MAR1) &
                                    MT_WF_RMAC_MAR1_ADDR), addr + 4);
-               ofs += snprintf(buf + ofs, sizeof(buf) - ofs, "%d=%pM\n", i, addr);
+               ofs += snprintf(buf + ofs, len - ofs, "%d=%pM\n", i, addr);
        }
 
-       return simple_read_from_buffer(userbuf, count, ppos, buf, ofs);
+       ofs = simple_read_from_buffer(userbuf, count, ppos, buf, ofs);
+
+       kfree(buf);
+       return ofs;
 }
 
 static ssize_t
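
The hunk above moves a 705-byte buffer (32 * ((ETH_ALEN * 3) + 4) + 1) off
the kernel stack, where frame sizes are tightly limited, onto the heap. A
minimal sketch of the resulting debugfs read pattern (hypothetical
example_read, not from the patch):

static ssize_t example_read(struct file *file, char __user *userbuf,
                            size_t count, loff_t *ppos)
{
        u32 len = 32 * ((ETH_ALEN * 3) + 4) + 1; /* 705 bytes */
        ssize_t ret;
        char *buf;
        int ofs = 0;

        buf = kzalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* ... snprintf() entries into buf, advancing ofs ... */

        ret = simple_read_from_buffer(userbuf, count, ppos, buf, ofs);
        kfree(buf); /* released on every return path */
        return ret;
}
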
index f035cd8..bd687f7 100644 (file)
@@ -259,9 +259,9 @@ static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
        struct ieee80211_sta *sta;
        struct ieee80211_vif *vif;
        struct ieee80211_hdr hdr;
-       __le32 qos_ctrl, ht_ctrl;
+       u16 frame_control;
 
-       if (FIELD_GET(MT_RXD1_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[1])) !=
+       if (le32_get_bits(rxd[1], MT_RXD1_NORMAL_ADDR_TYPE) !=
            MT_RXD1_NORMAL_U2M)
                return -EINVAL;
 
@@ -275,16 +275,15 @@ static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
        vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
 
        /* store the info from RXD and ethhdr to avoid being overridden */
-       hdr.frame_control = FIELD_GET(MT_RXD4_FRAME_CONTROL, rxd[4]);
-       hdr.seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, rxd[6]);
-       qos_ctrl = FIELD_GET(MT_RXD6_QOS_CTL, rxd[6]);
-       ht_ctrl = FIELD_GET(MT_RXD7_HT_CONTROL, rxd[7]);
-
+       frame_control = le32_get_bits(rxd[4], MT_RXD4_FRAME_CONTROL);
+       hdr.frame_control = cpu_to_le16(frame_control);
+       hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_SEQ_CTRL));
        hdr.duration_id = 0;
+
        ether_addr_copy(hdr.addr1, vif->addr);
        ether_addr_copy(hdr.addr2, sta->addr);
-       switch (le16_to_cpu(hdr.frame_control) &
-               (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
+       switch (frame_control & (IEEE80211_FCTL_TODS |
+                                IEEE80211_FCTL_FROMDS)) {
        case 0:
                ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
                break;
@@ -306,15 +305,23 @@ static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
        if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
            eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
                ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
-       else if (eth_hdr->h_proto >= cpu_to_be16(ETH_P_802_3_MIN))
+       else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
                ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
        else
                skb_pull(skb, 2);
 
        if (ieee80211_has_order(hdr.frame_control))
-               memcpy(skb_push(skb, 2), &ht_ctrl, 2);
-       if (ieee80211_is_data_qos(hdr.frame_control))
-               memcpy(skb_push(skb, 2), &qos_ctrl, 2);
+               memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[7],
+                      IEEE80211_HT_CTL_LEN);
+
+       if (ieee80211_is_data_qos(hdr.frame_control)) {
+               __le16 qos_ctrl;
+
+               qos_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_QOS_CTL));
+               memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
+                      IEEE80211_QOS_CTL_LEN);
+       }
+
        if (ieee80211_has_a4(hdr.frame_control))
                memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
        else
@@ -569,15 +576,6 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
                status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
                status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
                status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
-               status->signal = status->chain_signal[0];
-
-               for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
-                       if (!(status->chains & BIT(i)))
-                               continue;
-
-                       status->signal = max(status->signal,
-                                            status->chain_signal[i]);
-               }
 
                mt7615_mac_fill_tm_rx(mphy->priv, rxd);
 
@@ -1429,7 +1427,7 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
        }
 
        rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
-       rs_idx = !((u32)(FIELD_GET(MT_TXS4_F0_TIMESTAMP, le32_to_cpu(txs_data[4])) -
+       rs_idx = !((u32)(le32_get_bits(txs_data[4], MT_TXS4_F0_TIMESTAMP) -
                         rate_set_tsf) < 1000000);
        rs_idx ^= rate_set_tsf & BIT(0);
        rs = &sta->rateset[rs_idx];
@@ -1560,14 +1558,11 @@ static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
        struct mt76_wcid *wcid;
        struct mt76_phy *mphy = &dev->mt76.phy;
        __le32 *txs_data = data;
-       u32 txs;
        u8 wcidx;
        u8 pid;
 
-       txs = le32_to_cpu(txs_data[0]);
-       pid = FIELD_GET(MT_TXS0_PID, txs);
-       txs = le32_to_cpu(txs_data[2]);
-       wcidx = FIELD_GET(MT_TXS2_WCID, txs);
+       pid = le32_get_bits(txs_data[0], MT_TXS0_PID);
+       wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
 
        if (pid == MT_PACKET_ID_NO_ACK)
                return;
@@ -1655,7 +1650,7 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, void *data, int len)
                        mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
        }
 
-       count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
+       count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_ID_CNT);
        if (is_mt7615(&dev->mt76)) {
                __le16 *token = &free->token[0];
 
@@ -1688,7 +1683,8 @@ bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len)
        __le32 *end = (__le32 *)&rxd[len / 4];
        enum rx_pkt_type type;
 
-       type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+       type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+
        switch (type) {
        case PKT_TYPE_TXRX_NOTIFY:
                mt7615_mac_tx_free(dev, data, len);
@@ -1712,8 +1708,8 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
        enum rx_pkt_type type;
        u16 flag;
 
-       type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
-       flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
+       type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+       flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG);
        if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
                type = PKT_TYPE_NORMAL_MCU;
 
@@ -1862,7 +1858,7 @@ mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy,
        struct mt7615_dev *dev = phy->dev;
        int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck;
        bool ext_phy = phy != &dev->phy;
-       u16 def_th = ofdm ? -98 : -110;
+       s16 def_th = ofdm ? -98 : -110;
        bool update = false;
        s8 *sensitivity;
        int signal;
index 7dcf1fb..d79cbdb 100644 (file)
@@ -431,6 +431,29 @@ out:
        return err;
 }
 
+static int mt7615_set_sar_specs(struct ieee80211_hw *hw,
+                               const struct cfg80211_sar_specs *sar)
+{
+       struct mt7615_phy *phy = mt7615_hw_phy(hw);
+       int err;
+
+       if (!cfg80211_chandef_valid(&phy->mt76->chandef))
+               return -EINVAL;
+
+       err = mt76_init_sar_power(hw, sar);
+       if (err)
+               return err;
+
+       if (mt7615_firmware_offload(phy->dev))
+               return mt76_connac_mcu_set_rate_txpower(phy->mt76);
+
+       ieee80211_stop_queues(hw);
+       err = mt7615_set_channel(phy);
+       ieee80211_wake_queues(hw);
+
+       return err;
+}
+
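
For context on mt7615_set_sar_specs() above: cfg80211 passes per-frequency-
range power limits, mt76_init_sar_power() caches them on the phy, and the
cached limit is consumed when the power budget is computed, as in the
mt7615_mcu_set_txpower_sku() hunk below. A hedged sketch of that consumption
step (assuming power_level is in dBm and mt76 tracks power in 0.5 dB units):

/* Clamp the configured power by the SAR limit for this channel, then
 * subtract the multi-chain delta before applying per-rate limits.
 */
int tx_power = hw->conf.power_level * 2; /* dBm -> 0.5 dB units */

tx_power = mt76_get_sar_power(mphy, mphy->chandef.chan, tx_power);
tx_power -= mt76_tx_power_nss_delta(n_chains);
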
 static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
 {
        struct mt7615_dev *dev = mt7615_hw_dev(hw);
@@ -1333,6 +1356,7 @@ const struct ieee80211_ops mt7615_ops = {
        .set_wakeup = mt7615_set_wakeup,
        .set_rekey_data = mt7615_set_rekey_data,
 #endif /* CONFIG_PM */
+       .set_sar_specs = mt7615_set_sar_specs,
 };
 EXPORT_SYMBOL_GPL(mt7615_ops);
 
index f992e12..97e2a85 100644 (file)
@@ -880,7 +880,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
                                                 NULL, wtbl_hdr);
                if (sta)
                        mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, wskb, sta,
-                                                   NULL, wtbl_hdr, true);
+                                                   NULL, wtbl_hdr, true, true);
                mt76_connac_mcu_wtbl_hdr_trans_tlv(wskb, vif, &msta->wcid,
                                                   NULL, wtbl_hdr);
        }
@@ -2020,7 +2020,7 @@ static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
        struct mt76_power_limits limits;
        s8 *limits_array = (s8 *)&limits;
        int n_chains = hweight8(mphy->antenna_mask);
-       int tx_power;
+       int tx_power = hw->conf.power_level * 2;
        int i;
        static const u8 sku_mapping[] = {
 #define SKU_FIELD(_type, _field) \
@@ -2077,9 +2077,8 @@ static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
 #undef SKU_FIELD
        };
 
-       tx_power = hw->conf.power_level * 2 -
-                  mt76_tx_power_nss_delta(n_chains);
-
+       tx_power = mt76_get_sar_power(mphy, mphy->chandef.chan, tx_power);
+       tx_power -= mt76_tx_power_nss_delta(n_chains);
        tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
                                              &limits, tx_power);
        mphy->txpower_cur = tx_power;
@@ -2152,10 +2151,13 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
                .center_chan2 = ieee80211_frequency_to_channel(freq2),
        };
 
-       if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+       if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
+           dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
+               req.switch_reason = CH_SWITCH_NORMAL;
+       else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
                req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
-       else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
-                chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+       else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
+                                         NL80211_IFTYPE_AP))
                req.switch_reason = CH_SWITCH_DFS;
        else
                req.switch_reason = CH_SWITCH_NORMAL;
index 600fa2b..2e91f6a 100644 (file)
@@ -559,6 +559,7 @@ void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
                                     struct mt76_queue_entry *e);
 int mt7663_usb_sdio_register_device(struct mt7615_dev *dev);
 int mt7663u_mcu_init(struct mt7615_dev *dev);
+int mt7663u_mcu_power_on(struct mt7615_dev *dev);
 
 /* sdio */
 int mt7663s_mcu_init(struct mt7615_dev *dev);
index 5cad398..967641a 100644 (file)
@@ -21,6 +21,64 @@ static const struct usb_device_id mt7615_device_table[] = {
        { },
 };
 
+static u32 mt7663u_rr(struct mt76_dev *dev, u32 addr)
+{
+       u32 ret;
+
+       mutex_lock(&dev->usb.usb_ctrl_mtx);
+       ret = ___mt76u_rr(dev, MT_VEND_READ_EXT,
+                         USB_DIR_IN | USB_TYPE_VENDOR, addr);
+       mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+       return ret;
+}
+
+static void mt7663u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+       mutex_lock(&dev->usb.usb_ctrl_mtx);
+       ___mt76u_wr(dev, MT_VEND_WRITE_EXT,
+                   USB_DIR_OUT | USB_TYPE_VENDOR, addr, val);
+       mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static u32 mt7663u_rmw(struct mt76_dev *dev, u32 addr,
+                      u32 mask, u32 val)
+{
+       mutex_lock(&dev->usb.usb_ctrl_mtx);
+       val |= ___mt76u_rr(dev, MT_VEND_READ_EXT,
+                          USB_DIR_IN | USB_TYPE_VENDOR, addr) & ~mask;
+       ___mt76u_wr(dev, MT_VEND_WRITE_EXT,
+                   USB_DIR_OUT | USB_TYPE_VENDOR, addr, val);
+       mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+       return val;
+}
+
+static void mt7663u_copy(struct mt76_dev *dev, u32 offset,
+                        const void *data, int len)
+{
+       struct mt76_usb *usb = &dev->usb;
+       int ret, i = 0, batch_len;
+       const u8 *val = data;
+
+       len = round_up(len, 4);
+
+       mutex_lock(&usb->usb_ctrl_mtx);
+       while (i < len) {
+               batch_len = min_t(int, usb->data_len, len - i);
+               memcpy(usb->data, val + i, batch_len);
+               ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
+                                            USB_DIR_OUT | USB_TYPE_VENDOR,
+                                            (offset + i) >> 16, offset + i,
+                                            usb->data, batch_len);
+               if (ret < 0)
+                       break;
+
+               i += batch_len;
+       }
+       mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
 static void mt7663u_stop(struct ieee80211_hw *hw)
 {
        struct mt7615_phy *phy = mt7615_hw_phy(hw);
@@ -66,6 +124,14 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
                .sta_remove = mt7615_mac_sta_remove,
                .update_survey = mt7615_update_channel,
        };
+       static struct mt76_bus_ops bus_ops = {
+               .rr = mt7663u_rr,
+               .wr = mt7663u_wr,
+               .rmw = mt7663u_rmw,
+               .read_copy = mt76u_read_copy,
+               .write_copy = mt7663u_copy,
+               .type = MT76_BUS_USB,
+       };
        struct usb_device *udev = interface_to_usbdev(usb_intf);
        struct ieee80211_ops *ops;
        struct mt7615_dev *dev;
@@ -92,7 +158,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
        INIT_WORK(&dev->mcu_work, mt7663u_init_work);
        dev->reg_map = mt7663_usb_sdio_reg_map;
        dev->ops = ops;
-       ret = mt76u_init(mdev, usb_intf, true);
+       ret = __mt76u_init(mdev, usb_intf, &bus_ops);
        if (ret < 0)
                goto error;
 
@@ -100,27 +166,15 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
                    (mt76_rr(dev, MT_HW_REV) & 0xff);
        dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
 
-       if (mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
-                          FW_STATE_PWR_ON << 1, 500)) {
-               dev_dbg(dev->mt76.dev, "Usb device already powered on\n");
-               set_bit(MT76_STATE_POWER_OFF, &dev->mphy.state);
-               goto alloc_queues;
-       }
-
-       ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
-                                  USB_DIR_OUT | USB_TYPE_VENDOR,
-                                  0x0, 0x1, NULL, 0);
-       if (ret)
-               goto error;
-
        if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
                            FW_STATE_PWR_ON << 1, 500)) {
-               dev_err(dev->mt76.dev, "Timeout for power on\n");
-               ret = -EIO;
-               goto error;
+               ret = mt7663u_mcu_power_on(dev);
+               if (ret)
+                       goto error;
+       } else {
+               set_bit(MT76_STATE_POWER_OFF, &dev->mphy.state);
        }
 
-alloc_queues:
        ret = mt76u_alloc_mcu_queue(&dev->mt76);
        if (ret)
                goto error;
index 0ebb4c3..98bf2f6 100644 (file)
@@ -42,6 +42,26 @@ out:
        return ret;
 }
 
+int mt7663u_mcu_power_on(struct mt7615_dev *dev)
+{
+       int ret;
+
+       ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
+                                  USB_DIR_OUT | USB_TYPE_VENDOR,
+                                  0x0, 0x1, NULL, 0);
+       if (ret)
+               return ret;
+
+       if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
+                           MT_TOP_MISC2_FW_PWR_ON,
+                           FW_STATE_PWR_ON << 1, 500)) {
+               dev_err(dev->mt76.dev, "Timeout for power on\n");
+               ret = -EIO;
+       }
+
+       return ret;
+}
+
 int mt7663u_mcu_init(struct mt7615_dev *dev)
 {
        static const struct mt76_mcu_ops mt7663u_mcu_ops = {
@@ -57,23 +77,17 @@ int mt7663u_mcu_init(struct mt7615_dev *dev)
 
        mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
        if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) {
-               mt7615_mcu_restart(&dev->mt76);
+               ret = mt7615_mcu_restart(&dev->mt76);
+               if (ret)
+                       return ret;
+
                if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
                                    MT_TOP_MISC2_FW_PWR_ON, 0, 500))
                        return -EIO;
 
-               ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
-                                          USB_DIR_OUT | USB_TYPE_VENDOR,
-                                          0x0, 0x1, NULL, 0);
+               ret = mt7663u_mcu_power_on(dev);
                if (ret)
                        return ret;
-
-               if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
-                                   MT_TOP_MISC2_FW_PWR_ON,
-                                   FW_STATE_PWR_ON << 1, 500)) {
-                       dev_err(dev->mt76.dev, "Timeout for power on\n");
-                       return -EIO;
-               }
        }
 
        ret = __mt7663_load_firmware(dev);
index e624843..400ba51 100644 (file)
@@ -117,6 +117,11 @@ static inline bool is_mt7916(struct mt76_dev *dev)
        return mt76_chip(dev) == 0x7906;
 }
 
+static inline bool is_mt7986(struct mt76_dev *dev)
+{
+       return mt76_chip(dev) == 0x7986;
+}
+
 static inline bool is_mt7622(struct mt76_dev *dev)
 {
        if (!IS_ENABLED(CONFIG_MT7622_WMAC))
index cdd82a6..7cb17bf 100644 (file)
@@ -899,24 +899,33 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_smps_tlv);
 
 void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
                                 struct ieee80211_sta *sta, void *sta_wtbl,
-                                void *wtbl_tlv, bool ldpc)
+                                void *wtbl_tlv, bool ht_ldpc, bool vht_ldpc)
 {
        struct wtbl_ht *ht = NULL;
        struct tlv *tlv;
        u32 flags = 0;
 
-       if (sta->ht_cap.ht_supported) {
+       if (sta->ht_cap.ht_supported || sta->he_6ghz_capa.capa) {
                tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
                                                     wtbl_tlv, sta_wtbl);
                ht = (struct wtbl_ht *)tlv;
-               ht->ldpc = ldpc &&
+               ht->ldpc = ht_ldpc &&
                           !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
-               ht->af = sta->ht_cap.ampdu_factor;
-               ht->mm = sta->ht_cap.ampdu_density;
+
+               if (sta->ht_cap.ht_supported) {
+                       ht->af = sta->ht_cap.ampdu_factor;
+                       ht->mm = sta->ht_cap.ampdu_density;
+               } else {
+                       ht->af = le16_get_bits(sta->he_6ghz_capa.capa,
+                                              IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+                       ht->mm = le16_get_bits(sta->he_6ghz_capa.capa,
+                                              IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
+               }
+
                ht->ht = true;
        }
 
-       if (sta->vht_cap.vht_supported) {
+       if (sta->vht_cap.vht_supported || sta->he_6ghz_capa.capa) {
                struct wtbl_vht *vht;
                u8 af;
 
@@ -924,7 +933,7 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
                                                     sizeof(*vht), wtbl_tlv,
                                                     sta_wtbl);
                vht = (struct wtbl_vht *)tlv;
-               vht->ldpc = ldpc &&
+               vht->ldpc = vht_ldpc &&
                            !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
                vht->vht = true;
 
@@ -1004,7 +1013,8 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
                                                   sta_wtbl, wtbl_hdr);
                if (info->sta)
                        mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta,
-                                                   sta_wtbl, wtbl_hdr, true);
+                                                   sta_wtbl, wtbl_hdr,
+                                                   true, true);
        }
 
        return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
@@ -1044,7 +1054,7 @@ void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
        }
 
        if (enable && tx) {
-               u8 ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
+               static const u8 ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
                int i;
 
                for (i = 7; i > 0; i--) {
@@ -1241,7 +1251,7 @@ u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
 
                if (he_cap && he_cap->has_he)
                        mode |= PHY_MODE_AX_24G;
-       } else if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) {
+       } else if (band == NL80211_BAND_5GHZ) {
                mode |= PHY_MODE_A;
 
                if (ht_cap->ht_supported)
@@ -1250,8 +1260,11 @@ u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
                if (vht_cap->vht_supported)
                        mode |= PHY_MODE_AC;
 
-               if (he_cap && he_cap->has_he && band == NL80211_BAND_5GHZ)
+               if (he_cap && he_cap->has_he)
                        mode |= PHY_MODE_AX_5G;
+       } else if (band == NL80211_BAND_6GHZ) {
+               mode |= PHY_MODE_A | PHY_MODE_AN |
+                       PHY_MODE_AC | PHY_MODE_AX_5G;
        }
 
        return mode;
@@ -1501,7 +1514,6 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
        int ext_channels_num = max_t(int, sreq->n_channels - 32, 0);
        struct ieee80211_channel **scan_list = sreq->channels;
        struct mt76_dev *mdev = phy->dev;
-       bool ext_phy = phy == mdev->phy2;
        struct mt76_connac_mcu_scan_channel *chan;
        struct mt76_connac_hw_scan_req *req;
        struct sk_buff *skb;
@@ -1515,7 +1527,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
 
        req = (struct mt76_connac_hw_scan_req *)skb_put(skb, sizeof(*req));
 
-       req->seq_num = mvif->scan_seq_num | ext_phy << 7;
+       req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
        req->bss_idx = mvif->idx;
        req->scan_type = sreq->n_ssids ? 1 : 0;
        req->probe_req_num = sreq->n_ssids ? 2 : 0;
@@ -1623,7 +1635,6 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
        struct mt76_connac_mcu_scan_channel *chan;
        struct mt76_connac_sched_scan_req *req;
        struct mt76_dev *mdev = phy->dev;
-       bool ext_phy = phy == mdev->phy2;
        struct cfg80211_match_set *match;
        struct cfg80211_ssid *ssid;
        struct sk_buff *skb;
@@ -1637,7 +1648,7 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
 
        req = (struct mt76_connac_sched_scan_req *)skb_put(skb, sizeof(*req));
        req->version = 1;
-       req->seq_num = mvif->scan_seq_num | ext_phy << 7;
+       req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
 
        if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
                u8 *addr = is_mt7663(phy->dev) ? req->mt7663.random_mac
@@ -2656,7 +2667,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_ext_tlv);
 int mt76_connac_mcu_bss_basic_tlv(struct sk_buff *skb,
                                  struct ieee80211_vif *vif,
                                  struct ieee80211_sta *sta,
-                                 struct mt76_phy *phy, u8 wlan_idx,
+                                 struct mt76_phy *phy, u16 wlan_idx,
                                  bool enable)
 {
        struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
@@ -2664,11 +2675,25 @@ int mt76_connac_mcu_bss_basic_tlv(struct sk_buff *skb,
        struct bss_info_basic *bss;
        struct tlv *tlv;
 
+       tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
+       bss = (struct bss_info_basic *)tlv;
+
        switch (vif->type) {
        case NL80211_IFTYPE_MESH_POINT:
-       case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_MONITOR:
                break;
+       case NL80211_IFTYPE_AP:
+               if (ieee80211_hw_check(phy->hw, SUPPORTS_MULTI_BSSID)) {
+                       u8 bssid_id = vif->bss_conf.bssid_indicator;
+                       struct wiphy *wiphy = phy->hw->wiphy;
+
+                       if (bssid_id > ilog2(wiphy->mbssid_max_interfaces))
+                               return -EINVAL;
+
+                       bss->non_tx_bssid = vif->bss_conf.bssid_index;
+                       bss->max_bssid = bssid_id;
+               }
+               break;
        case NL80211_IFTYPE_STATION:
                if (enable) {
                        rcu_read_lock();
@@ -2693,9 +2718,6 @@ int mt76_connac_mcu_bss_basic_tlv(struct sk_buff *skb,
                break;
        }
 
-       tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
-
-       bss = (struct bss_info_basic *)tlv;
        bss->network_type = cpu_to_le32(type);
        bss->bmc_wcid_lo = to_wcid_lo(wlan_idx);
        bss->bmc_wcid_hi = to_wcid_hi(wlan_idx);
index 7b9d82d..c3c9333 100644 (file)
@@ -993,6 +993,7 @@ enum {
        MCU_UNI_CMD_SUSPEND = 0x05,
        MCU_UNI_CMD_OFFLOAD = 0x06,
        MCU_UNI_CMD_HIF_CTRL = 0x07,
+       MCU_UNI_CMD_SNIFFER = 0x24,
 };
 
 enum {
@@ -1561,7 +1562,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
                             u8 rcpi, u8 state);
 void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
                                 struct ieee80211_sta *sta, void *sta_wtbl,
-                                void *wtbl_tlv, bool ldpc);
+                                void *wtbl_tlv, bool ht_ldpc, bool vht_ldpc);
 void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
                                 struct ieee80211_ampdu_params *params,
                                 bool enable, bool tx, void *sta_wtbl,
@@ -1642,7 +1643,7 @@ void mt76_connac_mcu_bss_omac_tlv(struct sk_buff *skb,
 int mt76_connac_mcu_bss_basic_tlv(struct sk_buff *skb,
                                  struct ieee80211_vif *vif,
                                  struct ieee80211_sta *sta,
-                                 struct mt76_phy *phy, u8 wlan_idx,
+                                 struct mt76_phy *phy, u16 wlan_idx,
                                  bool enable);
 void mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
                               struct ieee80211_sta *sta);
index 436daf6..0422c33 100644 (file)
@@ -245,7 +245,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
        usb_set_intfdata(usb_intf, dev);
 
        mt76x02u_init_mcu(mdev);
-       ret = mt76u_init(mdev, usb_intf, false);
+       ret = mt76u_init(mdev, usb_intf);
        if (ret)
                goto err;
 
index dc2aeaa..2afad8c 100644 (file)
@@ -860,9 +860,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
                status->chain_signal[1] = mt76x02_mac_get_rssi(dev,
                                                               rxwi->rssi[1],
                                                               1);
-               signal = max_t(s8, signal, status->chain_signal[1]);
        }
-       status->signal = signal;
        status->freq = dev->mphy.chandef.chan->center_freq;
        status->band = dev->mphy.chandef.chan->band;
 
index 2575369..55068f3 100644 (file)
@@ -57,7 +57,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
        usb_set_intfdata(intf, dev);
 
        mt76x02u_init_mcu(mdev);
-       err = mt76u_init(mdev, intf, false);
+       err = mt76u_init(mdev, intf);
        if (err < 0)
                goto err;
 
index 6dc4708..f21282c 100644 (file)
@@ -12,3 +12,13 @@ config MT7915E
          OFDMA, spatial reuse and dual carrier modulation.
 
          To compile this driver as a module, choose M here.
+
+config MT7986_WMAC
+       bool "MT7986 (SoC) WMAC support"
+       depends on MT7915E
+       depends on ARCH_MEDIATEK || COMPILE_TEST
+       select REGMAP
+       help
+         This adds support for the built-in WMAC on the MT7986 SoC,
+         which has the same feature set as the MT7915 but also enables
+         Wi-Fi 6E support.
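
A hedged usage note: MT7986_WMAC is a bool guarded by MT7915E and
ARCH_MEDIATEK (or COMPILE_TEST), so a configuration enabling the SoC WMAC
might look like:

CONFIG_ARCH_MEDIATEK=y
CONFIG_MT7915E=m
CONFIG_MT7986_WMAC=y
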
index 80e4924..b794ceb 100644 (file)
@@ -6,3 +6,4 @@ mt7915e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \
             debugfs.o mmio.o
 
 mt7915e-$(CONFIG_NL80211_TESTMODE) += testmode.o
+mt7915e-$(CONFIG_MT7986_WMAC) += soc.o
\ No newline at end of file
index 280823f..4e1ecae 100644 (file)
@@ -548,12 +548,12 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
 
        /* Tx ampdu stat */
        for (i = 0; i < ARRAY_SIZE(range); i++)
-               range[i] = mt76_rr(dev, MT_MIB_ARNG(ext_phy, i));
+               range[i] = mt76_rr(dev, MT_MIB_ARNG(phy->band_idx, i));
 
        for (i = 0; i < ARRAY_SIZE(bound); i++)
                bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
 
-       seq_printf(file, "\nPhy %d\n", ext_phy);
+       seq_printf(file, "\nPhy %d, Phy band %d\n", ext_phy, phy->band_idx);
 
        seq_printf(file, "Length: %8d | ", bound[0]);
        for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
@@ -561,7 +561,7 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
                           bound[i] + 1, bound[i + 1]);
 
        seq_puts(file, "\nCount:  ");
-       n = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+       n = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
        for (i = 0; i < ARRAY_SIZE(bound); i++)
                seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i + n]);
        seq_puts(file, "\n");
@@ -898,7 +898,7 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
        debugfs_create_devm_seqfile(dev->mt76.dev, "twt_stats", dir,
                                    mt7915_twt_stats);
        debugfs_create_file("ser_trigger", 0200, dir, dev, &fops_ser_trigger);
-       if (!dev->dbdc_support || ext_phy) {
+       if (!dev->dbdc_support || phy->band_idx) {
                debugfs_create_u32("dfs_hw_pattern", 0400, dir,
                                   &dev->hw_pattern);
                debugfs_create_file("radar_trigger", 0200, dir, dev,
@@ -947,13 +947,13 @@ void mt7915_debugfs_rx_fw_monitor(struct mt7915_dev *dev, const void *data, int
                __le16 len;
        } hdr = {
                .magic = cpu_to_le32(FW_BIN_LOG_MAGIC),
-               .msg_type = PKT_TYPE_RX_FW_MONITOR,
+               .msg_type = cpu_to_le16(PKT_TYPE_RX_FW_MONITOR),
        };
 
        if (!dev->relay_fwlog)
                return;
 
-       hdr.timestamp = mt76_rr(dev, MT_LPON_FRCR(0));
+       hdr.timestamp = cpu_to_le32(mt76_rr(dev, MT_LPON_FRCR(0)));
        hdr.len = *(__le16 *)data;
        mt7915_debugfs_write_fwlog(dev, &hdr, sizeof(hdr), data, len);
 }
index 2dc2d6b..49b4d8a 100644 (file)
@@ -310,10 +310,12 @@ static int mt7915_dma_enable(struct mt7915_dev *dev)
        /* enable interrupts for TX/RX rings */
        irq_mask = MT_INT_RX_DONE_MCU |
                   MT_INT_TX_DONE_MCU |
-                  MT_INT_MCU_CMD |
-                  MT_INT_BAND0_RX_DONE;
+                  MT_INT_MCU_CMD;
 
-       if (dev->dbdc_support)
+       if (!dev->phy.band_idx)
+               irq_mask |= MT_INT_BAND0_RX_DONE;
+
+       if (dev->dbdc_support || dev->phy.band_idx)
                irq_mask |= MT_INT_BAND1_RX_DONE;
 
        mt7915_irq_enable(dev, irq_mask);
@@ -338,7 +340,7 @@ int mt7915_dma_init(struct mt7915_dev *dev)
 
        /* init tx queue */
        ret = mt7915_init_tx_queues(&dev->phy,
-                                   MT_TXQ_ID(0),
+                                   MT_TXQ_ID(dev->phy.band_idx),
                                    MT7915_TX_RING_SIZE,
                                    MT_TXQ_RING_BASE(0));
        if (ret)
@@ -387,13 +389,15 @@ int mt7915_dma_init(struct mt7915_dev *dev)
                return ret;
 
        /* rx data queue for band0 */
-       ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
-                              MT_RXQ_ID(MT_RXQ_MAIN),
-                              MT7915_RX_RING_SIZE,
-                              MT_RX_BUF_SIZE,
-                              MT_RXQ_RING_BASE(MT_RXQ_MAIN));
-       if (ret)
-               return ret;
+       if (!dev->phy.band_idx) {
+               ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
+                                      MT_RXQ_ID(MT_RXQ_MAIN),
+                                      MT7915_RX_RING_SIZE,
+                                      MT_RX_BUF_SIZE,
+                                      MT_RXQ_RING_BASE(MT_RXQ_MAIN));
+               if (ret)
+                       return ret;
+       }
 
        /* tx free notify event from WA for band0 */
        if (!is_mt7915(mdev)) {
@@ -406,7 +410,7 @@ int mt7915_dma_init(struct mt7915_dev *dev)
                        return ret;
        }
 
-       if (dev->dbdc_support) {
+       if (dev->dbdc_support || dev->phy.band_idx) {
                /* rx data queue for band1 */
                ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT],
                                       MT_RXQ_ID(MT_RXQ_EXT),
index 6aa749b..5b133bc 100644 (file)
@@ -36,27 +36,48 @@ static int mt7915_check_eeprom(struct mt7915_dev *dev)
        switch (val) {
        case 0x7915:
        case 0x7916:
+       case 0x7986:
                return 0;
        default:
                return -EINVAL;
        }
 }
 
+static char *mt7915_eeprom_name(struct mt7915_dev *dev)
+{
+       switch (mt76_chip(&dev->mt76)) {
+       case 0x7915:
+               return dev->dbdc_support ?
+                      MT7915_EEPROM_DEFAULT_DBDC : MT7915_EEPROM_DEFAULT;
+       case 0x7986:
+               switch (mt7915_check_adie(dev, true)) {
+               case MT7976_ONE_ADIE_DBDC:
+                       return MT7986_EEPROM_MT7976_DEFAULT_DBDC;
+               case MT7975_ONE_ADIE:
+                       return MT7986_EEPROM_MT7975_DEFAULT;
+               case MT7976_ONE_ADIE:
+                       return MT7986_EEPROM_MT7976_DEFAULT;
+               case MT7975_DUAL_ADIE:
+                       return MT7986_EEPROM_MT7975_DUAL_DEFAULT;
+               case MT7976_DUAL_ADIE:
+                       return MT7986_EEPROM_MT7976_DUAL_DEFAULT;
+               default:
+                       break;
+               }
+               return NULL;
+       default:
+               return MT7916_EEPROM_DEFAULT;
+       }
+}
+
 static int
 mt7915_eeprom_load_default(struct mt7915_dev *dev)
 {
-       char *default_bin = MT7915_EEPROM_DEFAULT;
        u8 *eeprom = dev->mt76.eeprom.data;
        const struct firmware *fw = NULL;
        int ret;
 
-       if (dev->dbdc_support)
-               default_bin = MT7915_EEPROM_DEFAULT_DBDC;
-
-       if (!is_mt7915(&dev->mt76))
-               default_bin = MT7916_EEPROM_DEFAULT;
-
-       ret = request_firmware(&fw, default_bin, dev->mt76.dev);
+       ret = request_firmware(&fw, mt7915_eeprom_name(dev), dev->mt76.dev);
        if (ret)
                return ret;
 
@@ -109,14 +130,29 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
 static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
 {
        struct mt7915_dev *dev = phy->dev;
-       bool ext_phy = phy != &dev->phy;
        u8 *eeprom = dev->mt76.eeprom.data;
        u32 val;
 
-       val = eeprom[MT_EE_WIFI_CONF + ext_phy];
+       val = eeprom[MT_EE_WIFI_CONF + phy->band_idx];
        val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
-       if (val == MT_EE_BAND_SEL_DEFAULT && dev->dbdc_support)
-               val = ext_phy ? MT_EE_BAND_SEL_5GHZ : MT_EE_BAND_SEL_2GHZ;
+
+       if (!is_mt7915(&dev->mt76)) {
+               switch (val) {
+               case MT_EE_V2_BAND_SEL_5GHZ:
+                       phy->mt76->cap.has_5ghz = true;
+                       return;
+               case MT_EE_V2_BAND_SEL_6GHZ:
+                       phy->mt76->cap.has_6ghz = true;
+                       return;
+               case MT_EE_V2_BAND_SEL_5GHZ_6GHZ:
+                       phy->mt76->cap.has_5ghz = true;
+                       phy->mt76->cap.has_6ghz = true;
+                       return;
+               default:
+                       phy->mt76->cap.has_2ghz = true;
+                       return;
+               }
+       }
 
        switch (val) {
        case MT_EE_BAND_SEL_5GHZ:
@@ -135,7 +171,7 @@ static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
 void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev,
                                struct mt7915_phy *phy)
 {
-       u8 nss, nss_band, *eeprom = dev->mt76.eeprom.data;
+       u8 nss, nss_band, nss_band_max, *eeprom = dev->mt76.eeprom.data;
        struct mt76_phy *mphy = phy->mt76;
        bool ext_phy = phy != &dev->phy;
 
@@ -147,7 +183,7 @@ void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev,
                                eeprom[MT_EE_WIFI_CONF]);
        } else {
                nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
-                               eeprom[MT_EE_WIFI_CONF + ext_phy]);
+                               eeprom[MT_EE_WIFI_CONF + phy->band_idx]);
        }
 
        if (!nss || nss > 4)
@@ -155,32 +191,42 @@ void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev,
 
        /* read tx/rx stream */
        nss_band = nss;
+
        if (dev->dbdc_support) {
                if (is_mt7915(&dev->mt76)) {
                        nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
                                             eeprom[MT_EE_WIFI_CONF + 3]);
-                       if (ext_phy)
+                       if (phy->band_idx)
                                nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B1,
                                                     eeprom[MT_EE_WIFI_CONF + 3]);
                } else {
                        nss_band = FIELD_GET(MT_EE_WIFI_CONF_STREAM_NUM,
-                                            eeprom[MT_EE_WIFI_CONF + 2 + ext_phy]);
+                                            eeprom[MT_EE_WIFI_CONF + 2 + phy->band_idx]);
                }
 
-               if (!nss_band || nss_band > 2)
-                       nss_band = 2;
+               nss_band_max = is_mt7986(&dev->mt76) ?
+                              MT_EE_NSS_MAX_DBDC_MA7986 : MT_EE_NSS_MAX_DBDC_MA7915;
+       } else {
+               nss_band_max = is_mt7986(&dev->mt76) ?
+                              MT_EE_NSS_MAX_MA7986 : MT_EE_NSS_MAX_MA7915;
        }
 
+       if (!nss_band || nss_band > nss_band_max)
+               nss_band = nss_band_max;
+
        if (nss_band > nss) {
-               dev_err(dev->mt76.dev,
-                       "nss mismatch, nss(%d) nss_band(%d) ext_phy(%d)\n",
-                       nss, nss_band, ext_phy);
+               dev_warn(dev->mt76.dev,
+                        "nss mismatch, nss(%d) nss_band(%d) band(%d) ext_phy(%d)\n",
+                        nss, nss_band, phy->band_idx, ext_phy);
                nss = nss_band;
        }
 
-       mphy->chainmask = ext_phy ? (BIT(nss_band) - 1) << 2 : (BIT(nss_band) - 1);
-       mphy->antenna_mask = BIT(hweight8(mphy->chainmask)) - 1;
+       mphy->chainmask = BIT(nss) - 1;
+       if (ext_phy)
+               mphy->chainmask <<= dev->chainshift;
+       mphy->antenna_mask = BIT(nss_band) - 1;
        dev->chainmask |= mphy->chainmask;
+       dev->chainshift = hweight8(dev->mphy.chainmask);
 }
 
 int mt7915_eeprom_init(struct mt7915_dev *dev)
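The new chainmask/antenna_mask derivation is easy to sanity-check in isolation; a minimal standalone C sketch (hweight8() modelled with the GCC/Clang __builtin_popcount(); the nss values are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int nss = 2, nss_band = 2;

	unsigned int main_mask = (1u << nss) - 1;                 /* 0x3 */
	unsigned int chainshift = __builtin_popcount(main_mask);  /* 2 */
	/* ext phy: same width, shifted past the main phy chains */
	unsigned int ext_mask = ((1u << nss) - 1) << chainshift;  /* 0xc */
	/* antenna_mask now tracks the per-band stream count */
	unsigned int ant_mask = (1u << nss_band) - 1;             /* 0x3 */

	printf("main %#x ext %#x ant %#x\n", main_mask, ext_mask, ant_mask);
	return 0;
}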
@@ -217,32 +263,43 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
 {
        u8 *eeprom = dev->mt76.eeprom.data;
        int index, target_power;
-       bool tssi_on;
+       bool tssi_on, is_7976;
 
        if (chain_idx > 3)
                return -EINVAL;
 
        tssi_on = mt7915_tssi_enabled(dev, chan->band);
+       is_7976 = mt7915_check_adie(dev, false) || is_mt7916(&dev->mt76);
 
        if (chan->band == NL80211_BAND_2GHZ) {
-               u32 power = is_mt7915(&dev->mt76) ?
-                       MT_EE_TX0_POWER_2G : MT_EE_TX0_POWER_2G_V2;
+               if (is_7976) {
+                       index = MT_EE_TX0_POWER_2G_V2 + chain_idx;
+                       target_power = eeprom[index];
+               } else {
+                       index = MT_EE_TX0_POWER_2G + chain_idx * 3;
+                       target_power = eeprom[index];
 
-               index = power + chain_idx * 3;
-               target_power = eeprom[index];
+                       if (!tssi_on)
+                               target_power += eeprom[index + 1];
+               }
+       } else if (chan->band == NL80211_BAND_5GHZ) {
+               int group = mt7915_get_channel_group_5g(chan->hw_value, is_7976);
 
-               if (!tssi_on)
-                       target_power += eeprom[index + 1];
-       } else {
-               int group = mt7915_get_channel_group(chan->hw_value);
-               u32 power = is_mt7915(&dev->mt76) ?
-                       MT_EE_TX0_POWER_5G : MT_EE_TX0_POWER_5G_V2;
+               if (is_7976) {
+                       index = MT_EE_TX0_POWER_5G_V2 + chain_idx * 5;
+                       target_power = eeprom[index + group];
+               } else {
+                       index = MT_EE_TX0_POWER_5G + chain_idx * 12;
+                       target_power = eeprom[index + group];
 
-               index = power + chain_idx * 12;
-               target_power = eeprom[index + group];
+                       if (!tssi_on)
+                               target_power += eeprom[index + 8];
+               }
+       } else {
+               int group = mt7915_get_channel_group_6g(chan->hw_value);
 
-               if (!tssi_on)
-                       target_power += eeprom[index + 8];
+               index = MT_EE_TX0_POWER_6G_V2 + chain_idx * 8;
+               target_power = is_7976 ? eeprom[index + group] : 0;
        }
 
        return target_power;
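The v2 (MT7976/MT7916) power-table lookups above reduce to fixed per-chain strides; as a quick reference (offsets from the eeprom field list in the next file):

/*
 * v2 layout (MT7976/MT7916), per the offsets in eeprom.h:
 *   2 GHz: 0x441 + chain              (one entry per chain)
 *   5 GHz: 0x445 + chain * 5 + group  (5 channel groups)
 *   6 GHz: 0x465 + chain * 8 + group  (8 channel groups)
 * e.g. chain 1, 5 GHz group 2 -> 0x445 + 5 + 2 = 0x44c
 */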
@@ -251,22 +308,20 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
 s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band)
 {
        u8 *eeprom = dev->mt76.eeprom.data;
-       u32 val;
+       u32 val, offs;
        s8 delta;
-       u32 rate_2g, rate_5g;
-
-       rate_2g = is_mt7915(&dev->mt76) ?
-               MT_EE_RATE_DELTA_2G : MT_EE_RATE_DELTA_2G_V2;
-
-       rate_5g = is_mt7915(&dev->mt76) ?
-               MT_EE_RATE_DELTA_5G : MT_EE_RATE_DELTA_5G_V2;
+       bool is_7976 = mt7915_check_adie(dev, false) || is_mt7916(&dev->mt76);
 
        if (band == NL80211_BAND_2GHZ)
-               val = eeprom[rate_2g];
+               offs = is_7976 ? MT_EE_RATE_DELTA_2G_V2 : MT_EE_RATE_DELTA_2G;
+       else if (band == NL80211_BAND_5GHZ)
+               offs = is_7976 ? MT_EE_RATE_DELTA_5G_V2 : MT_EE_RATE_DELTA_5G;
        else
-               val = eeprom[rate_5g];
+               offs = is_7976 ? MT_EE_RATE_DELTA_6G_V2 : 0;
+
+       val = eeprom[offs];
 
-       if (!(val & MT_EE_RATE_DELTA_EN))
+       if (!offs || !(val & MT_EE_RATE_DELTA_EN))
                return 0;
 
        delta = FIELD_GET(MT_EE_RATE_DELTA_MASK, val);
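A self-contained sketch of the rate-delta byte decode that follows this hunk (bit positions from the MT_EE_RATE_DELTA_* defines below; the magnitude mask and sign convention are assumed, as they are not shown in this excerpt):

#include <stdint.h>

static int decode_rate_delta(uint8_t val)
{
	if (!(val & 0x80))      /* MT_EE_RATE_DELTA_EN, BIT(7) */
		return 0;

	int delta = val & 0x3f; /* magnitude; mask assumed GENMASK(5, 0) */

	/* sign bit set -> positive delta (assumed convention) */
	return (val & 0x40) ? delta : -delta;
}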
drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
index 92d1a94..7578ac6 100644
@@ -25,8 +25,10 @@ enum mt7915_eeprom_field {
        MT_EE_TX0_POWER_5G =    0x34b,
        MT_EE_RATE_DELTA_2G_V2 = 0x7d3,
        MT_EE_RATE_DELTA_5G_V2 = 0x81e,
+       MT_EE_RATE_DELTA_6G_V2 = 0x884, /* 6g fields only appear in eeprom v2 */
        MT_EE_TX0_POWER_2G_V2 = 0x441,
        MT_EE_TX0_POWER_5G_V2 = 0x445,
+       MT_EE_TX0_POWER_6G_V2 = 0x465,
        MT_EE_ADIE_FT_VERSION = 0x9a0,
 
        __MT_EE_MAX =           0xe00,
@@ -56,6 +58,19 @@ enum mt7915_eeprom_field {
 #define MT_EE_RATE_DELTA_SIGN                  BIT(6)
 #define MT_EE_RATE_DELTA_EN                    BIT(7)
 
+#define MT_EE_NSS_MAX_MA7915                   4
+#define MT_EE_NSS_MAX_DBDC_MA7915              2
+#define MT_EE_NSS_MAX_MA7986                   4
+#define MT_EE_NSS_MAX_DBDC_MA7986              4
+
+enum mt7915_adie_sku {
+       MT7976_ONE_ADIE_DBDC = 0x7,
+       MT7975_ONE_ADIE = 0x8,
+       MT7976_ONE_ADIE = 0xa,
+       MT7975_DUAL_ADIE = 0xd,
+       MT7976_DUAL_ADIE = 0xf,
+};
+
 enum mt7915_eeprom_band {
        MT_EE_BAND_SEL_DEFAULT,
        MT_EE_BAND_SEL_5GHZ,
@@ -63,6 +78,13 @@ enum mt7915_eeprom_band {
        MT_EE_BAND_SEL_DUAL,
 };
 
+enum {
+       MT_EE_V2_BAND_SEL_2GHZ,
+       MT_EE_V2_BAND_SEL_5GHZ,
+       MT_EE_V2_BAND_SEL_6GHZ,
+       MT_EE_V2_BAND_SEL_5GHZ_6GHZ,
+};
+
 enum mt7915_sku_rate_group {
        SKU_CCK,
        SKU_OFDM,
@@ -83,8 +105,20 @@ enum mt7915_sku_rate_group {
 };
 
 static inline int
-mt7915_get_channel_group(int channel)
+mt7915_get_channel_group_5g(int channel, bool is_7976)
 {
+       if (is_7976) {
+               if (channel <= 64)
+                       return 0;
+               if (channel <= 96)
+                       return 1;
+               if (channel <= 128)
+                       return 2;
+               if (channel <= 144)
+                       return 3;
+               return 4;
+       }
+
        if (channel >= 184 && channel <= 196)
                return 0;
        if (channel <= 48)
@@ -102,6 +136,15 @@ mt7915_get_channel_group(int channel)
        return 7;
 }
 
+static inline int
+mt7915_get_channel_group_6g(int channel)
+{
+       if (channel <= 29)
+               return 0;
+
+       return DIV_ROUND_UP(channel - 29, 32);
+}
+
 static inline bool
 mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band)
 {
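A worked check of the new 6 GHz grouping (standalone sketch, with DIV_ROUND_UP expanded to its usual definition):

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int group_6g(int channel)
{
	if (channel <= 29)
		return 0;
	return DIV_ROUND_UP(channel - 29, 32);
}

int main(void)
{
	assert(group_6g(29) == 0);  /* channels up to 29: group 0 */
	assert(group_6g(33) == 1);  /* 30..61: group 1 */
	assert(group_6g(61) == 1);
	assert(group_6g(93) == 2);  /* 62..93: group 2 */
	return 0;
}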
drivers/net/wireless/mediatek/mt76/mt7915/init.c
index 705f362..6d29366 100644
@@ -50,15 +50,22 @@ static ssize_t mt7915_thermal_temp_show(struct device *dev,
        int i = to_sensor_dev_attr(attr)->index;
        int temperature;
 
-       if (i)
-               return sprintf(buf, "%u\n", phy->throttle_temp[i - 1] * 1000);
-
-       temperature = mt7915_mcu_get_temperature(phy);
-       if (temperature < 0)
-               return temperature;
-
-       /* display in millidegree celcius */
-       return sprintf(buf, "%u\n", temperature * 1000);
+       switch (i) {
+       case 0:
+               temperature = mt7915_mcu_get_temperature(phy);
+               if (temperature < 0)
+                       return temperature;
+               /* display in millidegree celsius */
+               return sprintf(buf, "%u\n", temperature * 1000);
+       case 1:
+       case 2:
+               return sprintf(buf, "%u\n",
+                              phy->throttle_temp[i - 1] * 1000);
+       case 3:
+               return sprintf(buf, "%hhu\n", phy->throttle_state);
+       default:
+               return -EINVAL;
+       }
 }
 
 static ssize_t mt7915_thermal_temp_store(struct device *dev,
@@ -84,11 +91,13 @@ static ssize_t mt7915_thermal_temp_store(struct device *dev,
 static SENSOR_DEVICE_ATTR_RO(temp1_input, mt7915_thermal_temp, 0);
 static SENSOR_DEVICE_ATTR_RW(temp1_crit, mt7915_thermal_temp, 1);
 static SENSOR_DEVICE_ATTR_RW(temp1_max, mt7915_thermal_temp, 2);
+static SENSOR_DEVICE_ATTR_RO(throttle1, mt7915_thermal_temp, 3);
 
 static struct attribute *mt7915_hwmon_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_crit.dev_attr.attr,
        &sensor_dev_attr_temp1_max.dev_attr.attr,
+       &sensor_dev_attr_throttle1.dev_attr.attr,
        NULL,
 };
 ATTRIBUTE_GROUPS(mt7915_hwmon);
@@ -97,7 +106,7 @@ static int
 mt7915_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
                                      unsigned long *state)
 {
-       *state = MT7915_THERMAL_THROTTLE_MAX;
+       *state = MT7915_CDEV_THROTTLE_MAX;
 
        return 0;
 }
@@ -108,7 +117,7 @@ mt7915_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
 {
        struct mt7915_phy *phy = cdev->devdata;
 
-       *state = phy->throttle_state;
+       *state = phy->cdev_state;
 
        return 0;
 }
@@ -118,22 +127,27 @@ mt7915_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
                                      unsigned long state)
 {
        struct mt7915_phy *phy = cdev->devdata;
+       u8 throttling = MT7915_THERMAL_THROTTLE_MAX - state;
        int ret;
 
-       if (state > MT7915_THERMAL_THROTTLE_MAX)
+       if (state > MT7915_CDEV_THROTTLE_MAX)
                return -EINVAL;
 
        if (phy->throttle_temp[0] > phy->throttle_temp[1])
                return 0;
 
-       if (state == phy->throttle_state)
+       if (state == phy->cdev_state)
                return 0;
 
-       ret = mt7915_mcu_set_thermal_throttling(phy, state);
+       /*
+        * cooling_device convention: 0 = no cooling, more = more cooling
+        * mcu convention: 1 = max cooling, more = less cooling
+        */
+       ret = mt7915_mcu_set_thermal_throttling(phy, throttling);
        if (ret)
                return ret;
 
-       phy->throttle_state = state;
+       phy->cdev_state = state;
 
        return 0;
 }
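The scale inversion above in a minimal sketch (THROTTLE_MAX is illustrative, not the driver's constant):

#define THROTTLE_MAX 100	/* illustrative only */

/* cooling_device scale: 0 = no cooling, larger = more cooling
 * MCU scale:            smaller = more cooling (1 = max)
 */
static unsigned int cdev_to_mcu(unsigned long state)
{
	return THROTTLE_MAX - (unsigned int)state;
}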
@@ -186,7 +200,8 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
        phy->throttle_temp[0] = 110;
        phy->throttle_temp[1] = 120;
 
-       return 0;
+       return mt7915_mcu_set_thermal_throttling(phy,
+                                                MT7915_THERMAL_THROTTLE_MAX);
 }
 
 static void mt7915_led_set_config(struct led_classdev *led_cdev,
@@ -297,6 +312,7 @@ mt7915_regd_notifier(struct wiphy *wiphy,
 
        mt7915_init_txpower(dev, &mphy->sband_2g.sband);
        mt7915_init_txpower(dev, &mphy->sband_5g.sband);
+       mt7915_init_txpower(dev, &mphy->sband_6g.sband);
 
        mphy->dfs_state = MT_DFS_STATE_UNKNOWN;
        mt7915_dfs_init_radar_detector(phy);
@@ -311,8 +327,8 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
        struct mt7915_dev *dev = phy->dev;
 
        hw->queues = 4;
-       hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
-       hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+       hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+       hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
        hw->netdev_features = NETIF_F_RXCSUM;
 
        hw->radiotap_timestamp.units_pos =
@@ -327,6 +343,7 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
        wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
        wiphy->reg_notifier = mt7915_regd_notifier;
        wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+       wiphy->mbssid_max_interfaces = 16;
 
        wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BSS_COLOR);
        wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
@@ -344,6 +361,7 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
        ieee80211_hw_set(hw, HAS_RATE_CONTROL);
        ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
        ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
+       ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
        ieee80211_hw_set(hw, WANT_MONITOR_VIF);
 
        hw->max_tx_fragments = 4;
@@ -486,6 +504,9 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev)
        phy->dev = dev;
        phy->mt76 = mphy;
 
+       /* Bind main phy to band0 and ext_phy to band1 for dbdc case */
+       phy->band_idx = 1;
+
        INIT_DELAYED_WORK(&mphy->mac_work, mt7915_mac_work);
 
        mt7915_eeprom_parse_hw_cap(dev, phy);
@@ -505,7 +526,7 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev)
 
        /* init wiphy according to mphy and phy */
        mt7915_init_wiphy(mphy->hw);
-       ret = mt7915_init_tx_queues(phy, MT_TXQ_ID(1),
+       ret = mt7915_init_tx_queues(phy, MT_TXQ_ID(phy->band_idx),
                                    MT7915_TX_RING_SIZE,
                                    MT_TXQ_RING_BASE(1));
        if (ret)
@@ -540,6 +561,7 @@ static void mt7915_init_work(struct work_struct *work)
        mt7915_mac_init(dev);
        mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
        mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
+       mt7915_init_txpower(dev, &dev->mphy.sband_6g.sband);
        mt7915_txbf_init(dev);
 }
 
@@ -561,7 +583,7 @@ static void mt7915_wfsys_reset(struct mt7915_dev *dev)
                val &= ~MT_TOP_PWR_SW_RST;
                mt76_wr(dev, MT_TOP_PWR_CTRL, val);
 
-               /* release wfsys then mcu re-excutes romcode */
+               /* release wfsys then mcu re-executes romcode */
                val |= MT_TOP_PWR_SW_RST;
                mt76_wr(dev, MT_TOP_PWR_CTRL, val);
 
@@ -582,6 +604,12 @@ static void mt7915_wfsys_reset(struct mt7915_dev *dev)
                mt76_clear(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE);
 
                msleep(100);
+       } else if (is_mt7986(&dev->mt76)) {
+               mt7986_wmac_disable(dev);
+               msleep(20);
+
+               mt7986_wmac_enable(dev);
+               msleep(20);
        } else {
                mt76_set(dev, MT_WF_SUBSYS_RST, 0x1);
                msleep(20);
@@ -591,6 +619,32 @@ static void mt7915_wfsys_reset(struct mt7915_dev *dev)
        }
 }
 
+static bool mt7915_band_config(struct mt7915_dev *dev)
+{
+       bool ret = true;
+
+       dev->phy.band_idx = 0;
+
+       if (is_mt7986(&dev->mt76)) {
+               u32 sku = mt7915_check_adie(dev, true);
+
+               /*
+                * for mt7986, dbdc support is determined by the number
+                * of adie chips and the main phy is bound to band1 when
+                * dbdc is disabled.
+                */
+               if (sku == MT7975_ONE_ADIE || sku == MT7976_ONE_ADIE) {
+                       dev->phy.band_idx = 1;
+                       ret = false;
+               }
+       } else {
+               ret = is_mt7915(&dev->mt76) ?
+                     !!(mt76_rr(dev, MT_HW_BOUND) & BIT(5)) : true;
+       }
+
+       return ret;
+}
+
 static int mt7915_init_hardware(struct mt7915_dev *dev)
 {
        int ret, idx;
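mt7915_band_config() condenses to a small decision table; a summary of the logic above:

/*
 * mt7915: dbdc follows the MT_HW_BOUND bit-5 strap; main phy on band 0.
 * mt7986: MT7975_ONE_ADIE / MT7976_ONE_ADIE -> no dbdc, main phy on
 *         band 1; all other aDie SKUs (incl. MT7976_ONE_ADIE_DBDC and
 *         the dual-aDie parts) -> dbdc, main phy on band 0.
 * others (mt7916): dbdc enabled, main phy on band 0.
 */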
@@ -599,8 +653,7 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
 
        INIT_WORK(&dev->init_work, mt7915_init_work);
 
-       dev->dbdc_support = is_mt7915(&dev->mt76) ?
-                           !!(mt76_rr(dev, MT_HW_BOUND) & BIT(5)) : true;
+       dev->dbdc_support = mt7915_band_config(dev);
 
        /* If MCU was already running, it is likely in a bad state */
        if (mt76_get_field(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE) >
@@ -675,11 +728,18 @@ void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy)
 }
 
 static void
-mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
+mt7915_set_stream_he_txbf_caps(struct mt7915_dev *dev,
+                              struct ieee80211_sta_he_cap *he_cap,
                               int vif, int nss)
 {
        struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
-       u8 c;
+       u8 c, nss_160;
+
+       /* Can do 1/2 of NSS streams in 160 MHz mode for mt7915 */
+       if (is_mt7915(&dev->mt76) && !dev->dbdc_support)
+               nss_160 = nss / 2;
+       else
+               nss_160 = nss;
 
 #ifdef CONFIG_MAC80211_MESH
        if (vif == NL80211_IFTYPE_MESH_POINT)
@@ -733,13 +793,21 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
        /* num_snd_dim
         * for mt7915, max supported nss is 2 for bw > 80MHz
         */
-       c = (nss - 1) |
-           IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2;
+       c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
+                      nss - 1) |
+           FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
+                      nss_160 - 1);
        elem->phy_cap_info[5] |= c;
 
        c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
            IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
        elem->phy_cap_info[6] |= c;
+
+       if (!is_mt7915(&dev->mt76)) {
+               c = IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
+                   IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
+               elem->phy_cap_info[7] |= c;
+       }
 }
 
 static void
@@ -767,9 +835,17 @@ static int
 mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
                    struct ieee80211_sband_iftype_data *data)
 {
+       struct mt7915_dev *dev = phy->dev;
        int i, idx = 0, nss = hweight8(phy->mt76->chainmask);
        u16 mcs_map = 0;
        u16 mcs_map_160 = 0;
+       u8 nss_160;
+
+       /* Can do 1/2 of NSS streams in 160 MHz mode for mt7915 */
+       if (is_mt7915(&dev->mt76) && !dev->dbdc_support)
+               nss_160 = nss / 2;
+       else
+               nss_160 = nss;
 
        for (i = 0; i < 8; i++) {
                if (i < nss)
@@ -777,8 +853,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
                else
                        mcs_map |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
 
-               /* Can do 1/2 of NSS streams in 160Mhz mode. */
-               if (i < nss / 2)
+               if (i < nss_160)
                        mcs_map_160 |= (IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2));
                else
                        mcs_map_160 |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
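With nss = 4 on a non-dbdc mt7915, nss_160 = 2 and the loop above produces the following 160 MHz MCS map; a standalone check (using mac80211's two-bit encoding, where 2 = MCS 0-11 and 3 = not supported):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int nss_160 = 2;	/* mt7915 non-dbdc: half of nss = 4 */
	uint16_t mcs_map_160 = 0;
	int i;

	for (i = 0; i < 8; i++)
		mcs_map_160 |= (uint16_t)((i < nss_160 ? 2 : 3) << (i * 2));

	assert(mcs_map_160 == 0xfffa);	/* streams 0-1 advertise MCS 0-11 */
	return 0;
}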
@@ -816,7 +891,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
                if (band == NL80211_BAND_2GHZ)
                        he_cap_elem->phy_cap_info[0] =
                                IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
-               else if (band == NL80211_BAND_5GHZ)
+               else
                        he_cap_elem->phy_cap_info[0] =
                                IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
                                IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
@@ -855,7 +930,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
                        if (band == NL80211_BAND_2GHZ)
                                he_cap_elem->phy_cap_info[0] |=
                                        IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G;
-                       else if (band == NL80211_BAND_5GHZ)
+                       else
                                he_cap_elem->phy_cap_info[0] |=
                                        IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G;
 
@@ -894,7 +969,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
                he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map_160);
                he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map_160);
 
-               mt7915_set_stream_he_txbf_caps(he_cap, i, nss);
+               mt7915_set_stream_he_txbf_caps(dev, he_cap, i, nss);
 
                memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
                if (he_cap_elem->phy_cap_info[6] &
@@ -905,6 +980,21 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
                                u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
                                               IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
                }
+
+               if (band == NL80211_BAND_6GHZ) {
+                       u16 cap = IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS |
+                                 IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
+
+                       cap |= u16_encode_bits(IEEE80211_HT_MPDU_DENSITY_8,
+                                              IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
+                              u16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
+                                              IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
+                              u16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+                                              IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
+
+                       data[idx].he_6ghz_capa.capa = cpu_to_le16(cap);
+               }
+
                idx++;
        }
 
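The 6 GHz capability word above is assembled with u16_encode_bits(), which is FIELD_PREP() for u16; a minimal userspace model (contiguous masks only, name is illustrative):

#include <stdint.h>

/* Places v into the bit range selected by mask: multiply by the mask's
 * lowest set bit (i.e. shift left to the field) and clamp to the mask.
 */
static inline uint16_t encode_bits16(uint16_t v, uint16_t mask)
{
	return (uint16_t)((v * (mask & -mask)) & mask);
}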
@@ -934,6 +1024,15 @@ void mt7915_set_stream_he_caps(struct mt7915_phy *phy)
                band->iftype_data = data;
                band->n_iftype_data = n;
        }
+
+       if (phy->mt76->cap.has_6ghz) {
+               data = phy->iftype[NL80211_BAND_6GHZ];
+               n = mt7915_init_he_caps(phy, NL80211_BAND_6GHZ, data);
+
+               band = &phy->mt76->sband_6g.sband;
+               band->iftype_data = data;
+               band->n_iftype_data = n;
+       }
 }
 
 static void mt7915_unregister_ext_phy(struct mt7915_dev *dev)
@@ -1011,5 +1110,8 @@ void mt7915_unregister_device(struct mt7915_dev *dev)
        mt7915_dma_cleanup(dev);
        tasklet_disable(&dev->irq_tasklet);
 
+       if (is_mt7986(&dev->mt76))
+               mt7986_wmac_disable(dev);
+
        mt76_free_device(&dev->mt76);
 }
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 08ee78f..e9e7efb 100644
@@ -226,8 +226,8 @@ mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
        u32 ru_h, ru_l;
        u8 ru, offs = 0;
 
-       ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
-       ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
+       ru_l = le32_get_bits(rxv[0], MT_PRXV_HE_RU_ALLOC_L);
+       ru_h = le32_get_bits(rxv[1], MT_PRXV_HE_RU_ALLOC_H);
        ru = (u8)(ru_l | ru_h << 4);
 
        status->bw = RATE_INFO_BW_HE_RU;
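The le32_get_bits() conversions in this and the following hunks are mechanical: FIELD_GET(MASK, le32_to_cpu(x)) becomes le32_get_bits(x, MASK), which additionally type-checks that x really is __le32. A userspace model (little-endian host assumed, contiguous masks only):

#include <stdint.h>

static inline uint32_t field_get32(uint32_t mask, uint32_t val)
{
	return (val & mask) >> __builtin_ctz(mask);	/* shift field to bit 0 */
}

static inline uint32_t le32_get_bits_model(uint32_t le, uint32_t mask)
{
	/* le32_to_cpu() is a no-op on a little-endian host */
	return field_get32(mask, le);
}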
@@ -349,14 +349,16 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
        case MT_PHY_TYPE_HE_SU:
                he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
                             HE_BITS(DATA1_UL_DL_KNOWN) |
-                            HE_BITS(DATA1_BEAM_CHANGE_KNOWN);
+                            HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
+                            HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
 
                he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
                             HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
                break;
        case MT_PHY_TYPE_HE_EXT_SU:
                he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
-                            HE_BITS(DATA1_UL_DL_KNOWN);
+                            HE_BITS(DATA1_UL_DL_KNOWN) |
+                            HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
 
                he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
                break;
@@ -398,9 +400,9 @@ static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
        struct ieee80211_sta *sta;
        struct ieee80211_vif *vif;
        struct ieee80211_hdr hdr;
-       __le32 qos_ctrl, ht_ctrl;
+       u16 frame_control;
 
-       if (FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[3])) !=
+       if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
            MT_RXD3_NORMAL_U2M)
                return -EINVAL;
 
@@ -414,16 +416,15 @@ static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
        vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
 
        /* store the info from RXD and ethhdr to avoid being overridden */
-       hdr.frame_control = FIELD_GET(MT_RXD6_FRAME_CONTROL, rxd[6]);
-       hdr.seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, rxd[8]);
-       qos_ctrl = FIELD_GET(MT_RXD8_QOS_CTL, rxd[8]);
-       ht_ctrl = FIELD_GET(MT_RXD9_HT_CONTROL, rxd[9]);
-
+       frame_control = le32_get_bits(rxd[6], MT_RXD6_FRAME_CONTROL);
+       hdr.frame_control = cpu_to_le16(frame_control);
+       hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_SEQ_CTRL));
        hdr.duration_id = 0;
+
        ether_addr_copy(hdr.addr1, vif->addr);
        ether_addr_copy(hdr.addr2, sta->addr);
-       switch (le16_to_cpu(hdr.frame_control) &
-               (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
+       switch (frame_control & (IEEE80211_FCTL_TODS |
+                                IEEE80211_FCTL_FROMDS)) {
        case 0:
                ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
                break;
@@ -445,15 +446,22 @@ static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
        if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
            eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
                ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
-       else if (eth_hdr->h_proto >= cpu_to_be16(ETH_P_802_3_MIN))
+       else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
                ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
        else
                skb_pull(skb, 2);
 
        if (ieee80211_has_order(hdr.frame_control))
-               memcpy(skb_push(skb, 2), &ht_ctrl, 2);
-       if (ieee80211_is_data_qos(hdr.frame_control))
-               memcpy(skb_push(skb, 2), &qos_ctrl, 2);
+               memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[9],
+                      IEEE80211_HT_CTL_LEN);
+       if (ieee80211_is_data_qos(hdr.frame_control)) {
+               __le16 qos_ctrl;
+
+               qos_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_QOS_CTL));
+               memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
+                      IEEE80211_QOS_CTL_LEN);
+       }
+
        if (ieee80211_has_a4(hdr.frame_control))
                memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
        else
@@ -587,11 +595,11 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
        u16 seq_ctrl = 0;
        u8 qos_ctl = 0;
        __le16 fc = 0;
-       int i, idx;
+       int idx;
 
        memset(status, 0, sizeof(*status));
 
-       if (rxd1 & MT_RXD1_NORMAL_BAND_IDX) {
+       if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->band_idx) {
                mphy = dev->mt76.phy2;
                if (!mphy)
                        return -EINVAL;
@@ -632,6 +640,8 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
        status->band = mphy->chandef.chan->band;
        if (status->band == NL80211_BAND_5GHZ)
                sband = &mphy->sband_5g.sband;
+       else if (status->band == NL80211_BAND_6GHZ)
+               sband = &mphy->sband_6g.sband;
        else
                sband = &mphy->sband_2g.sband;
 
@@ -747,15 +757,6 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
                status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
                status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
                status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
-               status->signal = status->chain_signal[0];
-
-               for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
-                       if (!(status->chains & BIT(i)))
-                               continue;
-
-                       status->signal = max(status->signal,
-                                            status->chain_signal[i]);
-               }
 
                /* RXD Group 5 - C-RXV */
                if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
@@ -764,9 +765,7 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
                                return -EINVAL;
                }
 
-               if (!is_mt7915(&dev->mt76) ||
-                   (is_mt7915(&dev->mt76) &&
-                    (rxd1 & MT_RXD1_NORMAL_GROUP_5))) {
+               if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) {
                        ret = mt7915_mac_fill_rx_rate(dev, status, sband, rxv);
                        if (ret < 0)
                                return ret;
@@ -859,13 +858,13 @@ mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
        __le32 *rxv_hdr = rxd + 2;
        __le32 *rxv = rxd + 4;
        u32 rcpi, ib_rssi, wb_rssi, v20, v21;
-       bool ext_phy;
+       u8 band_idx;
        s32 foe;
        u8 snr;
        int i;
 
-       ext_phy = FIELD_GET(MT_RXV_HDR_BAND_IDX, le32_to_cpu(rxv_hdr[1]));
-       if (ext_phy)
+       band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX);
+       if (band_idx && !phy->band_idx)
                phy = mt7915_ext_phy(dev);
 
        rcpi = le32_to_cpu(rxv[6]);
@@ -1106,6 +1105,7 @@ mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi,
        if (ieee80211_is_beacon(fc)) {
                txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
                txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
+               txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX, 0x18));
        }
 
        if (info->flags & IEEE80211_TX_CTL_INJECTED) {
@@ -1121,6 +1121,7 @@ mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi,
                val = MT_TXD3_SN_VALID |
                      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
                txwi[3] |= cpu_to_le32(val);
+               txwi[7] &= ~cpu_to_le32(MT_TXD7_HW_AMSDU);
        }
 
        val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
@@ -1181,7 +1182,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
        struct ieee80211_vif *vif = info->control.vif;
        struct mt76_phy *mphy = &dev->mphy;
        bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
-       u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
+       u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0, band_idx = 0;
        bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
        bool mcast = false;
        u16 tx_count = 15;
@@ -1192,6 +1193,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
 
                omac_idx = mvif->mt76.omac_idx;
                wmm_idx = mvif->mt76.wmm_idx;
+               band_idx = mvif->mt76.band_idx;
        }
 
        if (ext_phy && dev->mt76.phy2)
@@ -1218,7 +1220,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
              FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
              FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
 
-       if (ext_phy && q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
+       if (ext_phy || band_idx)
                val |= MT_TXD1_TGID;
 
        txwi[1] = cpu_to_le32(val);
@@ -1352,10 +1354,10 @@ mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
        u16 fc, tid;
        u32 val;
 
-       if (!sta || !sta->ht_cap.ht_supported)
+       if (!sta || !(sta->ht_cap.ht_supported || sta->he_cap.has_he))
                return;
 
-       tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
+       tid = le32_get_bits(txwi[1], MT_TXD1_TID);
        if (tid >= 6) /* skip VO queue */
                return;
 
@@ -1403,7 +1405,7 @@ mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
                if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
                        mt7915_tx_check_aggr(sta, txwi);
        } else {
-               wcid_idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1]));
+               wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
        }
 
        __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
@@ -1427,7 +1429,7 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
        bool v3, wake = false;
        u16 total, count = 0;
        u32 txd = le32_to_cpu(free->txd);
-       u32 *cur_info;
+       __le32 *cur_info;
 
        /* clean DMA queues and unmap buffers first */
        mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
@@ -1437,12 +1439,7 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
                mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
        }
 
-       /*
-        * TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE,
-        * to the time ack is received or dropped by hw (air + hw queue time).
-        * Should avoid accessing WTBL to get Tx airtime, and use it instead.
-        */
-       total = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
+       total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
        v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
        if (WARN_ON_ONCE((void *)&free->info[total >> v3] > end))
                return;
@@ -1560,6 +1557,8 @@ mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid,
 
                if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
                        sband = &mphy->sband_5g.sband;
+               else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
+                       sband = &mphy->sband_6g.sband;
                else
                        sband = &mphy->sband_2g.sband;
 
@@ -1633,18 +1632,13 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
        struct mt76_wcid *wcid;
        __le32 *txs_data = data;
        u16 wcidx;
-       u32 txs;
        u8 pid;
 
-       txs = le32_to_cpu(txs_data[0]);
-       if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1)
+       if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
                return;
 
-       txs = le32_to_cpu(txs_data[2]);
-       wcidx = FIELD_GET(MT_TXS2_WCID, txs);
-
-       txs = le32_to_cpu(txs_data[3]);
-       pid = FIELD_GET(MT_TXS3_PID, txs);
+       wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
+       pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
 
        if (pid < MT_PACKET_ID_FIRST)
                return;
@@ -1681,7 +1675,8 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
        __le32 *end = (__le32 *)&rxd[len / 4];
        enum rx_pkt_type type;
 
-       type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+       type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+
        switch (type) {
        case PKT_TYPE_TXRX_NOTIFY:
                mt7915_mac_tx_free(dev, data, len);
@@ -1706,7 +1701,7 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
        __le32 *end = (__le32 *)&skb->data[skb->len];
        enum rx_pkt_type type;
 
-       type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+       type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
 
        switch (type) {
        case PKT_TYPE_TXRX_NOTIFY:
@@ -1726,6 +1721,7 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
                break;
        case PKT_TYPE_RX_FW_MONITOR:
                mt7915_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
+               dev_kfree_skb(skb);
                break;
        case PKT_TYPE_NORMAL:
                if (!mt7915_mac_fill_rx(dev, skb)) {
@@ -1763,8 +1759,7 @@ void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
 void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
 {
        struct mt7915_dev *dev = phy->dev;
-       bool ext_phy = phy != &dev->phy;
-       u32 reg = MT_WF_PHY_RX_CTRL1(ext_phy);
+       u32 reg = MT_WF_PHY_RX_CTRL1(phy->band_idx);
 
        mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
        mt76_set(dev, reg, BIT(11) | BIT(9));
@@ -1773,25 +1768,22 @@ void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
 void mt7915_mac_reset_counters(struct mt7915_phy *phy)
 {
        struct mt7915_dev *dev = phy->dev;
-       bool ext_phy = phy != &dev->phy;
        int i;
 
        for (i = 0; i < 4; i++) {
-               mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
-               mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
+               mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
+               mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
        }
 
-       if (ext_phy) {
-               dev->mt76.phy2->survey_time = ktime_get_boottime();
+       i = 0;
+       phy->mt76->survey_time = ktime_get_boottime();
+       if (phy->band_idx)
                i = ARRAY_SIZE(dev->mt76.aggr_stats) / 2;
-       } else {
-               dev->mt76.phy.survey_time = ktime_get_boottime();
-               i = 0;
-       }
+
        memset(&dev->mt76.aggr_stats[i], 0, sizeof(dev->mt76.aggr_stats) / 2);
 
        /* reset airtime counters */
-       mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(ext_phy),
+       mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(phy->band_idx),
                 MT_WF_RMAC_MIB_RXTIME_CLR);
 
        mt7915_mcu_get_chan_mib_info(phy, true);
@@ -1801,29 +1793,23 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
 {
        s16 coverage_class = phy->coverage_class;
        struct mt7915_dev *dev = phy->dev;
-       bool ext_phy = phy != &dev->phy;
+       struct mt7915_phy *ext_phy = mt7915_ext_phy(dev);
        u32 val, reg_offset;
        u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
                  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
        u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
                   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
        int offset;
-       bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;
+       bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);
 
        if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
                return;
 
-       if (ext_phy) {
+       if (ext_phy)
                coverage_class = max_t(s16, dev->phy.coverage_class,
-                                      coverage_class);
-       } else {
-               struct mt7915_phy *phy_ext = mt7915_ext_phy(dev);
+                                      ext_phy->coverage_class);
 
-               if (phy_ext)
-                       coverage_class = max_t(s16, phy_ext->coverage_class,
-                                              coverage_class);
-       }
-       mt76_set(dev, MT_ARB_SCR(ext_phy),
+       mt76_set(dev, MT_ARB_SCR(phy->band_idx),
                 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
        udelay(1);
 
@@ -1831,35 +1817,40 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
        reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
                     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
 
-       mt76_wr(dev, MT_TMAC_CDTR(ext_phy), cck + reg_offset);
-       mt76_wr(dev, MT_TMAC_ODTR(ext_phy), ofdm + reg_offset);
-       mt76_wr(dev, MT_TMAC_ICR0(ext_phy),
-               FIELD_PREP(MT_IFS_EIFS_OFDM, is_5ghz ? 84 : 78) |
+       mt76_wr(dev, MT_TMAC_CDTR(phy->band_idx), cck + reg_offset);
+       mt76_wr(dev, MT_TMAC_ODTR(phy->band_idx), ofdm + reg_offset);
+       mt76_wr(dev, MT_TMAC_ICR0(phy->band_idx),
+               FIELD_PREP(MT_IFS_EIFS_OFDM, a_band ? 84 : 78) |
                FIELD_PREP(MT_IFS_RIFS, 2) |
                FIELD_PREP(MT_IFS_SIFS, 10) |
                FIELD_PREP(MT_IFS_SLOT, phy->slottime));
 
-       mt76_wr(dev, MT_TMAC_ICR1(ext_phy),
+       mt76_wr(dev, MT_TMAC_ICR1(phy->band_idx),
                FIELD_PREP(MT_IFS_EIFS_CCK, 314));
 
-       if (phy->slottime < 20 || is_5ghz)
+       if (phy->slottime < 20 || a_band)
                val = MT7915_CFEND_RATE_DEFAULT;
        else
                val = MT7915_CFEND_RATE_11B;
 
-       mt76_rmw_field(dev, MT_AGG_ACR0(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
-       mt76_clear(dev, MT_ARB_SCR(ext_phy),
+       mt76_rmw_field(dev, MT_AGG_ACR0(phy->band_idx), MT_AGG_ACR_CFEND_RATE, val);
+       mt76_clear(dev, MT_ARB_SCR(phy->band_idx),
                   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
 }
 
 void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy)
 {
-       mt76_set(dev, MT_WF_PHY_RXTD12(ext_phy),
+       u32 reg;
+
+       reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(ext_phy) :
+               MT_WF_PHY_RXTD12_MT7916(ext_phy);
+       mt76_set(dev, reg,
                 MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
                 MT_WF_PHY_RXTD12_IRPI_SW_CLR);
 
-       mt76_set(dev, MT_WF_PHY_RX_CTRL1(ext_phy),
-                FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
+       reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(ext_phy) :
+               MT_WF_PHY_RX_CTRL1_MT7916(ext_phy);
+       mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
 }
 
 static u8
@@ -1871,7 +1862,9 @@ mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
        int nss, i;
 
        for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) {
-               u32 reg = MT_WF_IRPI(nss + (idx << dev->dbdc_support));
+               u32 reg = is_mt7915(&dev->mt76) ?
+                       MT_WF_IRPI_NSS(0, nss + (idx << dev->dbdc_support)) :
+                       MT_WF_IRPI_NSS_MT7916(idx, nss);
 
                for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
                        val = mt76_rr(dev, reg);
@@ -1890,12 +1883,11 @@ void mt7915_update_channel(struct mt76_phy *mphy)
 {
        struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv;
        struct mt76_channel_state *state = mphy->chan_state;
-       bool ext_phy = phy != &phy->dev->phy;
        int nf;
 
        mt7915_mcu_get_chan_mib_info(phy, false);
 
-       nf = mt7915_phy_get_nf(phy, ext_phy);
+       nf = mt7915_phy_get_nf(phy, phy->band_idx);
        if (!phy->noise)
                phy->noise = nf << 4;
        else if (nf)
@@ -1956,16 +1948,22 @@ mt7915_dma_reset(struct mt7915_dev *dev)
        int i;
 
        mt76_clear(dev, MT_WFDMA0_GLO_CFG,
-                  MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
-       mt76_clear(dev, MT_WFDMA1_GLO_CFG,
-                  MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+                  MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+                  MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+       if (is_mt7915(&dev->mt76))
+               mt76_clear(dev, MT_WFDMA1_GLO_CFG,
+                          MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+                          MT_WFDMA1_GLO_CFG_RX_DMA_EN);
        if (dev->hif2) {
                mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
-                          (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
-                           MT_WFDMA0_GLO_CFG_RX_DMA_EN));
-               mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
-                          (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
-                           MT_WFDMA1_GLO_CFG_RX_DMA_EN));
+                          MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+                          MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+               if (is_mt7915(&dev->mt76))
+                       mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+                                  MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+                                  MT_WFDMA1_GLO_CFG_RX_DMA_EN);
        }
 
        usleep_range(1000, 2000);
@@ -1989,19 +1987,23 @@ mt7915_dma_reset(struct mt7915_dev *dev)
 
        mt76_set(dev, MT_WFDMA0_GLO_CFG,
                 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
-       mt76_set(dev, MT_WFDMA1_GLO_CFG,
-                MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN |
-                MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
-                MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
-       if (dev->hif2) {
-               mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
-                       (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
-                        MT_WFDMA0_GLO_CFG_RX_DMA_EN));
-               mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
-                       (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+       if (is_mt7915(&dev->mt76))
+               mt76_set(dev, MT_WFDMA1_GLO_CFG,
+                        MT_WFDMA1_GLO_CFG_TX_DMA_EN |
                         MT_WFDMA1_GLO_CFG_RX_DMA_EN |
                         MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
-                        MT_WFDMA1_GLO_CFG_OMIT_RX_INFO));
+                        MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
+       if (dev->hif2) {
+               mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+                        MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+                        MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+               if (is_mt7915(&dev->mt76))
+                       mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+                                MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+                                MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+                                MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+                                MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
        }
 }
 
@@ -2111,120 +2113,96 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
 {
        struct mt7915_dev *dev = phy->dev;
        struct mib_stats *mib = &phy->mib;
-       bool ext_phy = phy != &dev->phy;
        int i, aggr0, aggr1, cnt;
        u32 val;
 
-       cnt = mt76_rr(dev, MT_MIB_SDR3(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));
        mib->fcs_err_cnt += is_mt7915(&dev->mt76) ? FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
                FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
 
-       cnt = mt76_rr(dev, MT_MIB_SDR4(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR4(phy->band_idx));
        mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt);
 
-       cnt = mt76_rr(dev, MT_MIB_SDR5(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR5(phy->band_idx));
        mib->rx_mpdu_cnt += cnt;
 
-       cnt = mt76_rr(dev, MT_MIB_SDR6(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR6(phy->band_idx));
        mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);
 
-       cnt = mt76_rr(dev, MT_MIB_SDR7(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR7(phy->band_idx));
        mib->rx_vector_mismatch_cnt += FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt);
 
-       cnt = mt76_rr(dev, MT_MIB_SDR8(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR8(phy->band_idx));
        mib->rx_delimiter_fail_cnt += FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt);
 
-       cnt = mt76_rr(dev, MT_MIB_SDR11(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR11(phy->band_idx));
        mib->rx_len_mismatch_cnt += FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt);
 
-       cnt = mt76_rr(dev, MT_MIB_SDR12(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR12(phy->band_idx));
        mib->tx_ampdu_cnt += cnt;
 
-       cnt = mt76_rr(dev, MT_MIB_SDR13(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR13(phy->band_idx));
        mib->tx_stop_q_empty_cnt += FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);
 
-       cnt = mt76_rr(dev, MT_MIB_SDR14(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR14(phy->band_idx));
        mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ?
                FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) :
                FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt);
 
-       cnt = mt76_rr(dev, MT_MIB_SDR15(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR15(phy->band_idx));
        mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ?
                FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) :
                FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt);
 
-       cnt = mt76_rr(dev, MT_MIB_SDR22(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR22(phy->band_idx));
        mib->rx_ampdu_cnt += cnt;
 
-       cnt = mt76_rr(dev, MT_MIB_SDR23(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR23(phy->band_idx));
        mib->rx_ampdu_bytes_cnt += cnt;
 
-       cnt = mt76_rr(dev, MT_MIB_SDR24(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR24(phy->band_idx));
        mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ?
                FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) :
                FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt);
 
-       cnt = mt76_rr(dev, MT_MIB_SDR25(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR25(phy->band_idx));
        mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;
 
-       cnt = mt76_rr(dev, MT_MIB_SDR27(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR27(phy->band_idx));
        mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt);
 
-       cnt = mt76_rr(dev, MT_MIB_SDR28(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR28(phy->band_idx));
        mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);
 
-       cnt = mt76_rr(dev, MT_MIB_SDR29(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR29(phy->band_idx));
        mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ?
                FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) :
                FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt);
 
-       cnt = mt76_rr(dev, MT_MIB_SDRVEC(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDRVEC(phy->band_idx));
        mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ?
                FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) :
                FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt);
 
-       cnt = mt76_rr(dev, MT_MIB_SDR31(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR31(phy->band_idx));
        mib->rx_ba_cnt += cnt;
 
-       cnt = mt76_rr(dev, MT_MIB_SDR32(ext_phy));
-       mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT_MASK, cnt);
-
-       if (is_mt7915(&dev->mt76))
-               cnt = mt76_rr(dev, MT_MIB_SDR33(ext_phy));
-       mib->tx_pkt_ibf_cnt += is_mt7915(&dev->mt76) ?
-                      FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT_MASK, cnt) :
-                      FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT_MASK_MT7916, cnt);
-
-       cnt = mt76_rr(dev, MT_MIB_SDRMUBF(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDRMUBF(phy->band_idx));
        mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt);
 
-       cnt = mt76_rr(dev, MT_MIB_DR8(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_DR8(phy->band_idx));
        mib->tx_mu_mpdu_cnt += cnt;
 
-       cnt = mt76_rr(dev, MT_MIB_DR9(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_DR9(phy->band_idx));
        mib->tx_mu_acked_mpdu_cnt += cnt;
 
-       cnt = mt76_rr(dev, MT_MIB_DR11(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_DR11(phy->band_idx));
        mib->tx_su_acked_mpdu_cnt += cnt;
 
-       cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(ext_phy));
-       mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
-       mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);
-
-       cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(ext_phy));
-       mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
-       mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
-       mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
-       mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt);
-
-       cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(ext_phy));
-       mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
-       mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
-       mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);
-
-       cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(ext_phy));
-       mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
-       mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);
+       cnt = mt76_rr(dev, MT_ETBF_PAR_RPT0(phy->band_idx));
+       mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_PAR_RPT0_FB_BW, cnt);
+       mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NC, cnt);
+       mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NR, cnt);
 
        for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
                cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
@@ -2232,55 +2210,97 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
                mib->tx_amsdu_cnt += cnt;
        }
 
-       aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+       aggr0 = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
        if (is_mt7915(&dev->mt76)) {
                for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
-                       val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, (i << 4)));
+                       val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 4)));
                        mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
                        mib->ack_fail_cnt +=
                                FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
 
-                       val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, (i << 4)));
+                       val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 4)));
                        mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
                        mib->rts_retries_cnt +=
                                FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
 
-                       val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
+                       val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
                        dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
                        dev->mt76.aggr_stats[aggr0++] += val >> 16;
 
-                       val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
+                       val = mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
                        dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
                        dev->mt76.aggr_stats[aggr1++] += val >> 16;
                }
+
+               cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
+               mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
+
+               cnt = mt76_rr(dev, MT_MIB_SDR33(phy->band_idx));
+               mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT, cnt);
+
+               cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(phy->band_idx));
+               mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
+               mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);
+
+               cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(phy->band_idx));
+               mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
+               mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);
+
+               cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(phy->band_idx));
+               mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
+               mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
+               mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
+               mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt);
        } else {
                for (i = 0; i < 2; i++) {
                        /* rts count */
-                       val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, (i << 2)));
+                       val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 2)));
                        mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val);
                        mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val);
 
                        /* rts retry count */
-                       val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, (i << 2)));
+                       val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 2)));
                        mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val);
                        mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val);
 
                        /* ba miss count */
-                       val = mt76_rr(dev, MT_MIB_MB_SDR2(ext_phy, (i << 2)));
+                       val = mt76_rr(dev, MT_MIB_MB_SDR2(phy->band_idx, (i << 2)));
                        mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val);
                        mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val);
 
                        /* ack fail count */
-                       val = mt76_rr(dev, MT_MIB_MB_BFTF(ext_phy, (i << 2)));
+                       val = mt76_rr(dev, MT_MIB_MB_BFTF(phy->band_idx, (i << 2)));
                        mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val);
                        mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val);
                }
 
                for (i = 0; i < 8; i++) {
-                       val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
+                       val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
                        dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val);
                        dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val);
                }
+
+               cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
+               mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
+               mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
+               mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
+               mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
+
+               cnt = mt76_rr(dev, MT_MIB_BFCR7(phy->band_idx));
+               mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_MIB_BFCR7_BFEE_TX_FB_CPL, cnt);
+
+               cnt = mt76_rr(dev, MT_MIB_BFCR2(phy->band_idx));
+               mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_MIB_BFCR2_BFEE_TX_FB_TRIG, cnt);
+
+               cnt = mt76_rr(dev, MT_MIB_BFCR0(phy->band_idx));
+               mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
+               mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
+               mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
+               mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
+
+               cnt = mt76_rr(dev, MT_MIB_BFCR1(phy->band_idx));
+               mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
+               mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
        }
 }
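
A side note on the counter reads above: many of these MIB registers pack two 16-bit counters into one 32-bit word, which the driver splits with GENMASK()/FIELD_GET() from <linux/bits.h> and <linux/bitfield.h>. A minimal userspace sketch of the same split, using a hypothetical register value rather than a real hardware read:

#include <stdint.h>
#include <stdio.h>

/* userspace stand-ins for the kernel's GENMASK()/FIELD_GET() */
#define GENMASK32(h, l)    ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET32(m, v)  (((v) & (m)) / ((m) & ~((m) << 1)))

int main(void)
{
	uint32_t val = 0x00120034;	/* hypothetical MT_TX_AGG_CNT read */

	/* low and high halves feed two adjacent aggregation buckets */
	printf("lo=%u hi=%u\n",
	       FIELD_GET32(GENMASK32(15, 0), val),
	       FIELD_GET32(GENMASK32(31, 16), val));
	return 0;
}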
 
@@ -2360,10 +2380,23 @@ static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
 
 static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
 {
-       int err;
+       int err, region;
+
+       switch (dev->mt76.region) {
+       case NL80211_DFS_ETSI:
+               region = 0;
+               break;
+       case NL80211_DFS_JP:
+               region = 2;
+               break;
+       case NL80211_DFS_FCC:
+       default:
+               region = 1;
+               break;
+       }
 
        err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
-                                     MT_RX_SEL0, 0);
+                                     MT_RX_SEL0, region);
        if (err < 0)
                return err;
 
@@ -2375,20 +2408,22 @@ static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
 {
        struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
        struct mt7915_dev *dev = phy->dev;
-       bool ext_phy = phy != &dev->phy;
        int err;
 
        /* start CAC */
-       err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, ext_phy,
+       err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, phy->band_idx,
                                      MT_RX_SEL0, 0);
        if (err < 0)
                return err;
 
-       err = mt7915_dfs_start_rdd(dev, ext_phy);
+       err = mt7915_dfs_start_rdd(dev, phy->band_idx);
        if (err < 0)
                return err;
 
-       phy->rdd_state |= BIT(ext_phy);
+       phy->rdd_state |= BIT(phy->band_idx);
+
+       if (!is_mt7915(&dev->mt76))
+               return 0;
 
        if (chandef->width == NL80211_CHAN_WIDTH_160 ||
            chandef->width == NL80211_CHAN_WIDTH_80P80) {
@@ -2439,7 +2474,6 @@ mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
 int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
 {
        struct mt7915_dev *dev = phy->dev;
-       bool ext_phy = phy != &dev->phy;
        enum mt76_dfs_state dfs_state, prev_state;
        int err;
 
@@ -2471,7 +2505,7 @@ int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
                return 0;
 
        err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
-                                     ext_phy, MT_RX_SEL0, 0);
+                                     phy->band_idx, MT_RX_SEL0, 0);
        if (err < 0) {
                phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
                return err;
@@ -2481,8 +2515,8 @@ int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
        return 0;
 
 stop:
-       err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, ext_phy,
-                                     MT_RX_SEL0, 0);
+       err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START,
+                                     phy->band_idx, MT_RX_SEL0, 0);
        if (err < 0)
                return err;
 
index dee7fc0..c3f44d8 100644
@@ -49,7 +49,7 @@ static int mt7915_start(struct ieee80211_hw *hw)
                mt7915_mac_enable_nf(dev, 0);
        }
 
-       if (phy != &dev->phy) {
+       if (phy != &dev->phy || phy->band_idx) {
                ret = mt76_connac_mcu_set_pm(&dev->mt76, 1, 0);
                if (ret)
                        goto out;
@@ -217,7 +217,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
        }
        mvif->mt76.omac_idx = idx;
        mvif->phy = phy;
-       mvif->mt76.band_idx = ext_phy;
+       mvif->mt76.band_idx = phy->band_idx;
 
        mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
        if (ext_phy)
@@ -235,7 +235,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
        INIT_LIST_HEAD(&mvif->sta.rc_list);
        INIT_LIST_HEAD(&mvif->sta.poll_list);
        mvif->sta.wcid.idx = idx;
-       mvif->sta.wcid.ext_phy = mvif->mt76.band_idx;
+       mvif->sta.wcid.ext_phy = ext_phy;
        mvif->sta.wcid.hw_key_idx = -1;
        mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
        mt76_packet_id_init(&mvif->sta.wcid);
@@ -654,6 +654,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
        struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
        struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
        struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+       bool ext_phy = mvif->phy != &dev->phy;
        int ret, idx;
 
        idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
@@ -665,7 +666,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
        msta->vif = mvif;
        msta->wcid.sta = 1;
        msta->wcid.idx = idx;
-       msta->wcid.ext_phy = mvif->mt76.band_idx;
+       msta->wcid.ext_phy = ext_phy;
        msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
        msta->jiffies = jiffies;
 
@@ -969,12 +970,9 @@ mt7915_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
 
        phy->mt76->antenna_mask = tx_ant;
 
-       if (ext_phy) {
-               if (dev->chainmask == 0xf)
-                       tx_ant <<= 2;
-               else
-                       tx_ant <<= 1;
-       }
+       if (ext_phy)
+               tx_ant <<= dev->chainshift;
+
        phy->mt76->chainmask = tx_ant;
 
        mt76_set_stream_caps(phy->mt76, true);
@@ -1241,7 +1239,6 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
        };
        struct mib_stats *mib = &phy->mib;
        /* See mt7915_ampdu_stat_read_phy, etc */
-       bool ext_phy = phy != &dev->phy;
        int i, n, ei = 0;
 
        mutex_lock(&dev->mt76.mutex);
@@ -1258,7 +1255,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
        data[ei++] = mib->tx_pkt_ibf_cnt;
 
        /* Tx ampdu stat */
-       n = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+       n = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
        for (i = 0; i < 15 /*ARRAY_SIZE(bound)*/; i++)
                data[ei++] = dev->mt76.aggr_stats[i + n];
 
index 462c7da..e7a6f80 100644
@@ -64,6 +64,26 @@ struct mt7915_fw_region {
        u8 reserved1[15];
 } __packed;
 
+#define fw_name(_dev, name, ...)       ({                      \
+       char *_fw;                                              \
+       switch (mt76_chip(&(_dev)->mt76)) {                     \
+       case 0x7915:                                            \
+               _fw = MT7915_##name;                            \
+               break;                                          \
+       case 0x7986:                                            \
+               _fw = MT7986_##name##__VA_ARGS__;               \
+               break;                                          \
+       default:                                                \
+               _fw = MT7916_##name;                            \
+               break;                                          \
+       }                                                       \
+       _fw;                                                    \
+})
+
+#define fw_name_var(_dev, name)                (mt7915_check_adie(_dev, false) ?       \
+                                        fw_name(_dev, name) :                  \
+                                        fw_name(_dev, name, _MT7975))
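
For reference, fw_name() above picks the firmware path from the runtime chip ID, and the variadic parameter lets the 0x7986 case paste an a-die suffix onto the macro name; the other cases silently ignore it. Two hypothetical expansions:

	fw_name(dev, FIRMWARE_WA)      /* mt7986 -> MT7986_FIRMWARE_WA */
	fw_name_var(dev, FIRMWARE_WM)  /* mt7986, mt7915_check_adie() == 0
					* -> MT7986_FIRMWARE_WM_MT7975 */

fw_name_var() falls back to the plain name whenever mt7915_check_adie() reports a non-zero a-die/SKU value.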
+
 #define MCU_PATCH_ADDRESS              0x200000
 
 #define HE_PHY(p, c)                   u8_get_bits(c, IEEE80211_HE_PHY_##p)
@@ -89,6 +109,7 @@ mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
                          u16 mcs_map)
 {
        struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+       struct mt7915_dev *dev = msta->vif->phy->dev;
        enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
        const u16 *mask = msta->vif->bitrate_mask.control[band].he_mcs;
        int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
@@ -129,8 +150,9 @@ mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
                mcs_map &= ~(0x3 << (nss * 2));
                mcs_map |= mcs << (nss * 2);
 
-               /* only support 2ss on 160MHz */
-               if (nss > 1 && (sta->bandwidth == IEEE80211_STA_RX_BW_160))
+               /* only support 2ss on 160MHz for mt7915 */
+               if (is_mt7915(&dev->mt76) && nss > 1 &&
+                   sta->bandwidth == IEEE80211_STA_RX_BW_160)
                        break;
        }
 
@@ -141,6 +163,8 @@ static void
 mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
                           const u16 *mask)
 {
+       struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+       struct mt7915_dev *dev = msta->vif->phy->dev;
        u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
        int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
        u16 mcs;
@@ -162,8 +186,9 @@ mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
 
                vht_mcs[nss] = cpu_to_le16(mcs & mask[nss]);
 
-               /* only support 2ss on 160MHz */
-               if (nss > 1 && (sta->bandwidth == IEEE80211_STA_RX_BW_160))
+               /* only support 2ss on 160MHz for mt7915 */
+               if (is_mt7915(&dev->mt76) && nss > 1 &&
+                   sta->bandwidth == IEEE80211_STA_RX_BW_160)
                        break;
        }
 }
@@ -309,7 +334,7 @@ mt7915_mcu_rx_csa_notify(struct mt7915_dev *dev, struct sk_buff *skb)
 
        c = (struct mt7915_mcu_csa_notify *)skb->data;
 
-       if (c->band_idx && dev->mt76.phy2)
+       if ((c->band_idx && !dev->phy.band_idx) && dev->mt76.phy2)
                mphy = dev->mt76.phy2;
 
        ieee80211_iterate_active_interfaces_atomic(mphy->hw,
@@ -328,7 +353,7 @@ mt7915_mcu_rx_thermal_notify(struct mt7915_dev *dev, struct sk_buff *skb)
        if (t->ctrl.ctrl_id != THERMAL_PROTECT_ENABLE)
                return;
 
-       if (t->ctrl.band_idx && dev->mt76.phy2)
+       if ((t->ctrl.band_idx && !dev->phy.band_idx) && dev->mt76.phy2)
                mphy = dev->mt76.phy2;
 
        phy = (struct mt7915_phy *)mphy->priv;
@@ -343,7 +368,7 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
 
        r = (struct mt7915_mcu_rdd_report *)skb->data;
 
-       if (r->band_idx && dev->mt76.phy2)
+       if ((r->band_idx && !dev->phy.band_idx) && dev->mt76.phy2)
                mphy = dev->mt76.phy2;
 
        if (r->band_idx == MT_RX_SEL2)
@@ -390,6 +415,22 @@ mt7915_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
        ieee80211_color_change_finish(vif);
 }
 
+static void
+mt7915_mcu_rx_bcc_notify(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+       struct mt76_phy *mphy = &dev->mt76.phy;
+       struct mt7915_mcu_bcc_notify *b;
+
+       b = (struct mt7915_mcu_bcc_notify *)skb->data;
+
+       if ((b->band_idx && !dev->phy.band_idx) && dev->mt76.phy2)
+               mphy = dev->mt76.phy2;
+
+       ieee80211_iterate_active_interfaces_atomic(mphy->hw,
+                       IEEE80211_IFACE_ITER_RESUME_ALL,
+                       mt7915_mcu_cca_finish, mphy->hw);
+}
+
 static void
 mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
 {
@@ -409,9 +450,7 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
                mt7915_mcu_rx_log_message(dev, skb);
                break;
        case MCU_EXT_EVENT_BCC_NOTIFY:
-               ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw,
-                               IEEE80211_IFACE_ITER_RESUME_ALL,
-                               mt7915_mcu_cca_finish, dev);
+               mt7915_mcu_rx_bcc_notify(dev, skb);
                break;
        default:
                break;
@@ -533,12 +572,7 @@ mt7915_mcu_bss_rfch_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
        }
 
        if (vif->bss_conf.he_support && vif->type == NL80211_IFTYPE_STATION) {
-               struct mt7915_dev *dev = phy->dev;
-               struct mt76_phy *mphy = &dev->mt76.phy;
-               bool ext_phy = phy != &dev->phy;
-
-               if (ext_phy && dev->mt76.phy2)
-                       mphy = dev->mt76.phy2;
+               struct mt76_phy *mphy = phy->mt76;
 
                ch->he_ru26_block =
                        mt7915_check_he_obss_narrow_bw_ru(mphy->hw, vif);
@@ -787,8 +821,9 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
             IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G))
                cap |= STA_REC_HE_CAP_BW20_RU242_SUPPORT;
 
-       if (mvif->cap.ldpc && (elem->phy_cap_info[1] &
-                              IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
+       if (mvif->cap.he_ldpc &&
+           (elem->phy_cap_info[1] &
+            IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
                cap |= STA_REC_HE_CAP_LDPC;
 
        if (elem->phy_cap_info[1] &
@@ -904,9 +939,6 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
            vif->type != NL80211_IFTYPE_AP)
                return;
 
-       if (!sta->vht_cap.vht_supported)
-               return;
-
        tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru));
 
        muru = (struct sta_rec_muru *)tlv;
@@ -914,9 +946,12 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
        muru->cfg.mimo_dl_en = mvif->cap.he_mu_ebfer ||
                               mvif->cap.vht_mu_ebfer ||
                               mvif->cap.vht_mu_ebfee;
+       muru->cfg.mimo_ul_en = true;
+       muru->cfg.ofdma_dl_en = true;
 
-       muru->mimo_dl.vht_mu_bfee =
-               !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+       if (sta->vht_cap.vht_supported)
+               muru->mimo_dl.vht_mu_bfee =
+                       !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
 
        if (!sta->he_cap.has_he)
                return;
@@ -924,13 +959,11 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
        muru->mimo_dl.partial_bw_dl_mimo =
                HE_PHY(CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO, elem->phy_cap_info[6]);
 
-       muru->cfg.mimo_ul_en = true;
        muru->mimo_ul.full_ul_mimo =
                HE_PHY(CAP2_UL_MU_FULL_MU_MIMO, elem->phy_cap_info[2]);
        muru->mimo_ul.partial_ul_mimo =
                HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]);
 
-       muru->cfg.ofdma_dl_en = true;
        muru->ofdma_dl.punc_pream_rx =
                HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]);
        muru->ofdma_dl.he_20m_in_40m_2g =
@@ -954,6 +987,9 @@ mt7915_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
        struct sta_rec_ht *ht;
        struct tlv *tlv;
 
+       if (!sta->ht_cap.ht_supported)
+               return;
+
        tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
 
        ht = (struct sta_rec_ht *)tlv;
@@ -1041,7 +1077,8 @@ mt7915_mcu_sta_wtbl_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
        mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, tlv, wtbl_hdr);
        if (sta)
                mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, skb, sta, tlv,
-                                           wtbl_hdr, mvif->cap.ldpc);
+                                           wtbl_hdr, mvif->cap.ht_ldpc,
+                                           mvif->cap.vht_ldpc);
 
        return 0;
 }
@@ -1221,8 +1258,7 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
                        struct ieee80211_vif *vif, struct ieee80211_sta *sta)
 {
        struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
-       struct mt7915_phy *phy =
-               mvif->mt76.band_idx ? mt7915_ext_phy(dev) : &dev->phy;
+       struct mt7915_phy *phy = mvif->phy;
        int tx_ant = hweight8(phy->mt76->chainmask) - 1;
        struct sta_rec_bf *bf;
        struct tlv *tlv;
@@ -1234,6 +1270,9 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
        };
        bool ebf;
 
+       if (!(sta->ht_cap.ht_supported || sta->he_cap.has_he))
+               return;
+
        ebf = mt7915_is_ebf_supported(phy, vif, sta, false);
        if (!ebf && !dev->ibf)
                return;
@@ -1288,13 +1327,15 @@ mt7915_mcu_sta_bfee_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
                        struct ieee80211_vif *vif, struct ieee80211_sta *sta)
 {
        struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
-       struct mt7915_phy *phy =
-               mvif->mt76.band_idx ? mt7915_ext_phy(dev) : &dev->phy;
+       struct mt7915_phy *phy = mvif->phy;
        int tx_ant = hweight8(phy->mt76->chainmask) - 1;
        struct sta_rec_bfee *bfee;
        struct tlv *tlv;
        u8 nrow = 0;
 
+       if (!(sta->vht_cap.vht_supported || sta->he_cap.has_he))
+               return;
+
        if (!mt7915_is_ebf_supported(phy, vif, sta, true))
                return;
 
@@ -1518,6 +1559,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
        ra->channel = chandef->chan->hw_value;
        ra->bw = sta->bandwidth;
        ra->phy.bw = sta->bandwidth;
+       ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->smps_mode);
 
        if (supp_rate) {
                supp_rate &= mask->control[band].legacy;
@@ -1551,7 +1593,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
                        cap |= STA_CAP_TX_STBC;
                if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
                        cap |= STA_CAP_RX_STBC;
-               if (mvif->cap.ldpc &&
+               if (mvif->cap.ht_ldpc &&
                    (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
                        cap |= STA_CAP_LDPC;
 
@@ -1577,7 +1619,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
                        cap |= STA_CAP_VHT_TX_STBC;
                if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
                        cap |= STA_CAP_VHT_RX_STBC;
-               if (mvif->cap.ldpc &&
+               if (mvif->cap.vht_ldpc &&
                    (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
                        cap |= STA_CAP_VHT_LDPC;
 
@@ -1588,6 +1630,10 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
        if (sta->he_cap.has_he) {
                ra->supp_mode |= MODE_HE;
                cap |= STA_CAP_HE;
+
+               if (sta->he_6ghz_capa.capa)
+                       ra->af = le16_get_bits(sta->he_6ghz_capa.capa,
+                                              IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
        }
 
        ra->sta_cap = cpu_to_le32(cap);
@@ -1610,7 +1656,7 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
         * once dev->rc_work changes the settings driver should also
         * update sta_rec_he here.
         */
-       if (sta->he_cap.has_he && changed)
+       if (changed)
                mt7915_mcu_sta_he_tlv(skb, sta, vif);
 
        /* sta_rec_ra accommodates BW, NSS and only MCS range format
@@ -1679,7 +1725,7 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
                goto out;
 
        /* tag order is in accordance with firmware dependency. */
-       if (sta && sta->ht_cap.ht_supported) {
+       if (sta) {
                /* starec bfer */
                mt7915_mcu_sta_bfer_tlv(dev, skb, vif, sta);
                /* starec ht */
@@ -1696,7 +1742,7 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
                return ret;
        }
 
-       if (sta && sta->ht_cap.ht_supported) {
+       if (sta) {
                /* starec amsdu */
                mt7915_mcu_sta_amsdu_tlv(dev, skb, vif, sta);
                /* starec he */
@@ -1779,6 +1825,55 @@ mt7915_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb,
        info->cnt = skb->data[offs->cntdwn_counter_offs[0]];
 }
 
+static void
+mt7915_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb,
+                      struct ieee80211_vif *vif, struct bss_info_bcn *bcn,
+                      struct ieee80211_mutable_offsets *offs)
+{
+       struct bss_info_bcn_mbss *mbss;
+       const struct element *elem;
+       struct tlv *tlv;
+
+       if (!vif->bss_conf.bssid_indicator)
+               return;
+
+       tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_MBSSID,
+                                          sizeof(*mbss), &bcn->sub_ntlv,
+                                          &bcn->len);
+
+       mbss = (struct bss_info_bcn_mbss *)tlv;
+       mbss->offset[0] = cpu_to_le16(offs->tim_offset);
+       mbss->bitmap = cpu_to_le32(1);
+
+       for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID,
+                           &skb->data[offs->mbssid_off],
+                           skb->len - offs->mbssid_off) {
+               const struct element *sub_elem;
+
+               if (elem->datalen < 2)
+                       continue;
+
+               for_each_element(sub_elem, elem->data + 1, elem->datalen - 1) {
+                       const u8 *data;
+
+                       if (sub_elem->id || sub_elem->datalen < 4)
+                               continue; /* not a valid BSS profile */
+
+                       /* Find WLAN_EID_MULTI_BSSID_IDX
+                        * in the merged nontransmitted profile
+                        */
+                       data = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX,
+                                               sub_elem->data,
+                                               sub_elem->datalen);
+                       if (!data || data[1] < 1 || !data[2])
+                               continue;
+
+                       mbss->offset[data[2]] = cpu_to_le16(data - skb->data);
+                       mbss->bitmap |= cpu_to_le32(BIT(data[2]));
+               }
+       }
+}
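
mt7915_mcu_beacon_mbss() walks the Multiple BSSID elements of the beacon template and, for each nontransmitted profile, records the byte offset of its WLAN_EID_MULTI_BSSID_IDX data in mbss->offset[], indexed by the profile's BSSID index and flagged in mbss->bitmap, presumably so the firmware can locate and update the per-BSS fields in place in the template it transmits.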
+
 static void
 mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif,
                       struct sk_buff *rskb, struct sk_buff *skb,
@@ -1841,8 +1936,8 @@ mt7915_mcu_beacon_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif,
                              len);
        if (ie && ie[1] >= sizeof(*ht)) {
                ht = (void *)(ie + 2);
-               vc->ldpc |= !!(le16_to_cpu(ht->cap_info) &
-                              IEEE80211_HT_CAP_LDPC_CODING);
+               vc->ht_ldpc = !!(le16_to_cpu(ht->cap_info) &
+                                IEEE80211_HT_CAP_LDPC_CODING);
        }
 
        ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, mgmt->u.beacon.variable,
@@ -1853,7 +1948,7 @@ mt7915_mcu_beacon_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif,
                vht = (void *)(ie + 2);
                bc = le32_to_cpu(vht->vht_cap_info);
 
-               vc->ldpc |= !!(bc & IEEE80211_VHT_CAP_RXLDPC);
+               vc->vht_ldpc = !!(bc & IEEE80211_VHT_CAP_RXLDPC);
                vc->vht_su_ebfer =
                        (bc & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) &&
                        (pc & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
@@ -1877,6 +1972,8 @@ mt7915_mcu_beacon_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif,
 
                he = (void *)(ie + 3);
 
+               vc->he_ldpc =
+                       HE_PHY(CAP1_LDPC_CODING_IN_PAYLOAD, pe->phy_cap_info[1]);
                vc->he_su_ebfer =
                        HE_PHY(CAP3_SU_BEAMFORMER, he->phy_cap_info[3]) &&
                        HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]);
@@ -1902,6 +1999,10 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
        struct tlv *tlv;
        struct bss_info_bcn *bcn;
        int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE;
+       bool ext_phy = phy != &dev->phy;
+
+       if (vif->bss_conf.nontransmitted)
+               return 0;
 
        rskb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
                                               NULL, len);
@@ -1925,15 +2026,15 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
                return -EINVAL;
        }
 
-       if (mvif->mt76.band_idx) {
+       if (ext_phy) {
                info = IEEE80211_SKB_CB(skb);
                info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
        }
 
        mt7915_mcu_beacon_check_caps(phy, vif, skb);
 
-       /* TODO: subtag - 11v MBSSID */
        mt7915_mcu_beacon_cntdwn(vif, rskb, skb, bcn, &offs);
+       mt7915_mcu_beacon_mbss(rskb, skb, vif, bcn, &offs);
        mt7915_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
        dev_kfree_skb(skb);
 
@@ -1962,7 +2063,6 @@ static int mt7915_load_patch(struct mt7915_dev *dev)
 {
        const struct mt7915_patch_hdr *hdr;
        const struct firmware *fw = NULL;
-       const char *patch;
        int i, ret, sem;
 
        sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, 1);
@@ -1976,8 +2076,8 @@ static int mt7915_load_patch(struct mt7915_dev *dev)
                return -EAGAIN;
        }
 
-       patch = is_mt7915(&dev->mt76) ? MT7915_ROM_PATCH : MT7916_ROM_PATCH;
-       ret = request_firmware(&fw, patch, dev->mt76.dev);
+       ret = request_firmware(&fw, fw_name_var(dev, ROM_PATCH),
+                              dev->mt76.dev);
        if (ret)
                goto out;
 
@@ -2096,11 +2196,10 @@ static int mt7915_load_ram(struct mt7915_dev *dev)
 {
        const struct mt7915_fw_trailer *hdr;
        const struct firmware *fw;
-       const char *mcu;
        int ret;
 
-       mcu = is_mt7915(&dev->mt76) ? MT7915_FIRMWARE_WM : MT7916_FIRMWARE_WM;
-       ret = request_firmware(&fw, mcu, dev->mt76.dev);
+       ret = request_firmware(&fw, fw_name_var(dev, FIRMWARE_WM),
+                              dev->mt76.dev);
        if (ret)
                return ret;
 
@@ -2124,8 +2223,8 @@ static int mt7915_load_ram(struct mt7915_dev *dev)
 
        release_firmware(fw);
 
-       mcu = is_mt7915(&dev->mt76) ? MT7915_FIRMWARE_WA : MT7916_FIRMWARE_WA;
-       ret = request_firmware(&fw, mcu, dev->mt76.dev);
+       ret = request_firmware(&fw, fw_name(dev, FIRMWARE_WA),
+                              dev->mt76.dev);
        if (ret)
                return ret;
 
@@ -2268,7 +2367,7 @@ int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms)
                u8 band_idx;
        } req = {
                .cmd = cpu_to_le32(MURU_GET_TXC_TX_STATS),
-               .band_idx = phy != &dev->phy,
+               .band_idx = phy->band_idx,
        };
 
        ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL),
@@ -2738,10 +2837,14 @@ int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
 
 int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
 {
+       static const u8 ch_band[] = {
+               [NL80211_BAND_2GHZ] = 0,
+               [NL80211_BAND_5GHZ] = 1,
+               [NL80211_BAND_6GHZ] = 2,
+       };
        struct mt7915_dev *dev = phy->dev;
        struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
        int freq1 = chandef->center_freq1;
-       bool ext_phy = phy != &dev->phy;
        struct {
                u8 control_ch;
                u8 center_ch;
@@ -2765,8 +2868,8 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
                .bw = mt76_connac_chan_bw(chandef),
                .tx_streams_num = hweight8(phy->mt76->antenna_mask),
                .rx_streams = phy->mt76->antenna_mask,
-               .band_idx = ext_phy,
-               .channel_band = chandef->chan->band,
+               .band_idx = phy->band_idx,
+               .channel_band = ch_band[chandef->chan->band],
        };
 
 #ifdef CONFIG_NL80211_TESTMODE
@@ -2777,17 +2880,18 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
                req.tx_streams_num = fls(phy->mt76->test.tx_antenna_mask);
                req.rx_streams = phy->mt76->test.tx_antenna_mask;
 
-               if (ext_phy) {
-                       req.tx_streams_num = 2;
-                       req.rx_streams >>= 2;
-               }
+               if (phy != &dev->phy)
+                       req.rx_streams >>= dev->chainshift;
        }
 #endif
 
-       if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+       if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
+           dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
+               req.switch_reason = CH_SWITCH_NORMAL;
+       else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
                req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
-       else if (phy->mt76->hw->conf.radar_enabled &&
-                chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+       else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
+                                         NL80211_IFTYPE_AP))
                req.switch_reason = CH_SWITCH_DFS;
        else
                req.switch_reason = CH_SWITCH_NORMAL;
@@ -3064,10 +3168,12 @@ int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
        struct mt7915_dev *dev = phy->dev;
        struct mt7915_mcu_mib *res, req[4];
        struct sk_buff *skb;
-       int i, ret, start = 0;
+       int i, ret, start = 0, ofs = 20;
 
-       if (!is_mt7915(&dev->mt76))
+       if (!is_mt7915(&dev->mt76)) {
                start = 4;
+               ofs = 0;
+       }
 
        for (i = 0; i < 4; i++) {
                req[i].band = cpu_to_le32(phy != &dev->phy);
@@ -3079,7 +3185,7 @@ int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
        if (ret)
                return ret;
 
-       res = (struct mt7915_mcu_mib *)(skb->data + 20);
+       res = (struct mt7915_mcu_mib *)(skb->data + ofs);
 
        if (chan_switch)
                goto out;
@@ -3131,7 +3237,7 @@ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
                u8 rsv[2];
        } __packed req = {
                .ctrl = {
-                       .band_idx = phy != &dev->phy,
+                       .band_idx = phy->band_idx,
                },
        };
        int level;
@@ -3426,6 +3532,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
        case MT_PHY_TYPE_OFDM:
                if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
                        sband = &mphy->sband_5g.sband;
+               else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
+                       sband = &mphy->sband_6g.sband;
                else
                        sband = &mphy->sband_2g.sband;
 
index 9417f7b..960072a 100644
@@ -79,6 +79,15 @@ struct mt7915_mcu_csa_notify {
        u8 rsv;
 } __packed;
 
+struct mt7915_mcu_bcc_notify {
+       struct mt7915_mcu_rxd rxd;
+
+       u8 band_idx;
+       u8 omac_idx;
+       u8 cca_count;
+       u8 rsv;
+} __packed;
+
 struct mt7915_mcu_rdd_report {
        struct mt7915_mcu_rxd rxd;
 
index e8ff686..5062e0d 100644
@@ -17,6 +17,11 @@ static const u32 mt7915_reg[] = {
        [INT1_MASK_CSR]         = 0xd708c,
        [INT_MCU_CMD_SOURCE]    = 0xd51f0,
        [INT_MCU_CMD_EVENT]     = 0x3108,
+       [WFDMA0_ADDR]           = 0xd4000,
+       [WFDMA0_PCIE1_ADDR]     = 0xd8000,
+       [WFDMA_EXT_CSR_ADDR]    = 0xd7000,
+       [CBTOP1_PHY_END]        = 0x77ffffff,
+       [INFRA_MCU_ADDR_END]    = 0x7c3fffff,
 };
 
 static const u32 mt7916_reg[] = {
@@ -26,6 +31,25 @@ static const u32 mt7916_reg[] = {
        [INT1_MASK_CSR]         = 0xd8204,
        [INT_MCU_CMD_SOURCE]    = 0xd41f0,
        [INT_MCU_CMD_EVENT]     = 0x2108,
+       [WFDMA0_ADDR]           = 0xd4000,
+       [WFDMA0_PCIE1_ADDR]     = 0xd8000,
+       [WFDMA_EXT_CSR_ADDR]    = 0xd7000,
+       [CBTOP1_PHY_END]        = 0x7fffffff,
+       [INFRA_MCU_ADDR_END]    = 0x7c085fff,
+};
+
+static const u32 mt7986_reg[] = {
+       [INT_SOURCE_CSR]        = 0x24200,
+       [INT_MASK_CSR]          = 0x24204,
+       [INT1_SOURCE_CSR]       = 0x28200,
+       [INT1_MASK_CSR]         = 0x28204,
+       [INT_MCU_CMD_SOURCE]    = 0x241f0,
+       [INT_MCU_CMD_EVENT]     = 0x54000108,
+       [WFDMA0_ADDR]           = 0x24000,
+       [WFDMA0_PCIE1_ADDR]     = 0x28000,
+       [WFDMA_EXT_CSR_ADDR]    = 0x27000,
+       [CBTOP1_PHY_END]        = 0x7fffffff,
+       [INFRA_MCU_ADDR_END]    = 0x7c085fff,
 };
 
 static const u32 mt7915_offs[] = {
@@ -98,6 +122,7 @@ static const u32 mt7915_offs[] = {
        [PLE_PG_HIF_GROUP]      = 0x110,
        [PLE_HIF_PG_INFO]       = 0x114,
        [AC_OFFSET]             = 0x040,
+       [ETBF_PAR_RPT0]         = 0x068,
 };
 
 static const u32 mt7916_offs[] = {
@@ -170,6 +195,7 @@ static const u32 mt7916_offs[] = {
        [PLE_PG_HIF_GROUP]      = 0x00c,
        [PLE_HIF_PG_INFO]       = 0x388,
        [AC_OFFSET]             = 0x080,
+       [ETBF_PAR_RPT0]         = 0x100,
 };
 
 static const struct __map mt7915_reg_map[] = {
@@ -264,12 +290,69 @@ static const struct __map mt7916_reg_map[] = {
        { 0x0, 0x0, 0x0 }, /* implies end of search */
 };
 
+static const struct __map mt7986_reg_map[] = {
+       { 0x54000000, 0x402000, 0x1000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
+       { 0x55000000, 0x403000, 0x1000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
+       { 0x56000000, 0x404000, 0x1000 }, /* WFDMA_2 (Reserved) */
+       { 0x57000000, 0x405000, 0x1000 }, /* WFDMA_3 (MCU wrap CR) */
+       { 0x58000000, 0x406000, 0x1000 }, /* WFDMA_4 (PCIE1 MCU DMA0) */
+       { 0x59000000, 0x407000, 0x1000 }, /* WFDMA_5 (PCIE1 MCU DMA1) */
+       { 0x820c0000, 0x408000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+       { 0x820c8000, 0x40c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+       { 0x820cc000, 0x40e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
+       { 0x820e0000, 0x420000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+       { 0x820e1000, 0x420400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+       { 0x820e2000, 0x420800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+       { 0x820e3000, 0x420c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+       { 0x820e4000, 0x421000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+       { 0x820e5000, 0x421400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+       { 0x820ce000, 0x421c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+       { 0x820e7000, 0x421e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+       { 0x820cf000, 0x422000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+       { 0x820e9000, 0x423400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+       { 0x820ea000, 0x424000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+       { 0x820eb000, 0x424200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+       { 0x820ec000, 0x424600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+       { 0x820ed000, 0x424800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+       { 0x820ca000, 0x426000, 0x2000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
+       { 0x820d0000, 0x430000, 0x10000}, /* WF_LMAC_TOP (WF_WTBLON) */
+       { 0x00400000, 0x480000, 0x10000}, /* WF_MCU_SYSRAM */
+       { 0x00410000, 0x490000, 0x10000}, /* WF_MCU_SYSRAM */
+       { 0x820f0000, 0x4a0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+       { 0x820f1000, 0x4a0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+       { 0x820f2000, 0x4a0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+       { 0x820f3000, 0x4a0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+       { 0x820f4000, 0x4a1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+       { 0x820f5000, 0x4a1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+       { 0x820f7000, 0x4a1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+       { 0x820f9000, 0x4a3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+       { 0x820fa000, 0x4a4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+       { 0x820fb000, 0x4a4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+       { 0x820fc000, 0x4a4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+       { 0x820fd000, 0x4a4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+       { 0x820c4000, 0x4a8000, 0x1000 }, /* WF_LMAC_TOP (WF_UWTBL) */
+       { 0x820b0000, 0x4ae000, 0x1000 }, /* [APB2] WFSYS_ON */
+       { 0x80020000, 0x4b0000, 0x10000}, /* WF_TOP_MISC_OFF */
+       { 0x81020000, 0x4c0000, 0x10000}, /* WF_TOP_MISC_ON */
+       { 0x89000000, 0x4d0000, 0x1000 }, /* WF_MCU_CFG_ON */
+       { 0x89010000, 0x4d1000, 0x1000 }, /* WF_MCU_CIRQ */
+       { 0x89020000, 0x4d2000, 0x1000 }, /* WF_MCU_GPT */
+       { 0x89030000, 0x4d3000, 0x1000 }, /* WF_MCU_WDT */
+       { 0x80010000, 0x4d4000, 0x1000 }, /* WF_AXIDMA */
+       { 0x0, 0x0, 0x0 }, /* implies end of search */
+};
+
 static u32 mt7915_reg_map_l1(struct mt7915_dev *dev, u32 addr)
 {
        u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
        u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
-       u32 l1_remap = is_mt7915(&dev->mt76) ?
-                       MT_HIF_REMAP_L1 : MT_HIF_REMAP_L1_MT7916;
+       u32 l1_remap;
+
+       if (is_mt7986(&dev->mt76))
+               return MT_CONN_INFRA_OFFSET(addr);
+
+       l1_remap = is_mt7915(&dev->mt76) ?
+                  MT_HIF_REMAP_L1 : MT_HIF_REMAP_L1_MT7916;
 
        dev->bus_ops->rmw(&dev->mt76, l1_remap,
                          MT_HIF_REMAP_L1_MASK,
@@ -295,17 +378,19 @@ static u32 mt7915_reg_map_l2(struct mt7915_dev *dev, u32 addr)
                /* use read to push write */
                dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2);
        } else {
+               u32 ofs = is_mt7986(&dev->mt76) ? 0x400000 : 0;
+
                offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET_MT7916, addr);
                base = FIELD_GET(MT_HIF_REMAP_L2_BASE_MT7916, addr);
 
-               dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2_MT7916,
+               dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2_MT7916 + ofs,
                                  MT_HIF_REMAP_L2_MASK_MT7916,
                                  FIELD_PREP(MT_HIF_REMAP_L2_MASK_MT7916, base));
 
                /* use read to push write */
-               dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2_MT7916);
+               dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2_MT7916 + ofs);
 
-               offset += MT_HIF_REMAP_BASE_L2_MT7916;
+               offset += (MT_HIF_REMAP_BASE_L2_MT7916 + ofs);
        }
 
        return offset;
@@ -338,11 +423,20 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
 
        if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
            (addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
-           (addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END) ||
-           (addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
-           (addr >= MT_CBTOP2_PHY_START && addr <= MT_CBTOP2_PHY_END))
+           (addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
+               return mt7915_reg_map_l1(dev, addr);
+
+       if (dev_is_pci(dev->mt76.dev) &&
+           ((addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
+            (addr >= MT_CBTOP2_PHY_START && addr <= MT_CBTOP2_PHY_END)))
                return mt7915_reg_map_l1(dev, addr);
 
+       /* CONN_INFRA: convert to physical addr and use layer 1 remap */
+       if (addr >= MT_INFRA_MCU_START && addr <= MT_INFRA_MCU_END) {
+               addr = addr - MT_INFRA_MCU_START + MT_INFRA_BASE;
+               return mt7915_reg_map_l1(dev, addr);
+       }
+
        return mt7915_reg_map_l2(dev, addr);
 }
 
@@ -393,6 +487,12 @@ static int mt7915_mmio_init(struct mt76_dev *mdev,
                dev->reg.map = mt7916_reg_map;
                dev->reg.map_size = ARRAY_SIZE(mt7916_reg_map);
                break;
+       case 0x7986:
+               dev->reg.reg_rev = mt7986_reg;
+               dev->reg.offs_rev = mt7916_offs;
+               dev->reg.map = mt7986_reg_map;
+               dev->reg.map_size = ARRAY_SIZE(mt7986_reg_map);
+               break;
        default:
                return -EINVAL;
        }
@@ -585,13 +685,29 @@ static int __init mt7915_init(void)
 
        ret = pci_register_driver(&mt7915_pci_driver);
        if (ret)
-               pci_unregister_driver(&mt7915_hif_driver);
+               goto error_pci;
+
+       if (IS_ENABLED(CONFIG_MT7986_WMAC)) {
+               ret = platform_driver_register(&mt7986_wmac_driver);
+               if (ret)
+                       goto error_wmac;
+       }
+
+       return 0;
+
+error_wmac:
+       pci_unregister_driver(&mt7915_pci_driver);
+error_pci:
+       pci_unregister_driver(&mt7915_hif_driver);
 
        return ret;
 }
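
Note the error unwind above mirrors registration order: a failed platform_driver_register() first drops the PCI driver, then the HIF driver, and mt7915_exit() below tears all three down in the same reverse order.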
 
 static void __exit mt7915_exit(void)
 {
+       if (IS_ENABLED(CONFIG_MT7986_WMAC))
+               platform_driver_unregister(&mt7986_wmac_driver);
+
        pci_unregister_driver(&mt7915_pci_driver);
        pci_unregister_driver(&mt7915_hif_driver);
 }
index 96653d6..6efa0a2 100644
 #define MT7916_FIRMWARE_WM             "mediatek/mt7916_wm.bin"
 #define MT7916_ROM_PATCH               "mediatek/mt7916_rom_patch.bin"
 
+#define MT7986_FIRMWARE_WA             "mediatek/mt7986_wa.bin"
+#define MT7986_FIRMWARE_WM             "mediatek/mt7986_wm.bin"
+#define MT7986_FIRMWARE_WM_MT7975      "mediatek/mt7986_wm_mt7975.bin"
+#define MT7986_ROM_PATCH               "mediatek/mt7986_rom_patch.bin"
+#define MT7986_ROM_PATCH_MT7975                "mediatek/mt7986_rom_patch_mt7975.bin"
+
 #define MT7915_EEPROM_DEFAULT          "mediatek/mt7915_eeprom.bin"
 #define MT7915_EEPROM_DEFAULT_DBDC     "mediatek/mt7915_eeprom_dbdc.bin"
 #define MT7916_EEPROM_DEFAULT          "mediatek/mt7916_eeprom.bin"
+#define MT7986_EEPROM_MT7975_DEFAULT           "mediatek/mt7986_eeprom_mt7975.bin"
+#define MT7986_EEPROM_MT7975_DUAL_DEFAULT      "mediatek/mt7986_eeprom_mt7975_dual.bin"
+#define MT7986_EEPROM_MT7976_DEFAULT           "mediatek/mt7986_eeprom_mt7976.bin"
+#define MT7986_EEPROM_MT7976_DEFAULT_DBDC      "mediatek/mt7986_eeprom_mt7976_dbdc.bin"
+#define MT7986_EEPROM_MT7976_DUAL_DEFAULT      "mediatek/mt7986_eeprom_mt7976_dual.bin"
 
 #define MT7915_EEPROM_SIZE             3584
 #define MT7916_EEPROM_SIZE             4096
@@ -49,6 +60,7 @@
 #define MT7915_CFEND_RATE_11B          0x03    /* 11B LP, 11M */
 
 #define MT7915_THERMAL_THROTTLE_MAX    100
+#define MT7915_CDEV_THROTTLE_MAX       99
 
 #define MT7915_SKU_RATE_NUM            161
 
@@ -126,7 +138,9 @@ struct mt7915_sta {
 };
 
 struct mt7915_vif_cap {
-       bool ldpc:1;
+       bool ht_ldpc:1;
+       bool vht_ldpc:1;
+       bool he_ldpc:1;
        bool vht_su_ebfer:1;
        bool vht_su_ebfee:1;
        bool vht_mu_ebfer:1;
@@ -213,16 +227,18 @@ struct mt7915_phy {
        struct mt76_phy *mt76;
        struct mt7915_dev *dev;
 
-       struct ieee80211_sband_iftype_data iftype[2][NUM_NL80211_IFTYPES];
+       struct ieee80211_sband_iftype_data iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
 
        struct ieee80211_vif *monitor_vif;
 
        struct thermal_cooling_device *cdev;
+       u8 cdev_state;
        u8 throttle_state;
        u32 throttle_temp[2]; /* 0: critical high, 1: maximum */
 
        u32 rxfilter;
        u64 omac_mask;
+       u8 band_idx;
 
        u16 noise;
 
@@ -273,6 +289,7 @@ struct mt7915_dev {
        struct mt7915_phy *rdd2_phy;
 
        u16 chainmask;
+       u16 chainshift;
        u32 hif_idx;
 
        struct work_struct init_work;
@@ -305,6 +322,10 @@ struct mt7915_dev {
                u8 table_mask;
                u8 n_agrt;
        } twt;
+
+       struct reset_control *rstc;
+       void __iomem *dcm;
+       void __iomem *sku;
 };
 
 enum {
@@ -377,11 +398,35 @@ mt7915_ext_phy(struct mt7915_dev *dev)
        return phy->priv;
 }
 
+static inline u32 mt7915_check_adie(struct mt7915_dev *dev, bool sku)
+{
+       u32 mask = sku ? MT_CONNINFRA_SKU_MASK : MT_ADIE_TYPE_MASK;
+
+       if (!is_mt7986(&dev->mt76))
+               return 0;
+
+       return mt76_rr(dev, MT_CONNINFRA_SKU_DEC_ADDR) & mask;
+}
+
 extern const struct ieee80211_ops mt7915_ops;
 extern const struct mt76_testmode_ops mt7915_testmode_ops;
 extern struct pci_driver mt7915_pci_driver;
 extern struct pci_driver mt7915_hif_driver;
+extern struct platform_driver mt7986_wmac_driver;
 
+#ifdef CONFIG_MT7986_WMAC
+int mt7986_wmac_enable(struct mt7915_dev *dev);
+void mt7986_wmac_disable(struct mt7915_dev *dev);
+#else
+static inline int mt7986_wmac_enable(struct mt7915_dev *dev)
+{
+       return 0;
+}
+
+static inline void mt7986_wmac_disable(struct mt7915_dev *dev)
+{
+}
+#endif
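
The no-op stubs keep callers free of CONFIG_MT7986_WMAC ifdefs: mt7986_wmac_enable()/mt7986_wmac_disable() can be called unconditionally and compile away to nothing when the embedded WMAC support is not built.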
 struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
                                     void __iomem *mem_base, u32 device_id);
 irqreturn_t mt7915_irq_handler(int irq, void *dev_instance);
index 6a0f681..e5f93c4 100644
@@ -25,6 +25,11 @@ enum reg_rev {
        INT1_MASK_CSR,
        INT_MCU_CMD_SOURCE,
        INT_MCU_CMD_EVENT,
+       WFDMA0_ADDR,
+       WFDMA0_PCIE1_ADDR,
+       WFDMA_EXT_CSR_ADDR,
+       CBTOP1_PHY_END,
+       INFRA_MCU_ADDR_END,
        __MT_REG_MAX,
 };
 
@@ -98,6 +103,7 @@ enum offs_rev {
        PLE_PG_HIF_GROUP,
        PLE_HIF_PG_INFO,
        AC_OFFSET,
+       ETBF_PAR_RPT0,
        __MT_OFFS_MAX,
 };
 
@@ -218,10 +224,10 @@ enum offs_rev {
 #define MT_ETBF_TX_FB_CPL              GENMASK(31, 16)
 #define MT_ETBF_TX_FB_TRI              GENMASK(15, 0)
 
-#define MT_ETBF_RX_FB_CONT(_band)      MT_WF_ETBF(_band, 0x068)
-#define MT_ETBF_RX_FB_BW               GENMASK(7, 6)
-#define MT_ETBF_RX_FB_NC               GENMASK(5, 3)
-#define MT_ETBF_RX_FB_NR               GENMASK(2, 0)
+#define MT_ETBF_PAR_RPT0(_band)                MT_WF_ETBF(_band, __OFFS(ETBF_PAR_RPT0))
+#define MT_ETBF_PAR_RPT0_FB_BW         GENMASK(7, 6)
+#define MT_ETBF_PAR_RPT0_FB_NC         GENMASK(5, 3)
+#define MT_ETBF_PAR_RPT0_FB_NR         GENMASK(2, 0)
 
 #define MT_ETBF_TX_APP_CNT(_band)      MT_WF_ETBF(_band, 0x0f0)
 #define MT_ETBF_TX_IBF_CNT             GENMASK(31, 16)
@@ -362,11 +368,11 @@ enum offs_rev {
 #define MT_MIB_SDR31(_band)            MT_WF_MIB(_band, __OFFS(MIB_SDR31))
 
 #define MT_MIB_SDR32(_band)            MT_WF_MIB(_band, __OFFS(MIB_SDR32))
-#define MT_MIB_SDR32_TX_PKT_EBF_CNT_MASK       GENMASK(15, 0)
+#define MT_MIB_SDR32_TX_PKT_EBF_CNT    GENMASK(15, 0)
+#define MT_MIB_SDR32_TX_PKT_IBF_CNT    GENMASK(31, 16)
 
 #define MT_MIB_SDR33(_band)            MT_WF_MIB(_band, 0x088)
-#define MT_MIB_SDR32_TX_PKT_IBF_CNT_MASK       GENMASK(15, 0)
-#define MT_MIB_SDR32_TX_PKT_IBF_CNT_MASK_MT7916        GENMASK(31, 16)
+#define MT_MIB_SDR33_TX_PKT_IBF_CNT    GENMASK(15, 0)
 
 #define MT_MIB_SDRMUBF(_band)          MT_WF_MIB(_band, __OFFS(MIB_SDRMUBF))
 #define MT_MIB_MU_BF_TX_CNT            GENMASK(15, 0)
@@ -396,6 +402,19 @@ enum offs_rev {
                                                  ((n) << 2))
 #define MT_MIB_ARNCR_RANGE(val, n)     (((val) >> ((n) << 3)) & GENMASK(7, 0))
 
+#define MT_MIB_BFCR0(_band)            MT_WF_MIB(_band, 0x7b0)
+#define MT_MIB_BFCR0_RX_FB_HT          GENMASK(15, 0)
+#define MT_MIB_BFCR0_RX_FB_VHT         GENMASK(31, 16)
+
+#define MT_MIB_BFCR1(_band)            MT_WF_MIB(_band, 0x7b4)
+#define MT_MIB_BFCR1_RX_FB_HE          GENMASK(15, 0)
+
+#define MT_MIB_BFCR2(_band)            MT_WF_MIB(_band, 0x7b8)
+#define MT_MIB_BFCR2_BFEE_TX_FB_TRIG   GENMASK(15, 0)
+
+#define MT_MIB_BFCR7(_band)            MT_WF_MIB(_band, 0x7cc)
+#define MT_MIB_BFCR7_BFEE_TX_FB_CPL    GENMASK(15, 0)
+
 /* WTBLON TOP */
 #define MT_WTBLON_TOP_BASE             0x820d4000
 #define MT_WTBLON_TOP(ofs)             (MT_WTBLON_TOP_BASE + (ofs))
@@ -497,7 +516,7 @@ enum offs_rev {
 #define MT_WF_RMAC_MIB_RXTIME_CLR      BIT(31)
 
 /* WFDMA0 */
-#define MT_WFDMA0_BASE                 0xd4000
+#define MT_WFDMA0_BASE                 __REG(WFDMA0_ADDR)
 #define MT_WFDMA0(ofs)                 (MT_WFDMA0_BASE + (ofs))
 
 #define MT_WFDMA0_RST                  MT_WFDMA0(0x100)
@@ -545,7 +564,7 @@ enum offs_rev {
 #define MT_WFDMA1_PRI_DLY_INT_CFG0     MT_WFDMA1(0x2f0)
 
 /* WFDMA CSR */
-#define MT_WFDMA_EXT_CSR_BASE          0xd7000
+#define MT_WFDMA_EXT_CSR_BASE          __REG(WFDMA_EXT_CSR_ADDR)
 #define MT_WFDMA_EXT_CSR(ofs)          (MT_WFDMA_EXT_CSR_BASE + (ofs))
 
 #define MT_WFDMA_HOST_CONFIG           MT_WFDMA_EXT_CSR(0x30)
@@ -559,7 +578,7 @@ enum offs_rev {
 #define MT_PCIE_RECOG_ID_SEM           BIT(31)
 
 /* WFDMA0 PCIE1 */
-#define MT_WFDMA0_PCIE1_BASE           0xd8000
+#define MT_WFDMA0_PCIE1_BASE           __REG(WFDMA0_PCIE1_ADDR)
 #define MT_WFDMA0_PCIE1(ofs)           (MT_WFDMA0_PCIE1_BASE + (ofs))
 
 #define MT_WFDMA0_PCIE1_BUSY_ENA       MT_WFDMA0_PCIE1(0x13c)
@@ -662,6 +681,16 @@ enum offs_rev {
 #define MT_TOP_PWR_HW_CTRL             BIT(4)
 #define MT_TOP_PWR_PWR_ON              BIT(7)
 
+#define MT_TOP_RGU_SYSRAM_PDN          (MT_TOP_RGU_BASE + 0x050)
+#define MT_TOP_RGU_SYSRAM_SLP          (MT_TOP_RGU_BASE + 0x054)
+#define MT_TOP_WFSYS_PWR               (MT_TOP_RGU_BASE + 0x010)
+#define MT_TOP_PWR_EN_MASK             BIT(7)
+#define MT_TOP_PWR_ACK_MASK            BIT(6)
+#define MT_TOP_PWR_KEY_MASK            GENMASK(31, 16)
+
+#define MT7986_TOP_WM_RESET            (MT_TOP_RGU_BASE + 0x120)
+#define MT7986_TOP_WM_RESET_MASK       BIT(0)
+
 /* l1/l2 remap */
 #define MT_HIF_REMAP_L1                        0xf11ac
 #define MT_HIF_REMAP_L1_MT7916         0xfe260
@@ -685,9 +714,203 @@ enum offs_rev {
 #define MT_WFSYS1_PHY_START            0x18800000
 #define MT_WFSYS1_PHY_END              0x18bfffff
 #define MT_CBTOP1_PHY_START            0x70000000
-#define MT_CBTOP1_PHY_END              0x7fffffff
+#define MT_CBTOP1_PHY_END              __REG(CBTOP1_PHY_END)
 #define MT_CBTOP2_PHY_START            0xf0000000
 #define MT_CBTOP2_PHY_END              0xffffffff
+#define MT_INFRA_MCU_START             0x7c000000
+#define MT_INFRA_MCU_END               __REG(INFRA_MCU_ADDR_END)
+#define MT_CONN_INFRA_OFFSET(p)                ((p) - MT_INFRA_BASE)
+
+/* CONN INFRA CFG */
+#define MT_CONN_INFRA_BASE             0x18001000
+#define MT_CONN_INFRA(ofs)             (MT_CONN_INFRA_BASE + (ofs))
+
+#define MT_CONN_INFRA_EFUSE            MT_CONN_INFRA(0x020)
+
+#define MT_CONN_INFRA_ADIE_RESET       MT_CONN_INFRA(0x030)
+#define MT_CONN_INFRA_ADIE1_RESET_MASK BIT(0)
+#define MT_CONN_INFRA_ADIE2_RESET_MASK BIT(2)
+
+#define MT_CONN_INFRA_OSC_RC_EN                MT_CONN_INFRA(0x380)
+
+#define MT_CONN_INFRA_OSC_CTRL         MT_CONN_INFRA(0x300)
+#define MT_CONN_INFRA_OSC_RC_EN_MASK   BIT(7)
+#define MT_CONN_INFRA_OSC_STB_TIME_MASK        GENMASK(23, 0)
+
+#define MT_CONN_INFRA_HW_CTRL          MT_CONN_INFRA(0x200)
+#define MT_CONN_INFRA_HW_CTRL_MASK     BIT(0)
+
+#define MT_CONN_INFRA_WF_SLP_PROT      MT_CONN_INFRA(0x540)
+#define MT_CONN_INFRA_WF_SLP_PROT_MASK BIT(0)
+
+#define MT_CONN_INFRA_WF_SLP_PROT_RDY  MT_CONN_INFRA(0x544)
+#define MT_CONN_INFRA_CONN_WF_MASK     (BIT(29) | BIT(31))
+#define MT_CONN_INFRA_CONN             (BIT(25) | BIT(29) | BIT(31))
+
+#define MT_CONN_INFRA_EMI_REQ          MT_CONN_INFRA(0x414)
+#define MT_CONN_INFRA_EMI_REQ_MASK     BIT(0)
+#define MT_CONN_INFRA_INFRA_REQ_MASK   BIT(5)
+
+/* AFE */
+#define MT_AFE_CTRL_BASE(_band)                (0x18003000 + ((_band) << 19))
+#define MT_AFE_CTRL(_band, ofs)                (MT_AFE_CTRL_BASE(_band) + (ofs))
+
+#define MT_AFE_DIG_EN_01(_band)                MT_AFE_CTRL(_band, 0x00)
+#define MT_AFE_DIG_EN_02(_band)                MT_AFE_CTRL(_band, 0x04)
+#define MT_AFE_DIG_EN_03(_band)                MT_AFE_CTRL(_band, 0x08)
+#define MT_AFE_DIG_TOP_01(_band)       MT_AFE_CTRL(_band, 0x0c)
+
+#define MT_AFE_PLL_STB_TIME(_band)     MT_AFE_CTRL(_band, 0xf4)
+#define MT_AFE_PLL_STB_TIME_MASK       (GENMASK(30, 16) | GENMASK(14, 0))
+#define MT_AFE_PLL_STB_TIME_VAL                (FIELD_PREP(GENMASK(30, 16), 0x4bc) | \
+                                        FIELD_PREP(GENMASK(14, 0), 0x7e4))
+#define MT_AFE_BPLL_CFG_MASK           GENMASK(7, 6)
+#define MT_AFE_WPLL_CFG_MASK           GENMASK(1, 0)
+#define MT_AFE_MCU_WPLL_CFG_MASK       GENMASK(3, 2)
+#define MT_AFE_MCU_BPLL_CFG_MASK       GENMASK(17, 16)
+#define MT_AFE_PLL_CFG_MASK            (MT_AFE_BPLL_CFG_MASK | \
+                                        MT_AFE_WPLL_CFG_MASK | \
+                                        MT_AFE_MCU_WPLL_CFG_MASK | \
+                                        MT_AFE_MCU_BPLL_CFG_MASK)
+#define MT_AFE_PLL_CFG_VAL             (FIELD_PREP(MT_AFE_BPLL_CFG_MASK, 0x1) | \
+                                        FIELD_PREP(MT_AFE_WPLL_CFG_MASK, 0x2) | \
+                                        FIELD_PREP(MT_AFE_MCU_WPLL_CFG_MASK, 0x1) | \
+                                        FIELD_PREP(MT_AFE_MCU_BPLL_CFG_MASK, 0x2))
+
+#define MT_AFE_DIG_TOP_01_MASK         GENMASK(18, 15)
+#define MT_AFE_DIG_TOP_01_VAL          FIELD_PREP(MT_AFE_DIG_TOP_01_MASK, 0x9)
+
+#define MT_AFE_RG_WBG_EN_RCK_MASK      BIT(0)
+#define MT_AFE_RG_WBG_EN_BPLL_UP_MASK  BIT(21)
+#define MT_AFE_RG_WBG_EN_WPLL_UP_MASK  BIT(20)
+#define MT_AFE_RG_WBG_EN_PLL_UP_MASK   (MT_AFE_RG_WBG_EN_BPLL_UP_MASK | \
+                                        MT_AFE_RG_WBG_EN_WPLL_UP_MASK)
+#define MT_AFE_RG_WBG_EN_TXCAL_MASK    GENMASK(21, 17)
+
+#define MT_ADIE_SLP_CTRL_BASE(_band)   (0x18005000 + ((_band) << 19))
+#define MT_ADIE_SLP_CTRL(_band, ofs)   (MT_ADIE_SLP_CTRL_BASE(_band) + (ofs))
+
+#define MT_ADIE_SLP_CTRL_CK0(_band)    MT_ADIE_SLP_CTRL(_band, 0x120)
+
+/* ADIE */
+#define MT_ADIE_CHIP_ID                        0x02c
+#define MT_ADIE_CHIP_ID_MASK           GENMASK(31, 16)
+#define MT_ADIE_IDX0                   GENMASK(15, 0)
+#define MT_ADIE_IDX1                   GENMASK(31, 16)
+
+#define MT_ADIE_RG_TOP_THADC_BG                0x034
+#define MT_ADIE_VRPI_SEL_CR_MASK       GENMASK(15, 12)
+#define MT_ADIE_VRPI_SEL_EFUSE_MASK    GENMASK(6, 3)
+
+#define MT_ADIE_RG_TOP_THADC           0x038
+#define MT_ADIE_PGA_GAIN_MASK          GENMASK(25, 23)
+#define MT_ADIE_PGA_GAIN_EFUSE_MASK    GENMASK(2, 0)
+#define MT_ADIE_LDO_CTRL_MASK          GENMASK(27, 26)
+#define MT_ADIE_LDO_CTRL_EFUSE_MASK    GENMASK(6, 5)
+
+#define MT_AFE_RG_ENCAL_WBTAC_IF_SW    0x070
+#define MT_ADIE_EFUSE_RDATA0           0x130
+
+#define MT_ADIE_EFUSE2_CTRL            0x148
+#define MT_ADIE_EFUSE_CTRL_MASK                BIT(1)
+
+#define MT_ADIE_EFUSE_CFG              0x144
+#define MT_ADIE_EFUSE_MODE_MASK                GENMASK(7, 6)
+#define MT_ADIE_EFUSE_ADDR_MASK                GENMASK(25, 16)
+#define MT_ADIE_EFUSE_VALID_MASK       BIT(29)
+#define MT_ADIE_EFUSE_KICK_MASK                BIT(30)
+
+#define MT_ADIE_THADC_ANALOG           0x3a6
+
+#define MT_ADIE_THADC_SLOP             0x3a7
+#define MT_ADIE_ANA_EN_MASK            BIT(7)
+
+#define MT_ADIE_7975_XTAL_CAL          0x3a1
+#define MT_ADIE_TRIM_MASK              GENMASK(6, 0)
+#define MT_ADIE_EFUSE_TRIM_MASK                GENMASK(5, 0)
+#define MT_ADIE_XO_TRIM_EN_MASK                BIT(7)
+#define MT_ADIE_XTAL_DECREASE_MASK     BIT(6)
+
+#define MT_ADIE_7975_XO_TRIM2          0x3a2
+#define MT_ADIE_7975_XO_TRIM3          0x3a3
+#define MT_ADIE_7975_XO_TRIM4          0x3a4
+#define MT_ADIE_7975_XTAL_EN           0x3a5
+
+#define MT_ADIE_XO_TRIM_FLOW           0x3ac
+#define MT_ADIE_XTAL_AXM_80M_OSC       0x390
+#define MT_ADIE_XTAL_AXM_40M_OSC       0x391
+#define MT_ADIE_XTAL_TRIM1_80M_OSC     0x398
+#define MT_ADIE_XTAL_TRIM1_40M_OSC     0x399
+#define MT_ADIE_WRI_CK_SEL             0x4ac
+#define MT_ADIE_RG_STRAP_PIN_IN                0x4fc
+#define MT_ADIE_XTAL_C1                        0x654
+#define MT_ADIE_XTAL_C2                        0x658
+#define MT_ADIE_RG_XO_01               0x65c
+#define MT_ADIE_RG_XO_03               0x664
+
+#define MT_ADIE_CLK_EN                 0xa00
+
+#define MT_ADIE_7975_XTAL              0xa18
+#define MT_ADIE_7975_XTAL_EN_MASK      BIT(29)
+
+#define MT_ADIE_7975_COCLK             0xa1c
+#define MT_ADIE_7975_XO_2              0xa84
+#define MT_ADIE_7975_XO_2_FIX_EN       BIT(31)
+
+#define MT_ADIE_7975_XO_CTRL2          0xa94
+#define MT_ADIE_7975_XO_CTRL2_C1_MASK  GENMASK(26, 20)
+#define MT_ADIE_7975_XO_CTRL2_C2_MASK  GENMASK(18, 12)
+#define MT_ADIE_7975_XO_CTRL2_MASK     (MT_ADIE_7975_XO_CTRL2_C1_MASK | \
+                                        MT_ADIE_7975_XO_CTRL2_C2_MASK)
+
+#define MT_ADIE_7975_XO_CTRL6          0xaa4
+#define MT_ADIE_7975_XO_CTRL6_MASK     BIT(16)
+
+/* TOP SPI */
+#define MT_TOP_SPI_ADIE_BASE(_band)    (0x18004000 + ((_band) << 19))
+#define MT_TOP_SPI_ADIE(_band, ofs)    (MT_TOP_SPI_ADIE_BASE(_band) + (ofs))
+
+#define MT_TOP_SPI_BUSY_CR(_band)      MT_TOP_SPI_ADIE(_band, 0)
+#define MT_TOP_SPI_POLLING_BIT         BIT(5)
+
+#define MT_TOP_SPI_ADDR_CR(_band)      MT_TOP_SPI_ADIE(_band, 0x50)
+#define MT_TOP_SPI_READ_ADDR_FORMAT    (BIT(12) | BIT(13) | BIT(15))
+#define MT_TOP_SPI_WRITE_ADDR_FORMAT   (BIT(13) | BIT(15))
+
+#define MT_TOP_SPI_WRITE_DATA_CR(_band)        MT_TOP_SPI_ADIE(_band, 0x54)
+#define MT_TOP_SPI_READ_DATA_CR(_band) MT_TOP_SPI_ADIE(_band, 0x58)
+
+/* CONN INFRA CKGEN */
+#define MT_INFRA_CKGEN_BASE            0x18009000
+#define MT_INFRA_CKGEN(ofs)            (MT_INFRA_CKGEN_BASE + (ofs))
+
+#define MT_INFRA_CKGEN_BUS             MT_INFRA_CKGEN(0xa00)
+#define MT_INFRA_CKGEN_BUS_CLK_SEL_MASK        BIT(23)
+#define MT_INFRA_CKGEN_BUS_RDY_SEL_MASK        BIT(29)
+
+#define MT_INFRA_CKGEN_BUS_WPLL_DIV_1  MT_INFRA_CKGEN(0x008)
+#define MT_INFRA_CKGEN_BUS_WPLL_DIV_2  MT_INFRA_CKGEN(0x00c)
+
+#define MT_INFRA_CKGEN_RFSPI_WPLL_DIV  MT_INFRA_CKGEN(0x040)
+#define MT_INFRA_CKGEN_DIV_SEL_MASK    GENMASK(7, 2)
+#define MT_INFRA_CKGEN_DIV_EN_MASK     BIT(0)
+
+/* CONN INFRA BUS */
+#define MT_INFRA_BUS_BASE              0x1800e000
+#define MT_INFRA_BUS(ofs)              (MT_INFRA_BUS_BASE + (ofs))
+
+#define MT_INFRA_BUS_OFF_TIMEOUT       MT_INFRA_BUS(0x300)
+#define MT_INFRA_BUS_TIMEOUT_LIMIT_MASK        GENMASK(14, 7)
+#define MT_INFRA_BUS_TIMEOUT_EN_MASK   GENMASK(3, 0)
+
+#define MT_INFRA_BUS_ON_TIMEOUT                MT_INFRA_BUS(0x31c)
+#define MT_INFRA_BUS_EMI_START         MT_INFRA_BUS(0x360)
+#define MT_INFRA_BUS_EMI_END           MT_INFRA_BUS(0x364)
+
+/* CONN_INFRA_SKU */
+#define MT_CONNINFRA_SKU_DEC_ADDR      0x18050000
+#define MT_CONNINFRA_SKU_MASK          GENMASK(15, 0)
+#define MT_ADIE_TYPE_MASK              BIT(1)
 
 /* FW MODE SYNC */
 #define MT_SWDEF_MODE                  0x41f23c
@@ -746,6 +969,67 @@ enum offs_rev {
 #define MT_HW_REV                      0x70010204
 #define MT_WF_SUBSYS_RST               0x70002600
 
+#define MT_TOP_WFSYS_WAKEUP            MT_TOP(0x1a4)
+#define MT_TOP_WFSYS_WAKEUP_MASK       BIT(0)
+
+#define MT_TOP_MCU_EMI_BASE            MT_TOP(0x1c4)
+#define MT_TOP_MCU_EMI_BASE_MASK       GENMASK(19, 0)
+
+#define MT_TOP_CONN_INFRA_WAKEUP       MT_TOP(0x1a0)
+#define MT_TOP_CONN_INFRA_WAKEUP_MASK  BIT(0)
+
+#define MT_TOP_WFSYS_RESET_STATUS      MT_TOP(0x2cc)
+#define MT_TOP_WFSYS_RESET_STATUS_MASK BIT(30)
+
+/* SEMA */
+#define MT_SEMA_BASE                   0x18070000
+#define MT_SEMA(ofs)                   (MT_SEMA_BASE + (ofs))
+
+#define MT_SEMA_RFSPI_STATUS           (MT_SEMA(0x2000) + (11 * 4))
+#define MT_SEMA_RFSPI_RELEASE          (MT_SEMA(0x2200) + (11 * 4))
+#define MT_SEMA_RFSPI_STATUS_MASK      BIT(1)
+
+/* MCU BUS */
+#define MT_MCU_BUS_BASE                        0x18400000
+#define MT_MCU_BUS(ofs)                        (MT_MCU_BUS_BASE + (ofs))
+
+#define MT_MCU_BUS_TIMEOUT             MT_MCU_BUS(0xf0440)
+#define MT_MCU_BUS_TIMEOUT_SET_MASK    GENMASK(7, 0)
+#define MT_MCU_BUS_TIMEOUT_CG_EN_MASK  BIT(28)
+#define MT_MCU_BUS_TIMEOUT_EN_MASK     BIT(31)
+
+#define MT_MCU_BUS_REMAP               MT_MCU_BUS(0x120)
+
+/* TOP CFG */
+#define MT_TOP_CFG_BASE                        0x184b0000
+#define MT_TOP_CFG(ofs)                        (MT_TOP_CFG_BASE + (ofs))
+
+#define MT_TOP_CFG_IP_VERSION_ADDR     MT_TOP_CFG(0x010)
+
+/* TOP CFG ON */
+#define MT_TOP_CFG_ON_BASE             0x184c1000
+#define MT_TOP_CFG_ON(ofs)             (MT_TOP_CFG_ON_BASE + (ofs))
+
+#define MT_TOP_CFG_ON_ROM_IDX          MT_TOP_CFG_ON(0x604)
+
+/* SLP CTRL */
+#define MT_SLP_BASE                    0x184c3000
+#define MT_SLP(ofs)                    (MT_SLP_BASE + (ofs))
+
+#define MT_SLP_STATUS                  MT_SLP(0x00c)
+#define MT_SLP_WFDMA2CONN_MASK         (BIT(21) | BIT(23))
+#define MT_SLP_CTRL_EN_MASK            BIT(0)
+#define MT_SLP_CTRL_BSY_MASK           BIT(1)
+
+/* MCU BUS DBG */
+#define MT_MCU_BUS_DBG_BASE            0x18500000
+#define MT_MCU_BUS_DBG(ofs)            (MT_MCU_BUS_DBG_BASE + (ofs))
+
+#define MT_MCU_BUS_DBG_TIMEOUT         MT_MCU_BUS_DBG(0x0)
+#define MT_MCU_BUS_DBG_TIMEOUT_SET_MASK GENMASK(31, 16)
+#define MT_MCU_BUS_DBG_TIMEOUT_CK_EN_MASK BIT(3)
+#define MT_MCU_BUS_DBG_TIMEOUT_EN_MASK BIT(2)
+
 /* PCIE MAC */
 #define MT_PCIE_MAC_BASE               0x74030000
 #define MT_PCIE_MAC(ofs)               (MT_PCIE_MAC_BASE + (ofs))
@@ -761,18 +1045,23 @@ enum offs_rev {
 #define MT_WF_PP_TOP_RXQ_WFDMA_CF_5    MT_WF_PP_TOP(0x0e8)
 #define MT_WF_PP_TOP_RXQ_QID6_WFDMA_HIF_SEL_MASK       BIT(6)
 
-#define MT_WF_IRPI_BASE                        0x83006000
-#define MT_WF_IRPI(ofs)                        (MT_WF_IRPI_BASE + ((ofs) << 16))
+#define MT_WF_IRPI_BASE                        0x83000000
+#define MT_WF_IRPI(ofs)                        (MT_WF_IRPI_BASE + (ofs))
+
+#define MT_WF_IRPI_NSS(phy, nss)       MT_WF_IRPI(0x6000 + ((phy) << 20) + ((nss) << 16))
+#define MT_WF_IRPI_NSS_MT7916(phy, nss)        MT_WF_IRPI(0x1000 + ((phy) << 20) + ((nss) << 16))
 
-/* PHY: band 0(0x83080000), band 1(0x83090000) */
+/* PHY */
 #define MT_WF_PHY_BASE                 0x83080000
 #define MT_WF_PHY(ofs)                 (MT_WF_PHY_BASE + (ofs))
 
 #define MT_WF_PHY_RX_CTRL1(_phy)       MT_WF_PHY(0x2004 + ((_phy) << 16))
+#define MT_WF_PHY_RX_CTRL1_MT7916(_phy)        MT_WF_PHY(0x2004 + ((_phy) << 20))
 #define MT_WF_PHY_RX_CTRL1_IPI_EN      GENMASK(2, 0)
 #define MT_WF_PHY_RX_CTRL1_STSCNT_EN   GENMASK(11, 9)
 
 #define MT_WF_PHY_RXTD12(_phy)         MT_WF_PHY(0x8230 + ((_phy) << 16))
+#define MT_WF_PHY_RXTD12_MT7916(_phy)  MT_WF_PHY(0x8230 + ((_phy) << 20))
 #define MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY      BIT(18)
 #define MT_WF_PHY_RXTD12_IRPI_SW_CLR           BIT(29)
 
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
new file mode 100644
index 0000000..3028c02
--- /dev/null
@@ -0,0 +1,1212 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2022 MediaTek Inc. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/of_gpio.h>
+#include <linux/iopoll.h>
+#include <linux/reset.h>
+#include <linux/of_net.h>
+
+#include "mt7915.h"
+
+/* INFRACFG */
+#define MT_INFRACFG_CONN2AP_SLPPROT    0x0d0
+#define MT_INFRACFG_AP2CONN_SLPPROT    0x0d4
+
+#define MT_INFRACFG_RX_EN_MASK         BIT(16)
+#define MT_INFRACFG_TX_RDY_MASK                BIT(4)
+#define MT_INFRACFG_TX_EN_MASK         BIT(0)
+
+/* TOP POS */
+#define MT_TOP_POS_FAST_CTRL           0x114
+#define MT_TOP_POS_FAST_EN_MASK                BIT(3)
+
+#define MT_TOP_POS_SKU                 0x21c
+#define MT_TOP_POS_SKU_MASK            GENMASK(31, 28)
+#define MT_TOP_POS_SKU_ADIE_DBDC_MASK  BIT(2)
+
+enum {
+       ADIE_SB,
+       ADIE_DBDC
+};
+
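+/*
+ * The a-die front ends sit behind an on-chip SPI master that is
+ * driven through MMIO: poll MT_TOP_SPI_BUSY_CR until the busy bit
+ * clears, program the address (and data) registers, then poll again
+ * before the result can be taken from MT_TOP_SPI_READ_DATA_CR.
+ */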
+static int
+mt76_wmac_spi_read(struct mt7915_dev *dev, u8 adie, u32 addr, u32 *val)
+{
+       int ret;
+       u32 cur;
+
+       ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+                               USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+                               dev, MT_TOP_SPI_BUSY_CR(adie));
+       if (ret)
+               return ret;
+
+       mt76_wr(dev, MT_TOP_SPI_ADDR_CR(adie),
+               MT_TOP_SPI_READ_ADDR_FORMAT | addr);
+       mt76_wr(dev, MT_TOP_SPI_WRITE_DATA_CR(adie), 0);
+
+       ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+                               USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+                               dev, MT_TOP_SPI_BUSY_CR(adie));
+       if (ret)
+               return ret;
+
+       *val = mt76_rr(dev, MT_TOP_SPI_READ_DATA_CR(adie));
+
+       return 0;
+}
+
+static int
+mt76_wmac_spi_write(struct mt7915_dev *dev, u8 adie, u32 addr, u32 val)
+{
+       int ret;
+       u32 cur;
+
+       ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+                               USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+                               dev, MT_TOP_SPI_BUSY_CR(adie));
+       if (ret)
+               return ret;
+
+       mt76_wr(dev, MT_TOP_SPI_ADDR_CR(adie),
+               MT_TOP_SPI_WRITE_ADDR_FORMAT | addr);
+       mt76_wr(dev, MT_TOP_SPI_WRITE_DATA_CR(adie), val);
+
+       return read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+                                USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+                                dev, MT_TOP_SPI_BUSY_CR(adie));
+}
+
+static int
+mt76_wmac_spi_rmw(struct mt7915_dev *dev, u8 adie,
+                 u32 addr, u32 mask, u32 val)
+{
+       int ret;
+       u32 cur;
+
+       ret = mt76_wmac_spi_read(dev, adie, addr, &cur);
+       if (ret)
+               return ret;
+
+       cur &= ~mask;
+       cur |= val;
+
+       return mt76_wmac_spi_write(dev, adie, addr, cur);
+}
+
+static int
+mt7986_wmac_adie_efuse_read(struct mt7915_dev *dev, u8 adie,
+                           u32 addr, u32 *data)
+{
+       int ret, temp;
+       u32 val, mask;
+
+       ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_EFUSE_CFG,
+                                 MT_ADIE_EFUSE_CTRL_MASK);
+       if (ret)
+               return ret;
+
+       ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_EFUSE2_CTRL, BIT(30), 0x0);
+       if (ret)
+               return ret;
+
+       mask = (MT_ADIE_EFUSE_MODE_MASK | MT_ADIE_EFUSE_ADDR_MASK |
+               MT_ADIE_EFUSE_KICK_MASK);
+       val = FIELD_PREP(MT_ADIE_EFUSE_MODE_MASK, 0) |
+             FIELD_PREP(MT_ADIE_EFUSE_ADDR_MASK, addr) |
+             FIELD_PREP(MT_ADIE_EFUSE_KICK_MASK, 1);
+       ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_EFUSE2_CTRL, mask, val);
+       if (ret)
+               return ret;
+
+       ret = read_poll_timeout(mt76_wmac_spi_read, temp,
+                               !temp && !FIELD_GET(MT_ADIE_EFUSE_KICK_MASK, val),
+                               USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+                               dev, adie, MT_ADIE_EFUSE2_CTRL, &val);
+       if (ret)
+               return ret;
+
+       ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_EFUSE2_CTRL, &val);
+       if (ret)
+               return ret;
+
+       if (FIELD_GET(MT_ADIE_EFUSE_VALID_MASK, val) == 1)
+               ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_EFUSE_RDATA0,
+                                        data);
+
+       return ret;
+}
+
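+/*
+ * Hardware semaphore guarding the a-die SPI master; it must be held
+ * around every multi-step SPI sequence.
+ */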
+static inline void mt76_wmac_spi_lock(struct mt7915_dev *dev)
+{
+       u32 cur;
+
+       read_poll_timeout(mt76_rr, cur,
+                         FIELD_GET(MT_SEMA_RFSPI_STATUS_MASK, cur),
+                         1000, 1000 * MSEC_PER_SEC, false, dev,
+                         MT_SEMA_RFSPI_STATUS);
+}
+
+static inline void mt76_wmac_spi_unlock(struct mt7915_dev *dev)
+{
+       mt76_wr(dev, MT_SEMA_RFSPI_RELEASE, 1);
+}
+
+static u32 mt76_wmac_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
+{
+       val |= readl(base + offset) & ~mask;
+       writel(val, base + offset);
+
+       return val;
+}
+
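+/* SKU strap bit telling whether a dual-band (DBDC) a-die is fitted */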
+static u8 mt7986_wmac_check_adie_type(struct mt7915_dev *dev)
+{
+       u32 val;
+
+       val = readl(dev->sku + MT_TOP_POS_SKU);
+
+       return FIELD_GET(MT_TOP_POS_SKU_ADIE_DBDC_MASK, val);
+}
+
+static int mt7986_wmac_consys_reset(struct mt7915_dev *dev, bool enable)
+{
+       if (!enable)
+               return reset_control_assert(dev->rstc);
+
+       mt76_wmac_rmw(dev->sku, MT_TOP_POS_FAST_CTRL,
+                     MT_TOP_POS_FAST_EN_MASK,
+                     FIELD_PREP(MT_TOP_POS_FAST_EN_MASK, 0x1));
+
+       return reset_control_deassert(dev->rstc);
+}
+
+static int mt7986_wmac_gpio_setup(struct mt7915_dev *dev)
+{
+       struct pinctrl_state *state;
+       struct pinctrl *pinctrl;
+       int ret;
+       u8 type;
+
+       type = mt7986_wmac_check_adie_type(dev);
+       pinctrl = devm_pinctrl_get(dev->mt76.dev);
+       if (IS_ERR(pinctrl))
+               return PTR_ERR(pinctrl);
+
+       switch (type) {
+       case ADIE_SB:
+               state = pinctrl_lookup_state(pinctrl, "default");
+               if (IS_ERR_OR_NULL(state))
+                       return -EINVAL;
+               break;
+       case ADIE_DBDC:
+               state = pinctrl_lookup_state(pinctrl, "dbdc");
+               if (IS_ERR_OR_NULL(state))
+                       return -EINVAL;
+               break;
+       }
+
+       ret = pinctrl_select_state(pinctrl, state);
+       if (ret)
+               return ret;
+
+       usleep_range(500, 1000);
+
+       return 0;
+}
+
+static int mt7986_wmac_consys_lockup(struct mt7915_dev *dev, bool enable)
+{
+       int ret;
+       u32 cur;
+
+       mt76_wmac_rmw(dev->dcm, MT_INFRACFG_AP2CONN_SLPPROT,
+                     MT_INFRACFG_RX_EN_MASK,
+                     FIELD_PREP(MT_INFRACFG_RX_EN_MASK, enable));
+       ret = read_poll_timeout(readl, cur, !(cur & MT_INFRACFG_RX_EN_MASK),
+                               USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+                               dev->dcm + MT_INFRACFG_AP2CONN_SLPPROT);
+       if (ret)
+               return ret;
+
+       mt76_wmac_rmw(dev->dcm, MT_INFRACFG_AP2CONN_SLPPROT,
+                     MT_INFRACFG_TX_EN_MASK,
+                     FIELD_PREP(MT_INFRACFG_TX_EN_MASK, enable));
+       ret = read_poll_timeout(readl, cur, !(cur & MT_INFRACFG_TX_RDY_MASK),
+                               USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+                               dev->dcm + MT_INFRACFG_AP2CONN_SLPPROT);
+       if (ret)
+               return ret;
+
+       mt76_wmac_rmw(dev->dcm, MT_INFRACFG_CONN2AP_SLPPROT,
+                     MT_INFRACFG_RX_EN_MASK,
+                     FIELD_PREP(MT_INFRACFG_RX_EN_MASK, enable));
+       mt76_wmac_rmw(dev->dcm, MT_INFRACFG_CONN2AP_SLPPROT,
+                     MT_INFRACFG_TX_EN_MASK,
+                     FIELD_PREP(MT_INFRACFG_TX_EN_MASK, enable));
+
+       return 0;
+}
+
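+/* conninfra is out of reset once its IP version reads back 0x02070000 */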
+static int mt7986_wmac_coninfra_check(struct mt7915_dev *dev)
+{
+       u32 cur;
+
+       return read_poll_timeout(mt76_rr, cur, (cur == 0x02070000),
+                                USEC_PER_MSEC, 50 * USEC_PER_MSEC,
+                                false, dev, MT_CONN_INFRA_BASE);
+}
+
+static int mt7986_wmac_coninfra_setup(struct mt7915_dev *dev)
+{
+       struct device *pdev = dev->mt76.dev;
+       struct reserved_mem *rmem;
+       struct device_node *np;
+       u32 val;
+
+       np = of_parse_phandle(pdev->of_node, "memory-region", 0);
+       if (!np)
+               return -EINVAL;
+
+       rmem = of_reserved_mem_lookup(np);
+       if (!rmem)
+               return -EINVAL;
+
+       val = (rmem->base >> 16) & MT_TOP_MCU_EMI_BASE_MASK;
+
+       /* Set conninfra subsys PLL check */
+       mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS,
+                      MT_INFRA_CKGEN_BUS_RDY_SEL_MASK, 0x1);
+
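+       /* make the reserved "memory-region" EMI range visible to conninfra */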
+       mt76_rmw_field(dev, MT_TOP_MCU_EMI_BASE,
+                      MT_TOP_MCU_EMI_BASE_MASK, val);
+
+       mt76_wr(dev, MT_INFRA_BUS_EMI_START, rmem->base);
+       mt76_wr(dev, MT_INFRA_BUS_EMI_END, rmem->size);
+
+       mt76_rr(dev, MT_CONN_INFRA_EFUSE);
+
+       /* Set conninfra sysram */
+       mt76_wr(dev, MT_TOP_RGU_SYSRAM_PDN, 0);
+       mt76_wr(dev, MT_TOP_RGU_SYSRAM_SLP, 1);
+
+       return 0;
+}
+
+static int mt7986_wmac_sku_setup(struct mt7915_dev *dev, u32 *adie_type)
+{
+       int ret;
+       u32 adie_main, adie_ext;
+
+       mt76_rmw_field(dev, MT_CONN_INFRA_ADIE_RESET,
+                      MT_CONN_INFRA_ADIE1_RESET_MASK, 0x1);
+       mt76_rmw_field(dev, MT_CONN_INFRA_ADIE_RESET,
+                      MT_CONN_INFRA_ADIE2_RESET_MASK, 0x1);
+
+       mt76_wmac_spi_lock(dev);
+
+       ret = mt76_wmac_spi_read(dev, 0, MT_ADIE_CHIP_ID, &adie_main);
+       if (ret)
+               goto out;
+
+       ret = mt76_wmac_spi_read(dev, 1, MT_ADIE_CHIP_ID, &adie_ext);
+       if (ret)
+               goto out;
+
+       *adie_type = FIELD_GET(MT_ADIE_CHIP_ID_MASK, adie_main) |
+                    (MT_ADIE_CHIP_ID_MASK & adie_ext);
+
+out:
+       mt76_wmac_spi_unlock(dev);
+
+       return ret;
+}
+
+static inline u16 mt7986_adie_idx(u8 adie, u32 adie_type)
+{
+       if (adie == 0)
+               return u32_get_bits(adie_type, MT_ADIE_IDX0);
+       else
+               return u32_get_bits(adie_type, MT_ADIE_IDX1);
+}
+
+static inline bool is_7975(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+       return mt7986_adie_idx(adie, adie_type) == 0x7975;
+}
+
+static inline bool is_7976(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+       return mt7986_adie_idx(adie, adie_type) == 0x7976;
+}
+
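+/* propagate the thermal ADC calibration from efuse into the a-die CRs */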
+static int mt7986_wmac_adie_thermal_cal(struct mt7915_dev *dev, u8 adie)
+{
+       int ret;
+       u32 data, val;
+
+       ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_THADC_ANALOG,
+                                         &data);
+       if (ret || FIELD_GET(MT_ADIE_ANA_EN_MASK, data)) {
+               val = FIELD_GET(MT_ADIE_VRPI_SEL_EFUSE_MASK, data);
+               ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_RG_TOP_THADC_BG,
+                                       MT_ADIE_VRPI_SEL_CR_MASK,
+                                       FIELD_PREP(MT_ADIE_VRPI_SEL_CR_MASK, val));
+               if (ret)
+                       return ret;
+
+               val = FIELD_GET(MT_ADIE_PGA_GAIN_EFUSE_MASK, data);
+               ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_RG_TOP_THADC,
+                                       MT_ADIE_PGA_GAIN_MASK,
+                                       FIELD_PREP(MT_ADIE_PGA_GAIN_MASK, val));
+               if (ret)
+                       return ret;
+       }
+
+       ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_THADC_SLOP,
+                                         &data);
+       if (ret || FIELD_GET(MT_ADIE_ANA_EN_MASK, data)) {
+               val = FIELD_GET(MT_ADIE_LDO_CTRL_EFUSE_MASK, data);
+
+               return mt76_wmac_spi_rmw(dev, adie, MT_ADIE_RG_TOP_THADC,
+                                        MT_ADIE_LDO_CTRL_MASK,
+                                        FIELD_PREP(MT_ADIE_LDO_CTRL_MASK, val));
+       }
+
+       return 0;
+}
+
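+/*
+ * XO trim for MT7976: start from the efuse capacitor code (64 if
+ * untrimmed), apply the signed TRIM1 offset and clamp the result to
+ * the valid 0..127 range.
+ */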
+static int
+mt7986_read_efuse_xo_trim_7976(struct mt7915_dev *dev, u8 adie,
+                              bool is_40m, int *result)
+{
+       int ret;
+       u32 data, addr;
+
+       addr = is_40m ? MT_ADIE_XTAL_AXM_40M_OSC : MT_ADIE_XTAL_AXM_80M_OSC;
+       ret = mt7986_wmac_adie_efuse_read(dev, adie, addr, &data);
+       if (ret)
+               return ret;
+
+       if (!FIELD_GET(MT_ADIE_XO_TRIM_EN_MASK, data)) {
+               *result = 64;
+       } else {
+               *result = FIELD_GET(MT_ADIE_TRIM_MASK, data);
+               addr = is_40m ? MT_ADIE_XTAL_TRIM1_40M_OSC :
+                               MT_ADIE_XTAL_TRIM1_80M_OSC;
+               ret = mt7986_wmac_adie_efuse_read(dev, adie, addr, &data);
+               if (ret)
+                       return ret;
+
+               if (FIELD_GET(MT_ADIE_XO_TRIM_EN_MASK, data) &&
+                   FIELD_GET(MT_ADIE_XTAL_DECREASE_MASK, data))
+                       *result -= FIELD_GET(MT_ADIE_EFUSE_TRIM_MASK, data);
+               else if (FIELD_GET(MT_ADIE_XO_TRIM_EN_MASK, data))
+                       *result += FIELD_GET(MT_ADIE_EFUSE_TRIM_MASK, data);
+
+               *result = max(0, min(127, *result));
+       }
+
+       return 0;
+}
+
+static int mt7986_wmac_adie_xtal_trim_7976(struct mt7915_dev *dev, u8 adie)
+{
+       int ret, trim_80m, trim_40m;
+       u32 data, val, mode;
+
+       ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_XO_TRIM_FLOW,
+                                         &data);
+       if (ret || !FIELD_GET(BIT(1), data))
+               return 0;
+
+       ret = mt7986_read_efuse_xo_trim_7976(dev, adie, false, &trim_80m);
+       if (ret)
+               return ret;
+
+       ret = mt7986_read_efuse_xo_trim_7976(dev, adie, true, &trim_40m);
+       if (ret)
+               return ret;
+
+       ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_RG_STRAP_PIN_IN, &val);
+       if (ret)
+               return ret;
+
+       mode = FIELD_GET(GENMASK(6, 4), val);
+       if (!mode || mode == 0x2) {
+               ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C1,
+                                       GENMASK(31, 24),
+                                       FIELD_PREP(GENMASK(31, 24), trim_80m));
+               if (ret)
+                       return ret;
+
+               ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C2,
+                                       GENMASK(31, 24),
+                                       FIELD_PREP(GENMASK(31, 24), trim_80m));
+       } else if (mode == 0x3 || mode == 0x4 || mode == 0x6) {
+               ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C1,
+                                       GENMASK(23, 16),
+                                       FIELD_PREP(GENMASK(23, 16), trim_40m));
+               if (ret)
+                       return ret;
+
+               ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C2,
+                                       GENMASK(23, 16),
+                                       FIELD_PREP(GENMASK(23, 16), trim_40m));
+       }
+
+       return ret;
+}
+
+static int mt7986_wmac_adie_patch_7976(struct mt7915_dev *dev, u8 adie)
+{
+       int ret;
+
+       ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_TOP_THADC, 0x4a563b00);
+       if (ret)
+               return ret;
+
+       ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_01, 0x1d59080f);
+       if (ret)
+               return ret;
+
+       return mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_03, 0x34c00fe0);
+}
+
+static int
+mt7986_read_efuse_xo_trim_7975(struct mt7915_dev *dev, u8 adie,
+                              u32 addr, u32 *result)
+{
+       int ret;
+       u32 data;
+
+       ret = mt7986_wmac_adie_efuse_read(dev, adie, addr, &data);
+       if (ret)
+               return ret;
+
+       if ((data & MT_ADIE_XO_TRIM_EN_MASK)) {
+               if ((data & MT_ADIE_XTAL_DECREASE_MASK))
+                       *result -= (data & MT_ADIE_EFUSE_TRIM_MASK);
+               else
+                       *result += (data & MT_ADIE_EFUSE_TRIM_MASK);
+
+               *result = (*result & MT_ADIE_TRIM_MASK);
+       }
+
+       return 0;
+}
+
+static int mt7986_wmac_adie_xtal_trim_7975(struct mt7915_dev *dev, u8 adie)
+{
+       int ret;
+       u32 data, result = 0, value;
+
+       ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_7975_XTAL_EN,
+                                         &data);
+       if (ret || !(data & BIT(1)))
+               return 0;
+
+       ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_7975_XTAL_CAL,
+                                         &data);
+       if (ret)
+               return ret;
+
+       if (data & MT_ADIE_XO_TRIM_EN_MASK)
+               result = (data & MT_ADIE_TRIM_MASK);
+
+       ret = mt7986_read_efuse_xo_trim_7975(dev, adie, MT_ADIE_7975_XO_TRIM2,
+                                            &result);
+       if (ret)
+               return ret;
+
+       ret = mt7986_read_efuse_xo_trim_7975(dev, adie, MT_ADIE_7975_XO_TRIM3,
+                                            &result);
+       if (ret)
+               return ret;
+
+       ret = mt7986_read_efuse_xo_trim_7975(dev, adie, MT_ADIE_7975_XO_TRIM4,
+                                            &result);
+       if (ret)
+               return ret;
+
+       /* Update trim value to C1 and C2 */
+       value = FIELD_PREP(MT_ADIE_7975_XO_CTRL2_C1_MASK, result) |
+               FIELD_PREP(MT_ADIE_7975_XO_CTRL2_C2_MASK, result);
+       ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_XO_CTRL2,
+                               MT_ADIE_7975_XO_CTRL2_MASK, value);
+       if (ret)
+               return ret;
+
+       ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_7975_XTAL, &value);
+       if (ret)
+               return ret;
+
+       if (value & MT_ADIE_7975_XTAL_EN_MASK) {
+               ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_XO_2,
+                                       MT_ADIE_7975_XO_2_FIX_EN, 0x0);
+               if (ret)
+                       return ret;
+       }
+
+       return mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_XO_CTRL6,
+                                MT_ADIE_7975_XO_CTRL6_MASK, 0x1);
+}
+
+static int mt7986_wmac_adie_patch_7975(struct mt7915_dev *dev, u8 adie)
+{
+       int ret;
+
+       /* disable CAL LDO and fine tune RFDIG LDO */
+       ret = mt76_wmac_spi_write(dev, adie, 0x348, 0x00000002);
+       if (ret)
+               return ret;
+
+       ret = mt76_wmac_spi_write(dev, adie, 0x378, 0x00000002);
+       if (ret)
+               return ret;
+
+       ret = mt76_wmac_spi_write(dev, adie, 0x3a8, 0x00000002);
+       if (ret)
+               return ret;
+
+       ret = mt76_wmac_spi_write(dev, adie, 0x3d8, 0x00000002);
+       if (ret)
+               return ret;
+
+       /* set CKA driving and filter */
+       ret = mt76_wmac_spi_write(dev, adie, 0xa1c, 0x30000aaa);
+       if (ret)
+               return ret;
+
+       /* set CKB LDO to 1.4V */
+       ret = mt76_wmac_spi_write(dev, adie, 0xa84, 0x8470008a);
+       if (ret)
+               return ret;
+
+       /* turn on SX0 LTBUF */
+       ret = mt76_wmac_spi_write(dev, adie, 0x074, 0x00000002);
+       if (ret)
+               return ret;
+
+       /* CK_BUF_SW_EN = 1 (all clock buffers in manual mode) */
+       ret = mt76_wmac_spi_write(dev, adie, 0xaa4, 0x01001fc0);
+       if (ret)
+               return ret;
+
+       /* BT mode/WF normal mode 00000005 */
+       ret = mt76_wmac_spi_write(dev, adie, 0x070, 0x00000005);
+       if (ret)
+               return ret;
+
+       /* BG thermal sensor offset update */
+       ret = mt76_wmac_spi_write(dev, adie, 0x344, 0x00000088);
+       if (ret)
+               return ret;
+
+       ret = mt76_wmac_spi_write(dev, adie, 0x374, 0x00000088);
+       if (ret)
+               return ret;
+
+       ret = mt76_wmac_spi_write(dev, adie, 0x3a4, 0x00000088);
+       if (ret)
+               return ret;
+
+       ret = mt76_wmac_spi_write(dev, adie, 0x3d4, 0x00000088);
+       if (ret)
+               return ret;
+
+       /* set WCON VDD IPTAT to "0000" */
+       ret = mt76_wmac_spi_write(dev, adie, 0xa80, 0x44d07000);
+       if (ret)
+               return ret;
+
+       /* change LTBUF SX3 driving back to the default value */
+       ret = mt76_wmac_spi_write(dev, adie, 0xa88, 0x3900aaaa);
+       if (ret)
+               return ret;
+
+       /* SM input cap off */
+       ret = mt76_wmac_spi_write(dev, adie, 0x2c4, 0x00000000);
+       if (ret)
+               return ret;
+
+       /* set CKB driving and filter */
+       return mt76_wmac_spi_write(dev, adie, 0x2c8, 0x00000072);
+}
+
+static int mt7986_wmac_adie_cfg(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+       int ret;
+
+       mt76_wmac_spi_lock(dev);
+       ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_CLK_EN, ~0);
+       if (ret)
+               goto out;
+
+       if (is_7975(dev, adie, adie_type)) {
+               ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_COCLK,
+                                       BIT(1), 0x1);
+               if (ret)
+                       goto out;
+
+               ret = mt7986_wmac_adie_thermal_cal(dev, adie);
+               if (ret)
+                       goto out;
+
+               ret = mt7986_wmac_adie_xtal_trim_7975(dev, adie);
+               if (ret)
+                       goto out;
+
+               ret = mt7986_wmac_adie_patch_7975(dev, adie);
+       } else if (is_7976(dev, adie, adie_type)) {
+               if (mt7986_wmac_check_adie_type(dev) == ADIE_DBDC) {
+                       ret = mt76_wmac_spi_write(dev, adie,
+                                                 MT_ADIE_WRI_CK_SEL, 0x1c);
+                       if (ret)
+                               goto out;
+               }
+
+               ret = mt7986_wmac_adie_thermal_cal(dev, adie);
+               if (ret)
+                       goto out;
+
+               ret = mt7986_wmac_adie_xtal_trim_7976(dev, adie);
+               if (ret)
+                       goto out;
+
+               ret = mt7986_wmac_adie_patch_7976(dev, adie);
+       }
+out:
+       mt76_wmac_spi_unlock(dev);
+
+       return ret;
+}
+
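+/*
+ * AFE calibration: enable RCK, bring up the B/W PLLs, run TX
+ * calibration with the prescribed settling delays, then power the
+ * blocks back down.
+ */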
+static int
+mt7986_wmac_afe_cal(struct mt7915_dev *dev, u8 adie, bool dbdc, u32 adie_type)
+{
+       int ret;
+       u8 idx;
+
+       mt76_wmac_spi_lock(dev);
+       if (is_7975(dev, adie, adie_type))
+               ret = mt76_wmac_spi_write(dev, adie,
+                                         MT_AFE_RG_ENCAL_WBTAC_IF_SW,
+                                         0x80000000);
+       else
+               ret = mt76_wmac_spi_write(dev, adie,
+                                         MT_AFE_RG_ENCAL_WBTAC_IF_SW,
+                                         0x88888005);
+       if (ret)
+               goto out;
+
+       idx = dbdc ? ADIE_DBDC : adie;
+
+       mt76_rmw_field(dev, MT_AFE_DIG_EN_01(idx),
+                      MT_AFE_RG_WBG_EN_RCK_MASK, 0x1);
+       usleep_range(60, 100);
+
+       mt76_rmw(dev, MT_AFE_DIG_EN_01(idx),
+                MT_AFE_RG_WBG_EN_RCK_MASK, 0x0);
+
+       mt76_rmw_field(dev, MT_AFE_DIG_EN_03(idx),
+                      MT_AFE_RG_WBG_EN_BPLL_UP_MASK, 0x1);
+       usleep_range(30, 100);
+
+       mt76_rmw_field(dev, MT_AFE_DIG_EN_03(idx),
+                      MT_AFE_RG_WBG_EN_WPLL_UP_MASK, 0x1);
+       usleep_range(60, 100);
+
+       mt76_rmw_field(dev, MT_AFE_DIG_EN_01(idx),
+                      MT_AFE_RG_WBG_EN_TXCAL_MASK, 0x1f);
+       usleep_range(800, 1000);
+
+       mt76_rmw(dev, MT_AFE_DIG_EN_01(idx),
+                MT_AFE_RG_WBG_EN_TXCAL_MASK, 0x0);
+       mt76_rmw(dev, MT_AFE_DIG_EN_03(idx),
+                MT_AFE_RG_WBG_EN_PLL_UP_MASK, 0x0);
+
+       ret = mt76_wmac_spi_write(dev, adie, MT_AFE_RG_ENCAL_WBTAC_IF_SW,
+                                 0x5);
+
+out:
+       mt76_wmac_spi_unlock(dev);
+
+       return ret;
+}
+
+static void mt7986_wmac_subsys_pll_initial(struct mt7915_dev *dev, u8 band)
+{
+       mt76_rmw(dev, MT_AFE_PLL_STB_TIME(band),
+                MT_AFE_PLL_STB_TIME_MASK, MT_AFE_PLL_STB_TIME_VAL);
+
+       mt76_rmw(dev, MT_AFE_DIG_EN_02(band),
+                MT_AFE_PLL_CFG_MASK, MT_AFE_PLL_CFG_VAL);
+
+       mt76_rmw(dev, MT_AFE_DIG_TOP_01(band),
+                MT_AFE_DIG_TOP_01_MASK, MT_AFE_DIG_TOP_01_VAL);
+}
+
+static void mt7986_wmac_subsys_setting(struct mt7915_dev *dev)
+{
+       /* Subsys pll init */
+       mt7986_wmac_subsys_pll_initial(dev, 0);
+       mt7986_wmac_subsys_pll_initial(dev, 1);
+
+       /* Set legacy OSC control stable time */
+       mt76_rmw(dev, MT_CONN_INFRA_OSC_RC_EN,
+                MT_CONN_INFRA_OSC_RC_EN_MASK, 0x0);
+       mt76_rmw(dev, MT_CONN_INFRA_OSC_CTRL,
+                MT_CONN_INFRA_OSC_STB_TIME_MASK, 0x80706);
+
+       /* prevent the subsys from powering on/off within a short interval */
+       mt76_rmw(dev, MT_TOP_WFSYS_PWR,
+                MT_TOP_PWR_ACK_MASK | MT_TOP_PWR_KEY_MASK,
+                MT_TOP_PWR_KEY);
+}
+
+static int mt7986_wmac_bus_timeout(struct mt7915_dev *dev)
+{
+       mt76_rmw_field(dev, MT_INFRA_BUS_OFF_TIMEOUT,
+                      MT_INFRA_BUS_TIMEOUT_LIMIT_MASK, 0x2);
+
+       mt76_rmw_field(dev, MT_INFRA_BUS_OFF_TIMEOUT,
+                      MT_INFRA_BUS_TIMEOUT_EN_MASK, 0xf);
+
+       mt76_rmw_field(dev, MT_INFRA_BUS_ON_TIMEOUT,
+                      MT_INFRA_BUS_TIMEOUT_LIMIT_MASK, 0xc);
+
+       mt76_rmw_field(dev, MT_INFRA_BUS_ON_TIMEOUT,
+                      MT_INFRA_BUS_TIMEOUT_EN_MASK, 0xf);
+
+       return mt7986_wmac_coninfra_check(dev);
+}
+
+static void mt7986_wmac_clock_enable(struct mt7915_dev *dev, u32 adie_type)
+{
+       u32 cur;
+
+       mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_1,
+                      MT_INFRA_CKGEN_DIV_SEL_MASK, 0x1);
+
+       mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_2,
+                      MT_INFRA_CKGEN_DIV_SEL_MASK, 0x1);
+
+       mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_1,
+                      MT_INFRA_CKGEN_DIV_EN_MASK, 0x1);
+
+       mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_2,
+                      MT_INFRA_CKGEN_DIV_EN_MASK, 0x1);
+
+       mt76_rmw_field(dev, MT_INFRA_CKGEN_RFSPI_WPLL_DIV,
+                      MT_INFRA_CKGEN_DIV_SEL_MASK, 0x8);
+
+       mt76_rmw_field(dev, MT_INFRA_CKGEN_RFSPI_WPLL_DIV,
+                      MT_INFRA_CKGEN_DIV_EN_MASK, 0x1);
+
+       mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS,
+                      MT_INFRA_CKGEN_BUS_CLK_SEL_MASK, 0x0);
+
+       mt76_rmw_field(dev, MT_CONN_INFRA_HW_CTRL,
+                      MT_CONN_INFRA_HW_CTRL_MASK, 0x1);
+
+       mt76_rmw(dev, MT_TOP_CONN_INFRA_WAKEUP,
+                MT_TOP_CONN_INFRA_WAKEUP_MASK, 0x1);
+
+       usleep_range(900, 1000);
+
+       mt76_wmac_spi_lock(dev);
+       if (is_7975(dev, 0, adie_type) || is_7976(dev, 0, adie_type)) {
+               mt76_rmw_field(dev, MT_ADIE_SLP_CTRL_CK0(0),
+                              MT_SLP_CTRL_EN_MASK, 0x1);
+
+               read_poll_timeout(mt76_rr, cur, !(cur & MT_SLP_CTRL_BSY_MASK),
+                                 USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+                                 dev, MT_ADIE_SLP_CTRL_CK0(0));
+       }
+       if (is_7975(dev, 1, adie_type) || is_7976(dev, 1, adie_type)) {
+               mt76_rmw_field(dev, MT_ADIE_SLP_CTRL_CK0(1),
+                              MT_SLP_CTRL_EN_MASK, 0x1);
+
+               read_poll_timeout(mt76_rr, cur, !(cur & MT_SLP_CTRL_BSY_MASK),
+                                 USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+                                 dev, MT_ADIE_SLP_CTRL_CK0(1));
+       }
+       mt76_wmac_spi_unlock(dev);
+
+       mt76_rmw(dev, MT_TOP_CONN_INFRA_WAKEUP,
+                MT_TOP_CONN_INFRA_WAKEUP_MASK, 0x0);
+       usleep_range(900, 1000);
+}
+
+static int mt7986_wmac_top_wfsys_wakeup(struct mt7915_dev *dev, bool enable)
+{
+       mt76_rmw_field(dev, MT_TOP_WFSYS_WAKEUP,
+                      MT_TOP_WFSYS_WAKEUP_MASK, enable);
+
+       usleep_range(900, 1000);
+
+       if (!enable)
+               return 0;
+
+       return mt7986_wmac_coninfra_check(dev);
+}
+
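+/* assert/release the WM CPU reset; when enabling, wait for ROM index 0x1d1e */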
+static int mt7986_wmac_wm_enable(struct mt7915_dev *dev, bool enable)
+{
+       u32 cur;
+
+       mt76_rmw_field(dev, MT7986_TOP_WM_RESET,
+                      MT7986_TOP_WM_RESET_MASK, enable);
+       if (!enable)
+               return 0;
+
+       return read_poll_timeout(mt76_rr, cur, (cur == 0x1d1e),
+                                USEC_PER_MSEC, 5000 * USEC_PER_MSEC, false,
+                                dev, MT_TOP_CFG_ON_ROM_IDX);
+}
+
+static int mt7986_wmac_wfsys_poweron(struct mt7915_dev *dev, bool enable)
+{
+       u32 mask = MT_TOP_PWR_EN_MASK | MT_TOP_PWR_KEY_MASK;
+       u32 cur;
+
+       mt76_rmw(dev, MT_TOP_WFSYS_PWR, mask,
+                MT_TOP_PWR_KEY | FIELD_PREP(MT_TOP_PWR_EN_MASK, enable));
+
+       return read_poll_timeout(mt76_rr, cur,
+               (FIELD_GET(MT_TOP_WFSYS_RESET_STATUS_MASK, cur) == enable),
+               USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+               dev, MT_TOP_WFSYS_RESET_STATUS);
+}
+
+static int mt7986_wmac_wfsys_setting(struct mt7915_dev *dev)
+{
+       int ret;
+       u32 cur;
+
+       /* Turn off wfsys2conn bus sleep protect */
+       mt76_rmw(dev, MT_CONN_INFRA_WF_SLP_PROT,
+                MT_CONN_INFRA_WF_SLP_PROT_MASK, 0x0);
+
+       ret = mt7986_wmac_wfsys_poweron(dev, true);
+       if (ret)
+               return ret;
+
+       /* Check bus sleep protect */
+       ret = read_poll_timeout(mt76_rr, cur,
+                               !(cur & MT_CONN_INFRA_CONN_WF_MASK),
+                               USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+                               dev, MT_CONN_INFRA_WF_SLP_PROT_RDY);
+       if (ret)
+               return ret;
+
+       ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_SLP_WFDMA2CONN_MASK),
+                               USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+                               dev, MT_SLP_STATUS);
+       if (ret)
+               return ret;
+
+       return read_poll_timeout(mt76_rr, cur, (cur == 0x02060000),
+                                USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+                                dev, MT_TOP_CFG_IP_VERSION_ADDR);
+}
+
+static void mt7986_wmac_wfsys_set_timeout(struct mt7915_dev *dev)
+{
+       u32 mask = MT_MCU_BUS_TIMEOUT_SET_MASK |
+                  MT_MCU_BUS_TIMEOUT_CG_EN_MASK |
+                  MT_MCU_BUS_TIMEOUT_EN_MASK;
+       u32 val = FIELD_PREP(MT_MCU_BUS_TIMEOUT_SET_MASK, 1) |
+                 FIELD_PREP(MT_MCU_BUS_TIMEOUT_CG_EN_MASK, 1) |
+                 FIELD_PREP(MT_MCU_BUS_TIMEOUT_EN_MASK, 1);
+
+       mt76_rmw(dev, MT_MCU_BUS_TIMEOUT, mask, val);
+
+       mt76_wr(dev, MT_MCU_BUS_REMAP, 0x810f0000);
+
+       mask = MT_MCU_BUS_DBG_TIMEOUT_SET_MASK |
+              MT_MCU_BUS_DBG_TIMEOUT_CK_EN_MASK |
+              MT_MCU_BUS_DBG_TIMEOUT_EN_MASK;
+       val = FIELD_PREP(MT_MCU_BUS_DBG_TIMEOUT_SET_MASK, 0x3aa) |
+             FIELD_PREP(MT_MCU_BUS_DBG_TIMEOUT_CK_EN_MASK, 1) |
+             FIELD_PREP(MT_MCU_BUS_DBG_TIMEOUT_EN_MASK, 1);
+
+       mt76_rmw(dev, MT_MCU_BUS_DBG_TIMEOUT, mask, val);
+}
+
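+/*
+ * Publish which a-die combination is fitted; the SKU nibble values
+ * follow the vendor bring-up sequence.
+ */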
+static int mt7986_wmac_sku_update(struct mt7915_dev *dev, u32 adie_type)
+{
+       u32 val;
+
+       if (is_7976(dev, 0, adie_type) && is_7976(dev, 1, adie_type))
+               val = 0xf;
+       else if (is_7975(dev, 0, adie_type) && is_7975(dev, 1, adie_type))
+               val = 0xd;
+       else if (is_7976(dev, 0, adie_type))
+               val = 0x7;
+       else if (is_7975(dev, 1, adie_type))
+               val = 0x8;
+       else if (is_7976(dev, 1, adie_type))
+               val = 0xa;
+       else
+               return -EINVAL;
+
+       mt76_wmac_rmw(dev->sku, MT_TOP_POS_SKU, MT_TOP_POS_SKU_MASK,
+                     FIELD_PREP(MT_TOP_POS_SKU_MASK, val));
+
+       mt76_wr(dev, MT_CONNINFRA_SKU_DEC_ADDR, val);
+
+       return 0;
+}
+
+static int
+mt7986_wmac_adie_setup(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+       int ret;
+
+       if (!(is_7975(dev, adie, adie_type) || is_7976(dev, adie, adie_type)))
+               return 0;
+
+       ret = mt7986_wmac_adie_cfg(dev, adie, adie_type);
+       if (ret)
+               return ret;
+
+       ret = mt7986_wmac_afe_cal(dev, adie, false, adie_type);
+       if (ret)
+               return ret;
+
+       if (!adie && (mt7986_wmac_check_adie_type(dev) == ADIE_DBDC))
+               ret = mt7986_wmac_afe_cal(dev, adie, true, adie_type);
+
+       return ret;
+}
+
+static int mt7986_wmac_subsys_powerup(struct mt7915_dev *dev, u32 adie_type)
+{
+       int ret;
+
+       mt7986_wmac_subsys_setting(dev);
+
+       ret = mt7986_wmac_bus_timeout(dev);
+       if (ret)
+               return ret;
+
+       mt7986_wmac_clock_enable(dev, adie_type);
+
+       return 0;
+}
+
+static int mt7986_wmac_wfsys_powerup(struct mt7915_dev *dev)
+{
+       int ret;
+
+       ret = mt7986_wmac_wm_enable(dev, false);
+       if (ret)
+               return ret;
+
+       ret = mt7986_wmac_wfsys_setting(dev);
+       if (ret)
+               return ret;
+
+       mt7986_wmac_wfsys_set_timeout(dev);
+
+       return mt7986_wmac_wm_enable(dev, true);
+}
+
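+/*
+ * WMAC power-on: release the consys reset, set up GPIOs and bus
+ * sleep protection, bring up conninfra, configure both a-dies,
+ * power up the WF subsystem and finally publish the SKU.
+ */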
+int mt7986_wmac_enable(struct mt7915_dev *dev)
+{
+       int ret;
+       u32 adie_type;
+
+       ret = mt7986_wmac_consys_reset(dev, true);
+       if (ret)
+               return ret;
+
+       ret = mt7986_wmac_gpio_setup(dev);
+       if (ret)
+               return ret;
+
+       ret = mt7986_wmac_consys_lockup(dev, false);
+       if (ret)
+               return ret;
+
+       ret = mt7986_wmac_coninfra_check(dev);
+       if (ret)
+               return ret;
+
+       ret = mt7986_wmac_coninfra_setup(dev);
+       if (ret)
+               return ret;
+
+       ret = mt7986_wmac_sku_setup(dev, &adie_type);
+       if (ret)
+               return ret;
+
+       ret = mt7986_wmac_adie_setup(dev, 0, adie_type);
+       if (ret)
+               return ret;
+
+       ret = mt7986_wmac_adie_setup(dev, 1, adie_type);
+       if (ret)
+               return ret;
+
+       ret = mt7986_wmac_subsys_powerup(dev, adie_type);
+       if (ret)
+               return ret;
+
+       ret = mt7986_wmac_top_wfsys_wakeup(dev, true);
+       if (ret)
+               return ret;
+
+       ret = mt7986_wmac_wfsys_powerup(dev);
+       if (ret)
+               return ret;
+
+       return mt7986_wmac_sku_update(dev, adie_type);
+}
+
+void mt7986_wmac_disable(struct mt7915_dev *dev)
+{
+       u32 cur;
+
+       mt7986_wmac_top_wfsys_wakeup(dev, true);
+
+       /* Turn on wfsys2conn bus sleep protect */
+       mt76_rmw_field(dev, MT_CONN_INFRA_WF_SLP_PROT,
+                      MT_CONN_INFRA_WF_SLP_PROT_MASK, 0x1);
+
+       /* Check wfsys2conn bus sleep protect */
+       read_poll_timeout(mt76_rr, cur, (cur == MT_CONN_INFRA_CONN),
+                         USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+                         dev, MT_CONN_INFRA_WF_SLP_PROT_RDY);
+
+       mt7986_wmac_wfsys_poweron(dev, false);
+
+       /* Restore the wpll settings */
+       mt76_rmw_field(dev, MT_AFE_DIG_EN_02(0), MT_AFE_MCU_BPLL_CFG_MASK, 0x2);
+       mt76_rmw_field(dev, MT_AFE_DIG_EN_02(0), MT_AFE_WPLL_CFG_MASK, 0x2);
+
+       /* Reset EMI */
+       mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+                      MT_CONN_INFRA_EMI_REQ_MASK, 0x1);
+       mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+                      MT_CONN_INFRA_EMI_REQ_MASK, 0x0);
+       mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+                      MT_CONN_INFRA_INFRA_REQ_MASK, 0x1);
+       mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+                      MT_CONN_INFRA_INFRA_REQ_MASK, 0x0);
+
+       mt7986_wmac_top_wfsys_wakeup(dev, false);
+       mt7986_wmac_consys_lockup(dev, true);
+       mt7986_wmac_consys_reset(dev, false);
+}
+
+static int mt7986_wmac_init(struct mt7915_dev *dev)
+{
+       struct device *pdev = dev->mt76.dev;
+       struct platform_device *pfdev = to_platform_device(pdev);
+
+       dev->dcm = devm_platform_ioremap_resource(pfdev, 1);
+       if (IS_ERR(dev->dcm))
+               return PTR_ERR(dev->dcm);
+
+       dev->sku = devm_platform_ioremap_resource(pfdev, 2);
+       if (IS_ERR(dev->sku))
+               return PTR_ERR(dev->sku);
+
+       dev->rstc = devm_reset_control_get(pdev, "consys");
+       if (IS_ERR(dev->rstc))
+               return PTR_ERR(dev->rstc);
+
+       return mt7986_wmac_enable(dev);
+}
+
+static int mt7986_wmac_probe(struct platform_device *pdev)
+{
+       void __iomem *mem_base;
+       struct mt7915_dev *dev;
+       struct mt76_dev *mdev;
+       int irq, ret;
+       u32 chip_id;
+
+       chip_id = (uintptr_t)of_device_get_match_data(&pdev->dev);
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
+
+       mem_base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(mem_base)) {
+               dev_err(&pdev->dev, "Failed to get memory resource\n");
+               return PTR_ERR(mem_base);
+       }
+
+       dev = mt7915_mmio_probe(&pdev->dev, mem_base, chip_id);
+       if (IS_ERR(dev))
+               return PTR_ERR(dev);
+
+       mdev = &dev->mt76;
+       ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler,
+                              IRQF_SHARED, KBUILD_MODNAME, dev);
+       if (ret)
+               goto free_device;
+
+       mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+       ret = mt7986_wmac_init(dev);
+       if (ret)
+               goto free_irq;
+
+       ret = mt7915_register_device(dev);
+       if (ret)
+               goto free_irq;
+
+       return 0;
+
+free_irq:
+       devm_free_irq(mdev->dev, irq, dev);
+
+free_device:
+       mt76_free_device(&dev->mt76);
+
+       return ret;
+}
+
+static int mt7986_wmac_remove(struct platform_device *pdev)
+{
+       struct mt7915_dev *dev = platform_get_drvdata(pdev);
+
+       mt7915_unregister_device(dev);
+
+       return 0;
+}
+
+static const struct of_device_id mt7986_wmac_of_match[] = {
+       { .compatible = "mediatek,mt7986-wmac", .data = (u32 *)0x7986 },
+       {},
+};
+
+struct platform_driver mt7986_wmac_driver = {
+       .driver = {
+               .name = "mt7986-wmac",
+               .of_match_table = mt7986_wmac_of_match,
+       },
+       .probe = mt7986_wmac_probe,
+       .remove = mt7986_wmac_remove,
+};
+
+MODULE_FIRMWARE(MT7986_FIRMWARE_WA);
+MODULE_FIRMWARE(MT7986_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7986_FIRMWARE_WM_MT7975);
+MODULE_FIRMWARE(MT7986_ROM_PATCH);
+MODULE_FIRMWARE(MT7986_ROM_PATCH_MT7975);
index 83da21d..20f6364 100644
@@ -198,7 +198,6 @@ mt7915_tm_set_ipg_params(struct mt7915_phy *phy, u32 ipg, u8 mode)
        u8 slot_time = 9, sifs = TM_DEFAULT_SIFS;
        u8 aifsn = TM_MIN_AIFSN;
        u32 i2t_time, tr2t_time, txv_time;
-       bool ext_phy = phy != &dev->phy;
        u16 cw = 0;
 
        if (ipg < sig_ext + slot_time + sifs)
@@ -228,22 +227,18 @@ mt7915_tm_set_ipg_params(struct mt7915_phy *phy, u32 ipg, u8 mode)
 
                ipg -= aifsn * slot_time;
 
-               if (ipg > TM_DEFAULT_SIFS) {
-                       if (ipg < TM_MAX_SIFS)
-                               sifs = ipg;
-                       else
-                               sifs = TM_MAX_SIFS;
-               }
+               if (ipg > TM_DEFAULT_SIFS)
+                       sifs = min_t(u32, ipg, TM_MAX_SIFS);
        }
 done:
-       txv_time = mt76_get_field(dev, MT_TMAC_ATCR(ext_phy),
+       txv_time = mt76_get_field(dev, MT_TMAC_ATCR(phy->band_idx),
                                  MT_TMAC_ATCR_TXV_TOUT);
        txv_time *= 50; /* normal clock time */
 
        i2t_time = (slot_time * 1000 - txv_time - BBP_PROC_TIME) / 50;
        tr2t_time = (sifs * 1000 - txv_time - BBP_PROC_TIME) / 50;
 
-       mt76_set(dev, MT_TMAC_TRCR0(ext_phy),
+       mt76_set(dev, MT_TMAC_TRCR0(phy->band_idx),
                 FIELD_PREP(MT_TMAC_TRCR0_TR2T_CHK, tr2t_time) |
                 FIELD_PREP(MT_TMAC_TRCR0_I2T_CHK, i2t_time));
 
@@ -276,6 +271,8 @@ mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time)
        case MT76_TM_TX_MODE_OFDM:
                if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
                        sband = &mphy->sband_5g.sband;
+               else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
+                       sband = &mphy->sband_6g.sband;
                else
                        sband = &mphy->sband_2g.sband;
 
@@ -337,7 +334,6 @@ mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
 {
        int n_regs = ARRAY_SIZE(reg_backup_list);
        struct mt7915_dev *dev = phy->dev;
-       bool ext_phy = phy != &dev->phy;
        u32 *b = phy->test.reg_backup;
        int i;
 
@@ -361,7 +357,7 @@ mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
 
        if (phy->mt76->test.state == MT76_TM_STATE_OFF) {
                for (i = 0; i < n_regs; i++)
-                       mt76_wr(dev, reg_backup_list[i].band[ext_phy], b[i]);
+                       mt76_wr(dev, reg_backup_list[i].band[phy->band_idx], b[i]);
                return;
        }
 
@@ -372,33 +368,33 @@ mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
 
                phy->test.reg_backup = b;
                for (i = 0; i < n_regs; i++)
-                       b[i] = mt76_rr(dev, reg_backup_list[i].band[ext_phy]);
+                       b[i] = mt76_rr(dev, reg_backup_list[i].band[phy->band_idx]);
        }
 
-       mt76_clear(dev, MT_AGG_PCR0(ext_phy, 0), MT_AGG_PCR0_MM_PROT |
+       mt76_clear(dev, MT_AGG_PCR0(phy->band_idx, 0), MT_AGG_PCR0_MM_PROT |
                   MT_AGG_PCR0_GF_PROT | MT_AGG_PCR0_ERP_PROT |
                   MT_AGG_PCR0_VHT_PROT | MT_AGG_PCR0_BW20_PROT |
                   MT_AGG_PCR0_BW40_PROT | MT_AGG_PCR0_BW80_PROT);
-       mt76_set(dev, MT_AGG_PCR0(ext_phy, 0), MT_AGG_PCR0_PTA_WIN_DIS);
+       mt76_set(dev, MT_AGG_PCR0(phy->band_idx, 0), MT_AGG_PCR0_PTA_WIN_DIS);
 
-       mt76_wr(dev, MT_AGG_PCR0(ext_phy, 1), MT_AGG_PCR1_RTS0_NUM_THRES |
+       mt76_wr(dev, MT_AGG_PCR0(phy->band_idx, 1), MT_AGG_PCR1_RTS0_NUM_THRES |
                MT_AGG_PCR1_RTS0_LEN_THRES);
 
-       mt76_clear(dev, MT_AGG_MRCR(ext_phy), MT_AGG_MRCR_BAR_CNT_LIMIT |
+       mt76_clear(dev, MT_AGG_MRCR(phy->band_idx), MT_AGG_MRCR_BAR_CNT_LIMIT |
                   MT_AGG_MRCR_LAST_RTS_CTS_RN | MT_AGG_MRCR_RTS_FAIL_LIMIT |
                   MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT);
 
-       mt76_rmw(dev, MT_AGG_MRCR(ext_phy), MT_AGG_MRCR_RTS_FAIL_LIMIT |
+       mt76_rmw(dev, MT_AGG_MRCR(phy->band_idx), MT_AGG_MRCR_RTS_FAIL_LIMIT |
                 MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT,
                 FIELD_PREP(MT_AGG_MRCR_RTS_FAIL_LIMIT, 1) |
                 FIELD_PREP(MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT, 1));
 
-       mt76_wr(dev, MT_TMAC_TFCR0(ext_phy), 0);
-       mt76_clear(dev, MT_TMAC_TCR0(ext_phy), MT_TMAC_TCR0_TBTT_STOP_CTRL);
+       mt76_wr(dev, MT_TMAC_TFCR0(phy->band_idx), 0);
+       mt76_clear(dev, MT_TMAC_TCR0(phy->band_idx), MT_TMAC_TCR0_TBTT_STOP_CTRL);
 
        /* config rx filter for testmode rx */
-       mt76_wr(dev, MT_WF_RFCR(ext_phy), 0xcf70a);
-       mt76_wr(dev, MT_WF_RFCR1(ext_phy), 0);
+       mt76_wr(dev, MT_WF_RFCR(phy->band_idx), 0xcf70a);
+       mt76_wr(dev, MT_WF_RFCR1(phy->band_idx), 0);
 }
 
 static void
@@ -456,7 +452,7 @@ mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
                        u8 tx_ant = td->tx_antenna_mask;
 
                        if (phy != &dev->phy)
-                               tx_ant >>= 2;
+                               tx_ant >>= dev->chainshift;
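+                       /* spe_idx_map is indexed by the per-PHY antenna mask */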
                        phy->test.spe_idx = spe_idx_map[tx_ant];
                }
        }
@@ -578,6 +574,8 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
 
                if (chandef->chan->band == NL80211_BAND_5GHZ)
                        sband = &phy->mt76->sband_5g.sband;
+               else if (chandef->chan->band == NL80211_BAND_6GHZ)
+                       sband = &phy->mt76->sband_6g.sband;
                else
                        sband = &phy->mt76->sband_2g.sband;
 
@@ -724,7 +722,6 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
 {
        struct mt7915_phy *phy = mphy->priv;
        struct mt7915_dev *dev = phy->dev;
-       bool ext_phy = phy != &dev->phy;
        enum mt76_rxq_id q;
        void *rx, *rssi;
        u16 fcs_err;
@@ -773,11 +770,11 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
 
        nla_nest_end(msg, rx);
 
-       cnt = mt76_rr(dev, MT_MIB_SDR3(ext_phy));
+       cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));
        fcs_err = is_mt7915(&dev->mt76) ? FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
                FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
 
-       q = ext_phy ? MT_RXQ_EXT : MT_RXQ_MAIN;
+       q = phy->band_idx ? MT_RXQ_EXT : MT_RXQ_MAIN;
        mphy->test.rx_stats.packets[q] += fcs_err;
        mphy->test.rx_stats.fcs_error[q] += fcs_err;
 
index 71154fc..adff2d7 100644
@@ -24,3 +24,14 @@ config MT7921S
          This adds support for MT7921S 802.11ax 2x2:2SS wireless devices.
 
          To compile this driver as a module, choose M here.
+
+config MT7921U
+       tristate "MediaTek MT7921U (USB) support"
+       select MT76_USB
+       select MT7921_COMMON
+       depends on MAC80211
+       depends on USB
+       help
+         This adds support for MT7921U 802.11ax 2x2:2SS wireless devices.
+
+         To compile this driver as a module, choose M here.
index 1187ace..0a14681 100644
@@ -3,6 +3,7 @@
 obj-$(CONFIG_MT7921_COMMON) += mt7921-common.o
 obj-$(CONFIG_MT7921E) += mt7921e.o
 obj-$(CONFIG_MT7921S) += mt7921s.o
+obj-$(CONFIG_MT7921U) += mt7921u.o
 
 CFLAGS_trace.o := -I$(src)
 
@@ -10,3 +11,4 @@ mt7921-common-y := mac.o mcu.o main.o init.o debugfs.o trace.o
 mt7921-common-$(CONFIG_NL80211_TESTMODE) += testmode.o
 mt7921e-y := pci.o pci_mac.o pci_mcu.o dma.o
 mt7921s-y := sdio.o sdio_mac.o sdio_mcu.o
+mt7921u-y := usb.o usb_mac.o
index dd04909..bce7641 100644
@@ -129,23 +129,22 @@ mt7921_queues_acq(struct seq_file *s, void *data)
 
        mt7921_mutex_acquire(dev);
 
-       for (i = 0; i < 16; i++) {
-               int j, acs = i / 4, index = i % 4;
+       for (i = 0; i < 4; i++) {
                u32 ctrl, val, qlen = 0;
+               int j;
 
-               val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index));
-               ctrl = BIT(31) | BIT(15) | (acs << 8);
+               val = mt76_rr(dev, MT_PLE_AC_QEMPTY(i));
+               ctrl = BIT(31) | BIT(11) | (i << 24);
 
                for (j = 0; j < 32; j++) {
                        if (val & BIT(j))
                                continue;
 
-                       mt76_wr(dev, MT_PLE_FL_Q0_CTRL,
-                               ctrl | (j + (index << 5)));
+                       mt76_wr(dev, MT_PLE_FL_Q0_CTRL, ctrl | j);
                        qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL,
                                               GENMASK(11, 0));
                }
-               seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen);
+               seq_printf(s, "AC%d: queued=%d\n", i, qlen);
        }
 
        mt7921_mutex_release(dev);
@@ -268,6 +267,9 @@ mt7921_pm_set(void *data, u64 val)
        struct mt7921_dev *dev = data;
        struct mt76_connac_pm *pm = &dev->pm;
 
+       if (mt76_is_usb(&dev->mt76))
+               return -EOPNOTSUPP;
+
        mutex_lock(&dev->mt76.mutex);
 
        if (val == pm->enable_user)
@@ -312,6 +314,9 @@ mt7921_deep_sleep_set(void *data, u64 val)
        bool monitor = !!(dev->mphy.hw->conf.flags & IEEE80211_CONF_MONITOR);
        bool enable = !!val;
 
+       if (mt76_is_usb(&dev->mt76))
+               return -EOPNOTSUPP;
+
        mt7921_mutex_acquire(dev);
        if (pm->ds_enable_user == enable)
                goto out;
@@ -429,8 +434,13 @@ int mt7921_init_debugfs(struct mt7921_dev *dev)
        if (!dir)
                return -ENOMEM;
 
-       debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
-                                   mt7921_queues_read);
+       if (mt76_is_mmio(&dev->mt76))
+               debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues",
+                                           dir, mt7921_queues_read);
+       else
+               debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues",
+                                           dir, mt76_queues_read);
+
        debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
                                    mt7921_queues_acq);
        debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir,
index 39d6ce4..ca7e20f 100644
@@ -5,7 +5,7 @@
 #include "../dma.h"
 #include "mac.h"
 
-int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc)
+static int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc)
 {
        int i, err;
 
index fa6af85..91fc419 100644
@@ -165,7 +165,7 @@ out:
 
 static int mt7921_init_hardware(struct mt7921_dev *dev)
 {
-       int ret, idx, i;
+       int ret, i;
 
        set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
 
@@ -182,6 +182,13 @@ static int mt7921_init_hardware(struct mt7921_dev *dev)
                return ret;
        }
 
+       return 0;
+}
+
+static int mt7921_init_wcid(struct mt7921_dev *dev)
+{
+       int idx;
+
        /* Beacon and mgmt frames should occupy wcid 0 */
        idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7921_WTBL_STA - 1);
        if (idx)
@@ -195,6 +202,38 @@ static int mt7921_init_hardware(struct mt7921_dev *dev)
        return 0;
 }
 
+static void mt7921_init_work(struct work_struct *work)
+{
+       struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
+                                             init_work);
+       int ret;
+
+       ret = mt7921_init_hardware(dev);
+       if (ret)
+               return;
+
+       mt76_set_stream_caps(&dev->mphy, true);
+       mt7921_set_stream_he_caps(&dev->phy);
+
+       ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+                                  ARRAY_SIZE(mt76_rates));
+       if (ret) {
+               dev_err(dev->mt76.dev, "register device failed\n");
+               return;
+       }
+
+       ret = mt7921_init_debugfs(dev);
+       if (ret) {
+               dev_err(dev->mt76.dev, "register debugfs failed\n");
+               return;
+       }
+
+       /* we support chip reset now */
+       dev->hw_init_done = true;
+
+       mt76_connac_mcu_set_deep_sleep(&dev->mt76, dev->pm.ds_enable);
+}
+
 int mt7921_register_device(struct mt7921_dev *dev)
 {
        struct ieee80211_hw *hw = mt76_hw(dev);
@@ -222,19 +261,22 @@ int mt7921_register_device(struct mt7921_dev *dev)
        spin_lock_init(&dev->sta_poll_lock);
 
        INIT_WORK(&dev->reset_work, mt7921_mac_reset_work);
+       INIT_WORK(&dev->init_work, mt7921_init_work);
 
        dev->pm.idle_timeout = MT7921_PM_TIMEOUT;
        dev->pm.stats.last_wake_event = jiffies;
        dev->pm.stats.last_doze_event = jiffies;
-       dev->pm.enable_user = true;
-       dev->pm.enable = true;
-       dev->pm.ds_enable_user = true;
-       dev->pm.ds_enable = true;
+       if (!mt76_is_usb(&dev->mt76)) {
+               dev->pm.enable_user = true;
+               dev->pm.enable = true;
+               dev->pm.ds_enable_user = true;
+               dev->pm.ds_enable = true;
+       }
 
-       if (mt76_is_sdio(&dev->mt76))
+       if (!mt76_is_mmio(&dev->mt76))
                hw->extra_tx_headroom += MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
 
-       ret = mt7921_init_hardware(dev);
+       ret = mt7921_init_wcid(dev);
        if (ret)
                return ret;
 
@@ -262,23 +304,7 @@ int mt7921_register_device(struct mt7921_dev *dev)
        dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
        dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
 
-       mt76_set_stream_caps(&dev->mphy, true);
-       mt7921_set_stream_he_caps(&dev->phy);
-
-       ret = mt76_register_device(&dev->mt76, true, mt76_rates,
-                                  ARRAY_SIZE(mt76_rates));
-       if (ret)
-               return ret;
-
-       ret = mt7921_init_debugfs(dev);
-       if (ret)
-               return ret;
-
-       ret = mt76_connac_mcu_set_deep_sleep(&dev->mt76, dev->pm.ds_enable);
-       if (ret)
-               return ret;
-
-       dev->hw_init_done = true;
+       queue_work(system_wq, &dev->init_work);
 
        return 0;
 }
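
The init.c changes above defer the slow part of bring-up: mt7921_register_device() now only sets up the wcid and queues init_work, while mt7921_init_work() runs mt7921_init_hardware(), mt76_register_device() and the debugfs registration asynchronously. A minimal sketch of the same pattern, with hypothetical foo_* names (not from this patch):

	#include <linux/workqueue.h>

	struct foo_dev {
		struct work_struct init_work;
		bool hw_init_done;
	};

	static void foo_init_work(struct work_struct *work)
	{
		struct foo_dev *fdev = container_of(work, struct foo_dev,
						    init_work);

		/* slow firmware/hardware bring-up runs here, off the probe path */
		fdev->hw_init_done = true;
	}

	static int foo_probe(struct foo_dev *fdev)
	{
		INIT_WORK(&fdev->init_work, foo_init_work);
		queue_work(system_wq, &fdev->init_work);
		return 0;	/* probe returns quickly; init finishes asynchronously */
	}

The matching cancel_work_sync(&dev->init_work) calls added to the PCI, SDIO and USB teardown paths below keep the deferred work from racing device removal.
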
index d175583..233998c 100644
@@ -176,8 +176,8 @@ mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
        u32 ru_h, ru_l;
        u8 ru, offs = 0;
 
-       ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
-       ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
+       ru_l = le32_get_bits(rxv[0], MT_PRXV_HE_RU_ALLOC_L);
+       ru_h = le32_get_bits(rxv[1], MT_PRXV_HE_RU_ALLOC_H);
        ru = (u8)(ru_l | ru_h << 4);
 
        status->bw = RATE_INFO_BW_HE_RU;
@@ -247,19 +247,19 @@ mt7921_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv)
                         MU_PREP(FLAGS2_SIG_B_SYMS_USERS,
                                 le32_get_bits(rxv[2], MT_CRXV_HE_NUM_USER));
 
-       he_mu->ru_ch1[0] = FIELD_GET(MT_CRXV_HE_RU0, le32_to_cpu(rxv[3]));
+       he_mu->ru_ch1[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU0);
 
        if (status->bw >= RATE_INFO_BW_40) {
                he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN);
                he_mu->ru_ch2[0] =
-                       FIELD_GET(MT_CRXV_HE_RU1, le32_to_cpu(rxv[3]));
+                       le32_get_bits(rxv[3], MT_CRXV_HE_RU1);
        }
 
        if (status->bw >= RATE_INFO_BW_80) {
                he_mu->ru_ch1[1] =
-                       FIELD_GET(MT_CRXV_HE_RU2, le32_to_cpu(rxv[3]));
+                       le32_get_bits(rxv[3], MT_CRXV_HE_RU2);
                he_mu->ru_ch2[1] =
-                       FIELD_GET(MT_CRXV_HE_RU3, le32_to_cpu(rxv[3]));
+                       le32_get_bits(rxv[3], MT_CRXV_HE_RU3);
        }
 }
 
@@ -304,14 +304,16 @@ mt7921_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
        case MT_PHY_TYPE_HE_SU:
                he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
                             HE_BITS(DATA1_UL_DL_KNOWN) |
-                            HE_BITS(DATA1_BEAM_CHANGE_KNOWN);
+                            HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
+                            HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
 
                he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
                             HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
                break;
        case MT_PHY_TYPE_HE_EXT_SU:
                he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
-                            HE_BITS(DATA1_UL_DL_KNOWN);
+                            HE_BITS(DATA1_UL_DL_KNOWN) |
+                            HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
 
                he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
                break;
@@ -407,9 +409,9 @@ static int mt7921_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
        struct ieee80211_sta *sta;
        struct ieee80211_vif *vif;
        struct ieee80211_hdr hdr;
-       __le32 qos_ctrl, ht_ctrl;
+       u16 frame_control;
 
-       if (FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[3])) !=
+       if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
            MT_RXD3_NORMAL_U2M)
                return -EINVAL;
 
@@ -423,16 +425,15 @@ static int mt7921_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
        vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
 
        /* save the info from RXD and ethhdr before it is overwritten */
-       hdr.frame_control = FIELD_GET(MT_RXD6_FRAME_CONTROL, rxd[6]);
-       hdr.seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, rxd[8]);
-       qos_ctrl = FIELD_GET(MT_RXD8_QOS_CTL, rxd[8]);
-       ht_ctrl = FIELD_GET(MT_RXD9_HT_CONTROL, rxd[9]);
-
+       frame_control = le32_get_bits(rxd[6], MT_RXD6_FRAME_CONTROL);
+       hdr.frame_control = cpu_to_le16(frame_control);
+       hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_SEQ_CTRL));
        hdr.duration_id = 0;
+
        ether_addr_copy(hdr.addr1, vif->addr);
        ether_addr_copy(hdr.addr2, sta->addr);
-       switch (le16_to_cpu(hdr.frame_control) &
-               (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
+       switch (frame_control & (IEEE80211_FCTL_TODS |
+                                IEEE80211_FCTL_FROMDS)) {
        case 0:
                ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
                break;
@@ -454,15 +455,22 @@ static int mt7921_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
        if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
            eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
                ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
-       else if (eth_hdr->h_proto >= cpu_to_be16(ETH_P_802_3_MIN))
+       else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
                ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
        else
                skb_pull(skb, 2);
 
        if (ieee80211_has_order(hdr.frame_control))
-               memcpy(skb_push(skb, 2), &ht_ctrl, 2);
-       if (ieee80211_is_data_qos(hdr.frame_control))
-               memcpy(skb_push(skb, 2), &qos_ctrl, 2);
+               memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[9],
+                      IEEE80211_HT_CTL_LEN);
+       if (ieee80211_is_data_qos(hdr.frame_control)) {
+               __le16 qos_ctrl;
+
+               qos_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_QOS_CTL));
+               memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
+                      IEEE80211_QOS_CTL_LEN);
+       }
+
        if (ieee80211_has_a4(hdr.frame_control))
                memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
        else
@@ -664,9 +672,6 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
                                             status->chain_signal[i]);
                }
 
-               if (status->signal == -128)
-                       status->flag |= RX_FLAG_NO_SIGNAL_VAL;
-
                stbc = FIELD_GET(MT_PRXV_STBC, v0);
                gi = FIELD_GET(MT_PRXV_SGI, v0);
                cck = false;
@@ -910,11 +915,18 @@ mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi,
                val = MT_TXD3_SN_VALID |
                      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
                txwi[3] |= cpu_to_le32(val);
+               txwi[7] &= ~cpu_to_le32(MT_TXD7_HW_AMSDU);
        }
 
-       val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
-             FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
-       txwi[7] |= cpu_to_le32(val);
+       if (mt76_is_mmio(&dev->mt76)) {
+               val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
+                     FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
+               txwi[7] |= cpu_to_le32(val);
+       } else {
+               val = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) |
+                     FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype);
+               txwi[8] |= cpu_to_le32(val);
+       }
 }
 
 void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
@@ -1014,7 +1026,7 @@ void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
        if (!sta || !(sta->ht_cap.ht_supported || sta->he_cap.has_he))
                return;
 
-       tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
+       tid = le32_get_bits(txwi[1], MT_TXD1_TID);
        if (tid >= 6) /* skip VO queue */
                return;
 
@@ -1153,18 +1165,13 @@ void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
        struct mt76_wcid *wcid;
        __le32 *txs_data = data;
        u16 wcidx;
-       u32 txs;
        u8 pid;
 
-       txs = le32_to_cpu(txs_data[0]);
-       if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1)
+       if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
                return;
 
-       txs = le32_to_cpu(txs_data[2]);
-       wcidx = FIELD_GET(MT_TXS2_WCID, txs);
-
-       txs = le32_to_cpu(txs_data[3]);
-       pid = FIELD_GET(MT_TXS3_PID, txs);
+       wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
+       pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
 
        if (pid < MT_PACKET_ID_FIRST)
                return;
@@ -1203,8 +1210,8 @@ void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
        enum rx_pkt_type type;
        u16 flag;
 
-       type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
-       flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
+       type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+       flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG);
 
        if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
                type = PKT_TYPE_NORMAL_MCU;
@@ -1617,3 +1624,94 @@ void mt7921_coredump_work(struct work_struct *work)
 
        mt7921_reset(&dev->mt76);
 }
+
+/* usb_sdio */
+static void
+mt7921_usb_sdio_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid,
+                          enum mt76_txq_id qid, struct ieee80211_sta *sta,
+                          struct ieee80211_key_conf *key, int pid,
+                          struct sk_buff *skb)
+{
+       __le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
+
+       memset(txwi, 0, MT_SDIO_TXD_SIZE);
+       mt7921_mac_write_txwi(dev, txwi, skb, wcid, key, pid, false);
+       skb_push(skb, MT_SDIO_TXD_SIZE);
+}
+
+int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+                                  enum mt76_txq_id qid, struct mt76_wcid *wcid,
+                                  struct ieee80211_sta *sta,
+                                  struct mt76_tx_info *tx_info)
+{
+       struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+       struct ieee80211_key_conf *key = info->control.hw_key;
+       struct sk_buff *skb = tx_info->skb;
+       int err, pad, pktid, type;
+
+       if (unlikely(tx_info->skb->len <= ETH_HLEN))
+               return -EINVAL;
+
+       if (!wcid)
+               wcid = &dev->mt76.global_wcid;
+
+       if (sta) {
+               struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
+
+               if (time_after(jiffies, msta->last_txs + HZ / 4)) {
+                       info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+                       msta->last_txs = jiffies;
+               }
+       }
+
+       pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+       mt7921_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
+
+       type = mt76_is_sdio(mdev) ? MT7921_SDIO_DATA : 0;
+       mt7921_skb_add_usb_sdio_hdr(dev, skb, type);
+       pad = round_up(skb->len, 4) - skb->len;
+       if (mt76_is_usb(mdev))
+               pad += 4;
+
+       err = mt76_skb_adjust_pad(skb, pad);
+       if (err)
+               /* Release pktid in case of error. */
+               idr_remove(&wcid->pktid, pktid);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_prepare_skb);
+
+void mt7921_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
+                                    struct mt76_queue_entry *e)
+{
+       __le32 *txwi = (__le32 *)(e->skb->data + MT_SDIO_HDR_SIZE);
+       unsigned int headroom = MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
+       struct ieee80211_sta *sta;
+       struct mt76_wcid *wcid;
+       u16 idx;
+
+       idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
+       wcid = rcu_dereference(mdev->wcid[idx]);
+       sta = wcid_to_sta(wcid);
+
+       if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+               mt7921_tx_check_aggr(sta, txwi);
+
+       skb_pull(e->skb, headroom);
+       mt76_tx_complete_skb(mdev, e->wcid, e->skb);
+}
+EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_complete_skb);
+
+bool mt7921_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update)
+{
+       struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+
+       mt7921_mutex_acquire(dev);
+       mt7921_mac_sta_poll(dev);
+       mt7921_mutex_release(dev);
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_status_data);
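
Many of the hunks above are mechanical conversions from FIELD_GET(mask, le32_to_cpu(word)) to le32_get_bits(word, mask), which performs the byte swap and the field extraction in one call. A small equivalence sketch (FOO_TID is an illustrative mask, not a define from this driver):

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define FOO_TID		GENMASK(27, 25)	/* illustrative mask only */

	static u32 foo_get_tid(const __le32 *txwi)
	{
		/* old idiom: byte-swap the whole word, then mask out the field */
		u32 v_old = FIELD_GET(FOO_TID, le32_to_cpu(txwi[1]));
		/* new idiom: one helper does the swap and the extraction */
		u32 v_new = le32_get_bits(txwi[1], FOO_TID);

		return v_old == v_new ? v_new : 0;	/* the two are equivalent */
	}
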
index 544a1c3..79447e2 100644
@@ -202,6 +202,7 @@ enum tx_mcu_port_q_idx {
 #define MT_SDIO_TXD_SIZE               (MT_TXD_SIZE + 8 * 4)
 #define MT_SDIO_TAIL_SIZE              8
 #define MT_SDIO_HDR_SIZE               4
+#define MT_USB_TAIL_SIZE               4
 
 #define MT_TXD0_Q_IDX                  GENMASK(31, 25)
 #define MT_TXD0_PKT_FMT                        GENMASK(24, 23)
@@ -284,6 +285,9 @@ enum tx_mcu_port_q_idx {
 #define MT_TXD7_HW_AMSDU               BIT(10)
 #define MT_TXD7_TX_TIME                        GENMASK(9, 0)
 
+#define MT_TXD8_L_TYPE                 GENMASK(5, 4)
+#define MT_TXD8_L_SUB_TYPE             GENMASK(3, 0)
+
 #define MT_TX_RATE_STBC                        BIT(13)
 #define MT_TX_RATE_NSS                 GENMASK(12, 10)
 #define MT_TX_RATE_MODE                        GENMASK(9, 6)
index b6e836a..fdaf245 100644
@@ -264,7 +264,7 @@ static int mt7921_start(struct ieee80211_hw *hw)
        return err;
 }
 
-static void mt7921_stop(struct ieee80211_hw *hw)
+void mt7921_stop(struct ieee80211_hw *hw)
 {
        struct mt7921_dev *dev = mt7921_hw_dev(hw);
        struct mt7921_phy *phy = mt7921_hw_phy(hw);
@@ -281,6 +281,7 @@ static void mt7921_stop(struct ieee80211_hw *hw)
        mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, false, false);
        mt7921_mutex_release(dev);
 }
+EXPORT_SYMBOL_GPL(mt7921_stop);
 
 static int mt7921_add_interface(struct ieee80211_hw *hw,
                                struct ieee80211_vif *vif)
@@ -479,9 +480,27 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
        mt7921_mcu_set_beacon_filter(dev, vif, dev->pm.enable);
 }
 
+static void
+mt7921_sniffer_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct mt7921_dev *dev = priv;
+       struct ieee80211_hw *hw = mt76_hw(dev);
+       struct mt76_connac_pm *pm = &dev->pm;
+       bool monitor = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
+
+       mt7921_mcu_set_sniffer(dev, vif, monitor);
+       pm->enable = !monitor;
+       pm->ds_enable = !monitor;
+
+       mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
+
+       if (monitor)
+               mt7921_mcu_set_beacon_filter(dev, vif, false);
+}
+
 void mt7921_set_runtime_pm(struct mt7921_dev *dev)
 {
-       struct ieee80211_hw *hw = dev->mphy.hw;
+       struct ieee80211_hw *hw = mt76_hw(dev);
        struct mt76_connac_pm *pm = &dev->pm;
        bool monitor = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
 
@@ -516,17 +535,10 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
        }
 
        if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
-               bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
-
-               if (!enabled)
-                       phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
-               else
-                       phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
-
-               mt76_rmw_field(dev, MT_DMA_DCR0(0), MT_DMA_DCR0_RXD_G5_EN,
-                              enabled);
-               mt76_wr(dev, MT_WF_RFCR(0), phy->rxfilter);
-               mt7921_set_runtime_pm(dev);
+               ieee80211_iterate_active_interfaces(hw,
+                                                   IEEE80211_IFACE_ITER_RESUME_ALL,
+                                                   mt7921_sniffer_interface_iter, dev);
+               dev->mt76.rxfilter = mt76_rr(dev, MT_WF_RFCR(0));
        }
 
 out:
index 33a8368..da2be05 100644
@@ -863,10 +863,13 @@ int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
        else
                req.channel_band = chandef->chan->band;
 
-       if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+       if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
+           dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
+               req.switch_reason = CH_SWITCH_NORMAL;
+       else if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
                req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
-       else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
-                chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+       else if (!cfg80211_reg_can_beacon(dev->mt76.hw->wiphy, chandef,
+                                         NL80211_IFTYPE_AP))
                req.switch_reason = CH_SWITCH_DFS;
        else
                req.switch_reason = CH_SWITCH_NORMAL;
@@ -1133,3 +1136,33 @@ int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr)
 
        return 0;
 }
+
+int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+                          bool enable)
+{
+       struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+       struct {
+               struct {
+                       u8 band_idx;
+                       u8 pad[3];
+               } __packed hdr;
+               struct sniffer_enable_tlv {
+                       __le16 tag;
+                       __le16 len;
+                       u8 enable;
+                       u8 pad[3];
+               } __packed enable;
+       } req = {
+               .hdr = {
+                       .band_idx = mvif->band_idx,
+               },
+               .enable = {
+                       .tag = cpu_to_le16(0),
+                       .len = cpu_to_le16(sizeof(struct sniffer_enable_tlv)),
+                       .enable = enable,
+               },
+       };
+
+       return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req),
+                                true);
+}
index 9edc83f..7690364 100644
@@ -30,6 +30,7 @@
 
 #define MT7921_DRV_OWN_RETRY_COUNT     10
 #define MT7921_MCU_INIT_RETRY_COUNT    10
+#define MT7921_WFSYS_INIT_RETRY_COUNT  2
 
 #define MT7921_FIRMWARE_WM             "mediatek/WIFI_RAM_CODE_MT7961_1.bin"
 #define MT7921_ROM_PATCH               "mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin"
@@ -204,6 +205,8 @@ struct mt7921_dev {
        struct list_head sta_poll_list;
        spinlock_t sta_poll_lock;
 
+       struct work_struct init_work;
+
        u8 fw_debug;
 
        struct mt76_connac_pm pm;
@@ -352,17 +355,20 @@ static inline void mt7921_mcu_tx_cleanup(struct mt7921_dev *dev)
        mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], false);
 }
 
-static inline void mt7921_skb_add_sdio_hdr(struct sk_buff *skb,
-                                          enum mt7921_sdio_pkt_type type)
+static inline void
+mt7921_skb_add_usb_sdio_hdr(struct mt7921_dev *dev, struct sk_buff *skb,
+                           int type)
 {
-       u32 hdr;
+       u32 hdr, len;
 
-       hdr = FIELD_PREP(MT7921_SDIO_HDR_TX_BYTES, skb->len + sizeof(hdr)) |
+       len = mt76_is_usb(&dev->mt76) ? skb->len : skb->len + sizeof(hdr);
+       hdr = FIELD_PREP(MT7921_SDIO_HDR_TX_BYTES, len) |
              FIELD_PREP(MT7921_SDIO_HDR_PKT_TYPE, type);
 
        put_unaligned_le32(hdr, skb_push(skb, sizeof(hdr)));
 }
 
+void mt7921_stop(struct ieee80211_hw *hw);
 int mt7921_mac_init(struct mt7921_dev *dev);
 bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask);
 void mt7921_mac_reset_counters(struct mt7921_phy *phy);
@@ -384,7 +390,6 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 
 void mt7921_tx_worker(struct mt76_worker *w);
 void mt7921e_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
-int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc);
 void mt7921_tx_token_put(struct mt7921_dev *dev);
 void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
                         struct sk_buff *skb);
@@ -409,7 +414,6 @@ int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
 int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev);
 void mt7921_pm_wake_work(struct work_struct *work);
 void mt7921_pm_power_save_work(struct work_struct *work);
-bool mt7921_wait_for_mcu_init(struct mt7921_dev *dev);
 void mt7921_coredump_work(struct work_struct *work);
 int mt7921_wfsys_reset(struct mt7921_dev *dev);
 int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr);
@@ -444,12 +448,26 @@ int mt7921e_mcu_fw_pmctrl(struct mt7921_dev *dev);
 int mt7921s_mcu_init(struct mt7921_dev *dev);
 int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev);
 int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev);
-int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
-                          enum mt76_txq_id qid, struct mt76_wcid *wcid,
-                          struct ieee80211_sta *sta,
-                          struct mt76_tx_info *tx_info);
-void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
-bool mt7921s_tx_status_data(struct mt76_dev *mdev, u8 *update);
 void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data);
 void mt7921_set_runtime_pm(struct mt7921_dev *dev);
+int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+                          bool enable);
+
+int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+                                  enum mt76_txq_id qid, struct mt76_wcid *wcid,
+                                  struct ieee80211_sta *sta,
+                                  struct mt76_tx_info *tx_info);
+void mt7921_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
+                                    struct mt76_queue_entry *e);
+bool mt7921_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update);
+
+/* usb */
+#define MT_USB_TYPE_VENDOR     (USB_TYPE_VENDOR | 0x1f)
+#define MT_USB_TYPE_UHW_VENDOR (USB_TYPE_VENDOR | 0x1e)
+
+int mt7921u_mcu_power_on(struct mt7921_dev *dev);
+int mt7921u_wfsys_reset(struct mt7921_dev *dev);
+int mt7921u_dma_init(struct mt7921_dev *dev);
+int mt7921u_init_reset(struct mt7921_dev *dev);
+int mt7921u_mac_reset(struct mt7921_dev *dev);
 #endif
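
The renamed mt7921_skb_add_usb_sdio_hdr() helper above encodes one subtle difference between the buses: the SDIO length field counts the 4-byte header itself, while USB counts only the payload. A condensed restatement (a sketch, not driver code):

	static u32 hdr_tx_bytes(bool is_usb, u32 skb_len)
	{
		/* SDIO includes the 4-byte header in the length; USB does not */
		return is_usb ? skb_len : skb_len + 4;
	}

For a 100-byte skb this yields 100 on USB and 104 on SDIO.
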
index a0c82d1..1a01d02 100644
@@ -105,6 +105,7 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev)
        int i;
        struct mt76_connac_pm *pm = &dev->pm;
 
+       cancel_work_sync(&dev->init_work);
        mt76_unregister_device(&dev->mt76);
        mt76_for_each_q_rx(&dev->mt76, i)
                napi_disable(&dev->mt76.napi[i]);
index 8ca5829..5ca14db 100644
@@ -137,7 +137,7 @@ mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
 
                wcid_idx = wcid->idx;
        } else {
-               wcid_idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1]));
+               wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
        }
 
        __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
@@ -164,11 +164,7 @@ mt7921e_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
        mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
        mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
 
-       /* TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE,
-        * to the time ack is received or dropped by hw (air + hw queue time).
-        * Should avoid accessing WTBL to get Tx airtime, and use it instead.
-        */
-       count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
+       count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
        if (WARN_ON_ONCE((void *)&free->info[count] > end))
                return;
 
@@ -231,7 +227,8 @@ bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len)
        __le32 *end = (__le32 *)&rxd[len / 4];
        enum rx_pkt_type type;
 
-       type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+       type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+
        switch (type) {
        case PKT_TYPE_TXRX_NOTIFY:
                mt7921e_mac_tx_free(dev, data, len);
@@ -252,7 +249,7 @@ void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
        __le32 *rxd = (__le32 *)skb->data;
        enum rx_pkt_type type;
 
-       type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+       type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
 
        switch (type) {
        case PKT_TYPE_TXRX_NOTIFY:
index 411695f..6712ff6 100644
 #define MT_PLE_BASE                    0x820c0000
 #define MT_PLE(ofs)                    (MT_PLE_BASE + (ofs))
 
-#define MT_PLE_FL_Q0_CTRL              MT_PLE(0x1b0)
-#define MT_PLE_FL_Q1_CTRL              MT_PLE(0x1b4)
-#define MT_PLE_FL_Q2_CTRL              MT_PLE(0x1b8)
-#define MT_PLE_FL_Q3_CTRL              MT_PLE(0x1bc)
+#define MT_PLE_FL_Q0_CTRL              MT_PLE(0x3e0)
+#define MT_PLE_FL_Q1_CTRL              MT_PLE(0x3e4)
+#define MT_PLE_FL_Q2_CTRL              MT_PLE(0x3e8)
+#define MT_PLE_FL_Q3_CTRL              MT_PLE(0x3ec)
 
-#define MT_PLE_AC_QEMPTY(ac, n)                MT_PLE(0x300 + 0x10 * (ac) + \
-                                              ((n) << 2))
+#define MT_PLE_AC_QEMPTY(_n)           MT_PLE(0x500 + 0x40 * (_n))
 #define MT_PLE_AMSDU_PACK_MSDU_CNT(n)  MT_PLE(0x10e0 + ((n) << 2))
 
 #define MT_MDP_BASE                    0x820cd000
 #define MT_WFDMA0_GLO_CFG_RX_DMA_EN    BIT(2)
 #define MT_WFDMA0_GLO_CFG_RX_DMA_BUSY  BIT(3)
 #define MT_WFDMA0_GLO_CFG_TX_WB_DDONE  BIT(6)
+#define MT_WFDMA0_GLO_CFG_FW_DWLD_BYPASS_DMASHDL BIT(9)
 #define MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN   BIT(12)
 #define MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN BIT(15)
 #define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2   BIT(21)
 #define MT_WFDMA0_TX_RING16_EXT_CTRL   MT_WFDMA0(0x640)
 #define MT_WFDMA0_TX_RING17_EXT_CTRL   MT_WFDMA0(0x644)
 
+#define MT_WPDMA0_MAX_CNT_MASK         GENMASK(7, 0)
+#define MT_WPDMA0_BASE_PTR_MASK                GENMASK(31, 16)
+
 #define MT_WFDMA0_RX_RING0_EXT_CTRL    MT_WFDMA0(0x680)
 #define MT_WFDMA0_RX_RING1_EXT_CTRL    MT_WFDMA0(0x684)
 #define MT_WFDMA0_RX_RING2_EXT_CTRL    MT_WFDMA0(0x688)
 #define MT_WFDMA_DUMMY_CR              MT_MCU_WPDMA0(0x120)
 #define MT_WFDMA_NEED_REINIT           BIT(1)
 
+#define MT_CBTOP_RGU(ofs)              (0x70002000 + (ofs))
+#define MT_CBTOP_RGU_WF_SUBSYS_RST     MT_CBTOP_RGU(0x600)
+#define MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH BIT(0)
+
 #define MT_HW_BOUND                    0x70010020
 #define MT_HW_CHIPID                   0x70010200
 #define MT_HW_REV                      0x70010204
 #define MT_PCIE_MAC(ofs)               (MT_PCIE_MAC_BASE + (ofs))
 #define MT_PCIE_MAC_INT_ENABLE         MT_PCIE_MAC(0x188)
 
-#define MT_DMA_SHDL(ofs)               (0xd6000 + (ofs))
+#define MT_DMA_SHDL(ofs)               (0x7c026000 + (ofs))
 #define MT_DMASHDL_SW_CONTROL          MT_DMA_SHDL(0x004)
 #define MT_DMASHDL_DMASHDL_BYPASS      BIT(28)
 #define MT_DMASHDL_OPTIONAL            MT_DMA_SHDL(0x008)
 #define MT_DMASHDL_PAGE                        MT_DMA_SHDL(0x00c)
+#define MT_DMASHDL_GROUP_SEQ_ORDER     BIT(16)
 #define MT_DMASHDL_REFILL              MT_DMA_SHDL(0x010)
+#define MT_DMASHDL_REFILL_MASK         GENMASK(31, 16)
 #define MT_DMASHDL_PKT_MAX_SIZE                MT_DMA_SHDL(0x01c)
 #define MT_DMASHDL_PKT_MAX_SIZE_PLE    GENMASK(11, 0)
 #define MT_DMASHDL_PKT_MAX_SIZE_PSE    GENMASK(27, 16)
 
 #define MT_DMASHDL_SCHED_SET(_n)       MT_DMA_SHDL(0x070 + ((_n) << 2))
 
+#define MT_WFDMA_HOST_CONFIG           0x7c027030
+#define MT_WFDMA_HOST_CONFIG_USB_RXEVT_EP4_EN  BIT(6)
+
+#define MT_UMAC(ofs)                   (0x74000000 + (ofs))
+#define MT_UDMA_TX_QSEL                        MT_UMAC(0x008)
+#define MT_FW_DL_EN                    BIT(3)
+
+#define MT_UDMA_WLCFG_1                        MT_UMAC(0x00c)
+#define MT_WL_RX_AGG_PKT_LMT           GENMASK(7, 0)
+#define MT_WL_TX_TMOUT_LMT             GENMASK(27, 8)
+
+#define MT_UDMA_WLCFG_0                        MT_UMAC(0x18)
+#define MT_WL_RX_AGG_TO                        GENMASK(7, 0)
+#define MT_WL_RX_AGG_LMT               GENMASK(15, 8)
+#define MT_WL_TX_TMOUT_FUNC_EN         BIT(16)
+#define MT_WL_TX_DPH_CHK_EN            BIT(17)
+#define MT_WL_RX_MPSZ_PAD0             BIT(18)
+#define MT_WL_RX_FLUSH                 BIT(19)
+#define MT_TICK_1US_EN                 BIT(20)
+#define MT_WL_RX_AGG_EN                        BIT(21)
+#define MT_WL_RX_EN                    BIT(22)
+#define MT_WL_TX_EN                    BIT(23)
+#define MT_WL_RX_BUSY                  BIT(30)
+#define MT_WL_TX_BUSY                  BIT(31)
+
+#define MT_UDMA_CONN_INFRA_STATUS      MT_UMAC(0xa20)
+#define MT_UDMA_CONN_WFSYS_INIT_DONE   BIT(22)
+#define MT_UDMA_CONN_INFRA_STATUS_SEL  MT_UMAC(0xa24)
+
+#define MT_SSUSB_EPCTL_CSR(ofs)                (0x74011800 + (ofs))
+#define MT_SSUSB_EPCTL_CSR_EP_RST_OPT  MT_SSUSB_EPCTL_CSR(0x090)
+
+#define MT_UWFDMA0(ofs)                        (0x7c024000 + (ofs))
+#define MT_UWFDMA0_GLO_CFG             MT_UWFDMA0(0x208)
+#define MT_UWFDMA0_GLO_CFG_EXT0                MT_UWFDMA0(0x2b0)
+#define MT_UWFDMA0_TX_RING_EXT_CTRL(_n)        MT_UWFDMA0(0x600 + ((_n) << 2))
+
 #define MT_CONN_STATUS                 0x7c053c10
 #define MT_WIFI_PATCH_DL_STATE         BIT(0)
 
 #define WFSYS_SW_INIT_DONE             BIT(4)
 
 #define MT_CONN_ON_MISC                        0x7c0600f0
+#define MT_TOP_MISC2_FW_PWR_ON         BIT(0)
 #define MT_TOP_MISC2_FW_N9_RDY         GENMASK(1, 0)
 
 #endif
index a6ae29c..af26d59 100644
@@ -41,6 +41,7 @@ static void mt7921s_unregister_device(struct mt7921_dev *dev)
 {
        struct mt76_connac_pm *pm = &dev->pm;
 
+       cancel_work_sync(&dev->init_work);
        mt76_unregister_device(&dev->mt76);
        cancel_delayed_work_sync(&pm->ps_work);
        cancel_work_sync(&pm->wake_work);
@@ -91,9 +92,9 @@ static int mt7921s_probe(struct sdio_func *func,
                .survey_flags = SURVEY_INFO_TIME_TX |
                                SURVEY_INFO_TIME_RX |
                                SURVEY_INFO_TIME_BSS_RX,
-               .tx_prepare_skb = mt7921s_tx_prepare_skb,
-               .tx_complete_skb = mt7921s_tx_complete_skb,
-               .tx_status_data = mt7921s_tx_status_data,
+               .tx_prepare_skb = mt7921_usb_sdio_tx_prepare_skb,
+               .tx_complete_skb = mt7921_usb_sdio_tx_complete_skb,
+               .tx_status_data = mt7921_usb_sdio_tx_status_data,
                .rx_skb = mt7921_queue_rx_skb,
                .sta_ps = mt7921_sta_ps,
                .sta_add = mt7921_mac_sta_add,
index 4fd1d47..1b3adb3 100644
@@ -140,86 +140,3 @@ out:
 
        return err;
 }
-
-static void
-mt7921s_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid,
-                  enum mt76_txq_id qid, struct ieee80211_sta *sta,
-                  struct ieee80211_key_conf *key, int pid,
-                  struct sk_buff *skb)
-{
-       __le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
-
-       memset(txwi, 0, MT_SDIO_TXD_SIZE);
-       mt7921_mac_write_txwi(dev, txwi, skb, wcid, key, pid, false);
-       skb_push(skb, MT_SDIO_TXD_SIZE);
-}
-
-int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
-                          enum mt76_txq_id qid, struct mt76_wcid *wcid,
-                          struct ieee80211_sta *sta,
-                          struct mt76_tx_info *tx_info)
-{
-       struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
-       struct ieee80211_key_conf *key = info->control.hw_key;
-       struct sk_buff *skb = tx_info->skb;
-       int err, pad, pktid;
-
-       if (unlikely(tx_info->skb->len <= ETH_HLEN))
-               return -EINVAL;
-
-       if (!wcid)
-               wcid = &dev->mt76.global_wcid;
-
-       if (sta) {
-               struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
-
-               if (time_after(jiffies, msta->last_txs + HZ / 4)) {
-                       info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
-                       msta->last_txs = jiffies;
-               }
-       }
-
-       pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
-       mt7921s_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
-
-       mt7921_skb_add_sdio_hdr(skb, MT7921_SDIO_DATA);
-       pad = round_up(skb->len, 4) - skb->len;
-
-       err = mt76_skb_adjust_pad(skb, pad);
-       if (err)
-               /* Release pktid in case of error. */
-               idr_remove(&wcid->pktid, pktid);
-
-       return err;
-}
-
-void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
-{
-       __le32 *txwi = (__le32 *)(e->skb->data + MT_SDIO_HDR_SIZE);
-       unsigned int headroom = MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
-       struct ieee80211_sta *sta;
-       struct mt76_wcid *wcid;
-       u16 idx;
-
-       idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1]));
-       wcid = rcu_dereference(mdev->wcid[idx]);
-       sta = wcid_to_sta(wcid);
-
-       if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
-               mt7921_tx_check_aggr(sta, txwi);
-
-       skb_pull(e->skb, headroom);
-       mt76_tx_complete_skb(mdev, e->wcid, e->skb);
-}
-
-bool mt7921s_tx_status_data(struct mt76_dev *mdev, u8 *update)
-{
-       struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
-
-       mt7921_mutex_acquire(dev);
-       mt7921_mac_sta_poll(dev);
-       mt7921_mutex_release(dev);
-
-       return false;
-}
index 5d8af18..54a5c71 100644
@@ -36,7 +36,7 @@ mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
        if (cmd == MCU_CMD(FW_SCATTER))
                type = MT7921_SDIO_FWDL;
 
-       mt7921_skb_add_sdio_hdr(skb, type);
+       mt7921_skb_add_usb_sdio_hdr(dev, skb, type);
        pad = round_up(skb->len, 4) - skb->len;
        __skb_put_zero(skb, pad);
 
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
new file mode 100644
index 0000000..b7771e9
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2022 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt7921.h"
+#include "mcu.h"
+#include "mac.h"
+
+static const struct usb_device_id mt7921u_device_table[] = {
+       { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7961, 0xff, 0xff, 0xff) },
+       { },
+};
+
+static u32 mt7921u_rr(struct mt76_dev *dev, u32 addr)
+{
+       u32 ret;
+
+       mutex_lock(&dev->usb.usb_ctrl_mtx);
+       ret = ___mt76u_rr(dev, MT_VEND_READ_EXT,
+                         USB_DIR_IN | MT_USB_TYPE_VENDOR, addr);
+       mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+       return ret;
+}
+
+static void mt7921u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+       mutex_lock(&dev->usb.usb_ctrl_mtx);
+       ___mt76u_wr(dev, MT_VEND_WRITE_EXT,
+                   USB_DIR_OUT | MT_USB_TYPE_VENDOR, addr, val);
+       mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static u32 mt7921u_rmw(struct mt76_dev *dev, u32 addr,
+                      u32 mask, u32 val)
+{
+       mutex_lock(&dev->usb.usb_ctrl_mtx);
+       val |= ___mt76u_rr(dev, MT_VEND_READ_EXT,
+                          USB_DIR_IN | MT_USB_TYPE_VENDOR, addr) & ~mask;
+       ___mt76u_wr(dev, MT_VEND_WRITE_EXT,
+                   USB_DIR_OUT | MT_USB_TYPE_VENDOR, addr, val);
+       mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+       return val;
+}
+
+static void mt7921u_copy(struct mt76_dev *dev, u32 offset,
+                        const void *data, int len)
+{
+       struct mt76_usb *usb = &dev->usb;
+       int ret, i = 0, batch_len;
+       const u8 *val = data;
+
+       len = round_up(len, 4);
+
+       mutex_lock(&usb->usb_ctrl_mtx);
+       while (i < len) {
+               batch_len = min_t(int, usb->data_len, len - i);
+               memcpy(usb->data, val + i, batch_len);
+               ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
+                                            USB_DIR_OUT | MT_USB_TYPE_VENDOR,
+                                            (offset + i) >> 16, offset + i,
+                                            usb->data, batch_len);
+               if (ret < 0)
+                       break;
+
+               i += batch_len;
+       }
+       mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+int mt7921u_mcu_power_on(struct mt7921_dev *dev)
+{
+       int ret;
+
+       ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
+                                  USB_DIR_OUT | MT_USB_TYPE_VENDOR,
+                                  0x0, 0x1, NULL, 0);
+       if (ret)
+               return ret;
+
+       if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
+                           MT_TOP_MISC2_FW_PWR_ON, 500)) {
+               dev_err(dev->mt76.dev, "Timeout for power on\n");
+               ret = -EIO;
+       }
+
+       return ret;
+}
+
+static int
+mt7921u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+                        int cmd, int *seq)
+{
+       struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+       u32 pad, ep;
+       int ret;
+
+       ret = mt7921_mcu_fill_message(mdev, skb, cmd, seq);
+       if (ret)
+               return ret;
+
+       if (cmd != MCU_CMD(FW_SCATTER))
+               ep = MT_EP_OUT_INBAND_CMD;
+       else
+               ep = MT_EP_OUT_AC_BE;
+
+       mt7921_skb_add_usb_sdio_hdr(dev, skb, 0);
+       pad = round_up(skb->len, 4) + 4 - skb->len;
+       __skb_put_zero(skb, pad);
+
+       ret = mt76u_bulk_msg(&dev->mt76, skb->data, skb->len, NULL,
+                            1000, ep);
+       dev_kfree_skb(skb);
+
+       return ret;
+}
+
+static int mt7921u_mcu_init(struct mt7921_dev *dev)
+{
+       static const struct mt76_mcu_ops mcu_ops = {
+               .headroom = MT_SDIO_HDR_SIZE + sizeof(struct mt7921_mcu_txd),
+               .tailroom = MT_USB_TAIL_SIZE,
+               .mcu_skb_send_msg = mt7921u_mcu_send_message,
+               .mcu_parse_response = mt7921_mcu_parse_response,
+               .mcu_restart = mt76_connac_mcu_restart,
+       };
+       int ret;
+
+       dev->mt76.mcu_ops = &mcu_ops;
+
+       mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+       ret = mt7921_run_firmware(dev);
+       if (ret)
+               return ret;
+
+       set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+       mt76_clear(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+
+       return 0;
+}
+
+static void mt7921u_stop(struct ieee80211_hw *hw)
+{
+       struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
+       mt76u_stop_tx(&dev->mt76);
+       mt7921_stop(hw);
+}
+
+static void mt7921u_cleanup(struct mt7921_dev *dev)
+{
+       clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+       mt7921u_wfsys_reset(dev);
+       mt7921_mcu_exit(dev);
+       mt76u_queues_deinit(&dev->mt76);
+}
+
+static int mt7921u_probe(struct usb_interface *usb_intf,
+                        const struct usb_device_id *id)
+{
+       static const struct mt76_driver_ops drv_ops = {
+               .txwi_size = MT_SDIO_TXD_SIZE,
+               .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ,
+               .survey_flags = SURVEY_INFO_TIME_TX |
+                               SURVEY_INFO_TIME_RX |
+                               SURVEY_INFO_TIME_BSS_RX,
+               .tx_prepare_skb = mt7921_usb_sdio_tx_prepare_skb,
+               .tx_complete_skb = mt7921_usb_sdio_tx_complete_skb,
+               .tx_status_data = mt7921_usb_sdio_tx_status_data,
+               .rx_skb = mt7921_queue_rx_skb,
+               .sta_ps = mt7921_sta_ps,
+               .sta_add = mt7921_mac_sta_add,
+               .sta_assoc = mt7921_mac_sta_assoc,
+               .sta_remove = mt7921_mac_sta_remove,
+               .update_survey = mt7921_update_channel,
+       };
+       static const struct mt7921_hif_ops hif_ops = {
+               .mcu_init = mt7921u_mcu_init,
+               .init_reset = mt7921u_init_reset,
+               .reset = mt7921u_mac_reset,
+       };
+       static struct mt76_bus_ops bus_ops = {
+               .rr = mt7921u_rr,
+               .wr = mt7921u_wr,
+               .rmw = mt7921u_rmw,
+               .read_copy = mt76u_read_copy,
+               .write_copy = mt7921u_copy,
+               .type = MT76_BUS_USB,
+       };
+       struct usb_device *udev = interface_to_usbdev(usb_intf);
+       struct ieee80211_ops *ops;
+       struct ieee80211_hw *hw;
+       struct mt7921_dev *dev;
+       struct mt76_dev *mdev;
+       int ret;
+
+       ops = devm_kmemdup(&usb_intf->dev, &mt7921_ops, sizeof(mt7921_ops),
+                          GFP_KERNEL);
+       if (!ops)
+               return -ENOMEM;
+
+       ops->stop = mt7921u_stop;
+
+       mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), ops, &drv_ops);
+       if (!mdev)
+               return -ENOMEM;
+
+       dev = container_of(mdev, struct mt7921_dev, mt76);
+       dev->hif_ops = &hif_ops;
+
+       udev = usb_get_dev(udev);
+       usb_reset_device(udev);
+
+       usb_set_intfdata(usb_intf, dev);
+
+       ret = __mt76u_init(mdev, usb_intf, &bus_ops);
+       if (ret < 0)
+               goto error;
+
+       mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+                   (mt76_rr(dev, MT_HW_REV) & 0xff);
+       dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+       if (mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY)) {
+               ret = mt7921u_wfsys_reset(dev);
+               if (ret)
+                       goto error;
+       }
+
+       ret = mt7921u_mcu_power_on(dev);
+       if (ret)
+               goto error;
+
+       ret = mt76u_alloc_mcu_queue(&dev->mt76);
+       if (ret)
+               goto error;
+
+       ret = mt76u_alloc_queues(&dev->mt76);
+       if (ret)
+               goto error;
+
+       ret = mt7921u_dma_init(dev);
+       if (ret)
+               goto error;
+
+       hw = mt76_hw(dev);
+       /* check hw sg support in order to enable AMSDU */
+       hw->max_tx_fragments = mdev->usb.sg_en ? MT_HW_TXP_MAX_BUF_NUM : 1;
+
+       ret = mt7921_register_device(dev);
+       if (ret)
+               goto error;
+
+       return 0;
+
+error:
+       mt76u_queues_deinit(&dev->mt76);
+
+       usb_set_intfdata(usb_intf, NULL);
+       usb_put_dev(interface_to_usbdev(usb_intf));
+
+       mt76_free_device(&dev->mt76);
+
+       return ret;
+}
+
+static void mt7921u_disconnect(struct usb_interface *usb_intf)
+{
+       struct mt7921_dev *dev = usb_get_intfdata(usb_intf);
+
+       cancel_work_sync(&dev->init_work);
+       if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+               return;
+
+       mt76_unregister_device(&dev->mt76);
+       mt7921u_cleanup(dev);
+
+       usb_set_intfdata(usb_intf, NULL);
+       usb_put_dev(interface_to_usbdev(usb_intf));
+
+       mt76_free_device(&dev->mt76);
+}
+
+MODULE_DEVICE_TABLE(usb, mt7921u_device_table);
+MODULE_FIRMWARE(MT7921_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7921_ROM_PATCH);
+
+static struct usb_driver mt7921u_driver = {
+       .name           = KBUILD_MODNAME,
+       .id_table       = mt7921u_device_table,
+       .probe          = mt7921u_probe,
+       .disconnect     = mt7921u_disconnect,
+       .soft_unbind    = 1,
+       .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt7921u_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_LICENSE("Dual BSD/GPL");
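
One detail of the probe above worth noting: mt7921_ops is a const table shared with the PCI and SDIO front ends, so the USB driver duplicates it with devm_kmemdup() and overrides .stop on its private copy rather than patching the shared table. Condensed from the probe (error handling elided):

	struct ieee80211_ops *ops;

	ops = devm_kmemdup(&usb_intf->dev, &mt7921_ops, sizeof(mt7921_ops),
			   GFP_KERNEL);	/* device-lifetime private copy */
	ops->stop = mt7921u_stop;	/* flush USB tx before the common stop */
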
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c
new file mode 100644
index 0000000..99bcbd8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2022 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt7921.h"
+#include "mcu.h"
+#include "mac.h"
+
+static u32 mt7921u_uhw_rr(struct mt76_dev *dev, u32 addr)
+{
+       u32 ret;
+
+       mutex_lock(&dev->usb.usb_ctrl_mtx);
+       ret = ___mt76u_rr(dev, MT_VEND_DEV_MODE,
+                         USB_DIR_IN | MT_USB_TYPE_UHW_VENDOR, addr);
+       mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+       return ret;
+}
+
+static void mt7921u_uhw_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+       mutex_lock(&dev->usb.usb_ctrl_mtx);
+       ___mt76u_wr(dev, MT_VEND_WRITE,
+                   USB_DIR_OUT | MT_USB_TYPE_UHW_VENDOR, addr, val);
+       mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static void mt7921u_dma_prefetch(struct mt7921_dev *dev)
+{
+       mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0),
+                MT_WPDMA0_MAX_CNT_MASK, 4);
+       mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0),
+                MT_WPDMA0_BASE_PTR_MASK, 0x80);
+
+       mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1),
+                MT_WPDMA0_MAX_CNT_MASK, 4);
+       mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1),
+                MT_WPDMA0_BASE_PTR_MASK, 0xc0);
+
+       mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2),
+                MT_WPDMA0_MAX_CNT_MASK, 4);
+       mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2),
+                MT_WPDMA0_BASE_PTR_MASK, 0x100);
+
+       mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3),
+                MT_WPDMA0_MAX_CNT_MASK, 4);
+       mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3),
+                MT_WPDMA0_BASE_PTR_MASK, 0x140);
+
+       mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4),
+                MT_WPDMA0_MAX_CNT_MASK, 4);
+       mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4),
+                MT_WPDMA0_BASE_PTR_MASK, 0x180);
+
+       mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16),
+                MT_WPDMA0_MAX_CNT_MASK, 4);
+       mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16),
+                MT_WPDMA0_BASE_PTR_MASK, 0x280);
+
+       mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17),
+                MT_WPDMA0_MAX_CNT_MASK, 4);
+       mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17),
+                MT_WPDMA0_BASE_PTR_MASK,  0x2c0);
+}
+
+static void mt7921u_wfdma_init(struct mt7921_dev *dev)
+{
+       mt7921u_dma_prefetch(dev);
+
+       mt76_clear(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_OMIT_RX_INFO);
+       mt76_set(dev, MT_UWFDMA0_GLO_CFG,
+                MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+                MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
+                MT_WFDMA0_GLO_CFG_FW_DWLD_BYPASS_DMASHDL |
+                MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+                MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+       /* disable dmashdl */
+       mt76_clear(dev, MT_UWFDMA0_GLO_CFG_EXT0,
+                  MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
+       mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);
+
+       mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
+}
+
+static int mt7921u_dma_rx_evt_ep4(struct mt7921_dev *dev)
+{
+       if (!mt76_poll(dev, MT_UWFDMA0_GLO_CFG,
+                      MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000))
+               return -ETIMEDOUT;
+
+       mt76_clear(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+       mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+                MT_WFDMA_HOST_CONFIG_USB_RXEVT_EP4_EN);
+       mt76_set(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+       return 0;
+}
+
+static void mt7921u_epctl_rst_opt(struct mt7921_dev *dev, bool reset)
+{
+       u32 val;
+
+       /* USB endpoint reset option:
+        * bits 4..9:   out blk ep 4-9
+        * bits 20..21: in blk ep 4-5
+        * bit 22:      in int ep 6
+        */
+       val = mt7921u_uhw_rr(&dev->mt76, MT_SSUSB_EPCTL_CSR_EP_RST_OPT);
+       if (reset)
+               val |= GENMASK(9, 4) | GENMASK(22, 20);
+       else
+               val &= ~(GENMASK(9, 4) | GENMASK(22, 20));
+       mt7921u_uhw_wr(&dev->mt76, MT_SSUSB_EPCTL_CSR_EP_RST_OPT, val);
+}
+
+int mt7921u_dma_init(struct mt7921_dev *dev)
+{
+       int err;
+
+       mt7921u_wfdma_init(dev);
+
+       mt76_clear(dev, MT_UDMA_WLCFG_0, MT_WL_RX_FLUSH);
+
+       mt76_set(dev, MT_UDMA_WLCFG_0,
+                MT_WL_RX_EN | MT_WL_TX_EN |
+                MT_WL_RX_MPSZ_PAD0 | MT_TICK_1US_EN);
+       mt76_clear(dev, MT_UDMA_WLCFG_0,
+                  MT_WL_RX_AGG_TO | MT_WL_RX_AGG_LMT);
+       mt76_clear(dev, MT_UDMA_WLCFG_1, MT_WL_RX_AGG_PKT_LMT);
+
+       err = mt7921u_dma_rx_evt_ep4(dev);
+       if (err)
+               return err;
+
+       mt7921u_epctl_rst_opt(dev, false);
+
+       return 0;
+}
+
+int mt7921u_wfsys_reset(struct mt7921_dev *dev)
+{
+       u32 val;
+       int i;
+
+       mt7921u_epctl_rst_opt(dev, false);
+
+       val = mt7921u_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST);
+       val |= MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH;
+       mt7921u_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val);
+
+       usleep_range(10, 20);
+
+       val = mt7921u_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST);
+       val &= ~MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH;
+       mt7921u_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val);
+
+       mt7921u_uhw_wr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS_SEL, 0);
+       for (i = 0; i < MT7921_WFSYS_INIT_RETRY_COUNT; i++) {
+               val = mt7921u_uhw_rr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS);
+               if (val & MT_UDMA_CONN_WFSYS_INIT_DONE)
+                       break;
+
+               msleep(100);
+       }
+
+       if (i == MT7921_WFSYS_INIT_RETRY_COUNT)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+int mt7921u_init_reset(struct mt7921_dev *dev)
+{
+       set_bit(MT76_RESET, &dev->mphy.state);
+
+       wake_up(&dev->mt76.mcu.wait);
+       mt7921_mcu_exit(dev);
+
+       mt76u_stop_rx(&dev->mt76);
+       mt76u_stop_tx(&dev->mt76);
+
+       mt7921u_wfsys_reset(dev);
+
+       clear_bit(MT76_RESET, &dev->mphy.state);
+
+       return mt76u_resume_rx(&dev->mt76);
+}
+
+int mt7921u_mac_reset(struct mt7921_dev *dev)
+{
+       int err;
+
+       mt76_txq_schedule_all(&dev->mphy);
+       mt76_worker_disable(&dev->mt76.tx_worker);
+
+       set_bit(MT76_RESET, &dev->mphy.state);
+       set_bit(MT76_MCU_RESET, &dev->mphy.state);
+
+       wake_up(&dev->mt76.mcu.wait);
+       mt7921_mcu_exit(dev);
+
+       mt76u_stop_rx(&dev->mt76);
+       mt76u_stop_tx(&dev->mt76);
+
+       mt7921u_wfsys_reset(dev);
+
+       clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+       err = mt76u_resume_rx(&dev->mt76);
+       if (err)
+               goto out;
+
+       err = mt7921u_mcu_power_on(dev);
+       if (err)
+               goto out;
+
+       err = mt7921u_dma_init(dev);
+       if (err)
+               goto out;
+
+       mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
+       mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+
+       err = mt7921_run_firmware(dev);
+       if (err)
+               goto out;
+
+       mt76_clear(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+
+       err = mt7921_mcu_set_eeprom(dev);
+       if (err)
+               goto out;
+
+       err = mt7921_mac_init(dev);
+       if (err)
+               goto out;
+
+       err = __mt7921_start(&dev->phy);
+out:
+       clear_bit(MT76_RESET, &dev->mphy.state);
+
+       mt76_worker_enable(&dev->mt76.tx_worker);
+
+       return err;
+}
index 9fcf507..a2601aa 100644 (file)
@@ -118,7 +118,7 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
                __le32 *rxd = (__le32 *)buf;
 
                /* parse rxd to get the actual packet length */
-               len = FIELD_GET(GENMASK(15, 0), le32_to_cpu(rxd[0]));
+               len = le32_get_bits(rxd[0], GENMASK(15, 0));
                e->skb = mt76s_build_rx_skb(buf, len, round_up(len + 4, 4));
                if (!e->skb)
                        break;
index 1a01ad7..382b456 100644 (file)
@@ -409,7 +409,6 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        struct mt76_dev *dev = phy->dev;
        struct mt76_testmode_data *td = &phy->test;
        struct nlattr *tb[NUM_MT76_TM_ATTRS];
-       bool ext_phy = phy != &dev->phy;
        u32 state;
        int err;
        int i;
@@ -447,8 +446,8 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
            mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_LDPC], &td->tx_rate_ldpc, 0, 1) ||
            mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_STBC], &td->tx_rate_stbc, 0, 1) ||
            mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_LTF], &td->tx_ltf, 0, 2) ||
-           mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_ANTENNA], &td->tx_antenna_mask,
-                          1 << (ext_phy * 2), phy->antenna_mask << (ext_phy * 2)) ||
+           mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_ANTENNA],
+                          &td->tx_antenna_mask, 0, 0xff) ||
            mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_SPE_IDX], &td->tx_spe_idx, 0, 27) ||
            mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_DUTY_CYCLE],
                           &td->tx_duty_cycle, 0, 99) ||
index 0a7006c..a85e192 100644 (file)
@@ -15,9 +15,8 @@ static bool disable_usb_sg;
 module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
 MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
 
-static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
-                                 u8 req_type, u16 val, u16 offset,
-                                 void *buf, size_t len)
+int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
+                          u16 val, u16 offset, void *buf, size_t len)
 {
        struct usb_interface *uintf = to_usb_interface(dev->dev);
        struct usb_device *udev = interface_to_usbdev(uintf);
@@ -45,6 +44,7 @@ static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
                req, offset, ret);
        return ret;
 }
+EXPORT_SYMBOL_GPL(__mt76u_vendor_request);
 
 int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
                         u8 req_type, u16 val, u16 offset,
@@ -62,22 +62,21 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
 }
 EXPORT_SYMBOL_GPL(mt76u_vendor_request);
 
-static u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u32 addr)
+u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr)
 {
        struct mt76_usb *usb = &dev->usb;
        u32 data = ~0;
        int ret;
 
-       ret = __mt76u_vendor_request(dev, req,
-                                    USB_DIR_IN | USB_TYPE_VENDOR,
-                                    addr >> 16, addr, usb->data,
-                                    sizeof(__le32));
+       ret = __mt76u_vendor_request(dev, req, req_type, addr >> 16,
+                                    addr, usb->data, sizeof(__le32));
        if (ret == sizeof(__le32))
                data = get_unaligned_le32(usb->data);
        trace_usb_reg_rr(dev, addr, data);
 
        return data;
 }
+EXPORT_SYMBOL_GPL(___mt76u_rr);
 
 static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
 {
@@ -95,7 +94,8 @@ static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
                break;
        }
 
-       return ___mt76u_rr(dev, req, addr & ~MT_VEND_TYPE_MASK);
+       return ___mt76u_rr(dev, req, USB_DIR_IN | USB_TYPE_VENDOR,
+                          addr & ~MT_VEND_TYPE_MASK);
 }
 
 static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
@@ -109,29 +109,17 @@ static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
        return ret;
 }
 
-static u32 mt76u_rr_ext(struct mt76_dev *dev, u32 addr)
-{
-       u32 ret;
-
-       mutex_lock(&dev->usb.usb_ctrl_mtx);
-       ret = ___mt76u_rr(dev, MT_VEND_READ_EXT, addr);
-       mutex_unlock(&dev->usb.usb_ctrl_mtx);
-
-       return ret;
-}
-
-static void ___mt76u_wr(struct mt76_dev *dev, u8 req,
-                       u32 addr, u32 val)
+void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
+                u32 addr, u32 val)
 {
        struct mt76_usb *usb = &dev->usb;
 
        put_unaligned_le32(val, usb->data);
-       __mt76u_vendor_request(dev, req,
-                              USB_DIR_OUT | USB_TYPE_VENDOR,
-                              addr >> 16, addr, usb->data,
-                              sizeof(__le32));
+       __mt76u_vendor_request(dev, req, req_type, addr >> 16,
+                              addr, usb->data, sizeof(__le32));
        trace_usb_reg_wr(dev, addr, val);
 }
+EXPORT_SYMBOL_GPL(___mt76u_wr);
 
 static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
 {
@@ -145,7 +133,8 @@ static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
                req = MT_VEND_MULTI_WRITE;
                break;
        }
-       ___mt76u_wr(dev, req, addr & ~MT_VEND_TYPE_MASK, val);
+       ___mt76u_wr(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR,
+                   addr & ~MT_VEND_TYPE_MASK, val);
 }
 
 static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
@@ -155,13 +144,6 @@ static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
        mutex_unlock(&dev->usb.usb_ctrl_mtx);
 }
 
-static void mt76u_wr_ext(struct mt76_dev *dev, u32 addr, u32 val)
-{
-       mutex_lock(&dev->usb.usb_ctrl_mtx);
-       ___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
-       mutex_unlock(&dev->usb.usb_ctrl_mtx);
-}
-
 static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
                     u32 mask, u32 val)
 {
@@ -173,17 +155,6 @@ static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
        return val;
 }
 
-static u32 mt76u_rmw_ext(struct mt76_dev *dev, u32 addr,
-                        u32 mask, u32 val)
-{
-       mutex_lock(&dev->usb.usb_ctrl_mtx);
-       val |= ___mt76u_rr(dev, MT_VEND_READ_EXT, addr) & ~mask;
-       ___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
-       mutex_unlock(&dev->usb.usb_ctrl_mtx);
-
-       return val;
-}
-
 static void mt76u_copy(struct mt76_dev *dev, u32 offset,
                       const void *data, int len)
 {
@@ -216,33 +187,8 @@ static void mt76u_copy(struct mt76_dev *dev, u32 offset,
        mutex_unlock(&usb->usb_ctrl_mtx);
 }
 
-static void mt76u_copy_ext(struct mt76_dev *dev, u32 offset,
-                          const void *data, int len)
-{
-       struct mt76_usb *usb = &dev->usb;
-       int ret, i = 0, batch_len;
-       const u8 *val = data;
-
-       len = round_up(len, 4);
-       mutex_lock(&usb->usb_ctrl_mtx);
-       while (i < len) {
-               batch_len = min_t(int, usb->data_len, len - i);
-               memcpy(usb->data, val + i, batch_len);
-               ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
-                                            USB_DIR_OUT | USB_TYPE_VENDOR,
-                                            (offset + i) >> 16, offset + i,
-                                            usb->data, batch_len);
-               if (ret < 0)
-                       break;
-
-               i += batch_len;
-       }
-       mutex_unlock(&usb->usb_ctrl_mtx);
-}
-
-static void
-mt76u_read_copy_ext(struct mt76_dev *dev, u32 offset,
-                   void *data, int len)
+void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
+                    void *data, int len)
 {
        struct mt76_usb *usb = &dev->usb;
        int i = 0, batch_len, ret;
@@ -264,6 +210,7 @@ mt76u_read_copy_ext(struct mt76_dev *dev, u32 offset,
        }
        mutex_unlock(&usb->usb_ctrl_mtx);
 }
+EXPORT_SYMBOL_GPL(mt76u_read_copy);
 
 void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
                     const u16 offset, const u32 val)
@@ -1112,24 +1059,13 @@ static const struct mt76_queue_ops usb_queue_ops = {
        .kick = mt76u_tx_kick,
 };
 
-int mt76u_init(struct mt76_dev *dev,
-              struct usb_interface *intf, bool ext)
+int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
+                struct mt76_bus_ops *ops)
 {
-       static struct mt76_bus_ops mt76u_ops = {
-               .read_copy = mt76u_read_copy_ext,
-               .wr_rp = mt76u_wr_rp,
-               .rd_rp = mt76u_rd_rp,
-               .type = MT76_BUS_USB,
-       };
        struct usb_device *udev = interface_to_usbdev(intf);
        struct mt76_usb *usb = &dev->usb;
        int err;
 
-       mt76u_ops.rr = ext ? mt76u_rr_ext : mt76u_rr;
-       mt76u_ops.wr = ext ? mt76u_wr_ext : mt76u_wr;
-       mt76u_ops.rmw = ext ? mt76u_rmw_ext : mt76u_rmw;
-       mt76u_ops.write_copy = ext ? mt76u_copy_ext : mt76u_copy;
-
        INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
 
        usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
@@ -1141,7 +1077,7 @@ int mt76u_init(struct mt76_dev *dev,
                return -ENOMEM;
 
        mutex_init(&usb->usb_ctrl_mtx);
-       dev->bus = &mt76u_ops;
+       dev->bus = ops;
        dev->queue_ops = &usb_queue_ops;
 
        dev_set_drvdata(&udev->dev, dev);
@@ -1167,6 +1103,23 @@ int mt76u_init(struct mt76_dev *dev,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(__mt76u_init);
+
+int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf)
+{
+       static struct mt76_bus_ops bus_ops = {
+               .rr = mt76u_rr,
+               .wr = mt76u_wr,
+               .rmw = mt76u_rmw,
+               .read_copy = mt76u_read_copy,
+               .write_copy = mt76u_copy,
+               .wr_rp = mt76u_wr_rp,
+               .rd_rp = mt76u_rd_rp,
+               .type = MT76_BUS_USB,
+       };
+
+       return __mt76u_init(dev, intf, &bus_ops);
+}
 EXPORT_SYMBOL_GPL(mt76u_init);
 
 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
index 04735da..da54e51 100644 (file)
@@ -396,36 +396,6 @@ void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
        }
 }
 
-static void _rtl92ce_phy_set_rf_sleep(struct ieee80211_hw *hw)
-{
-       u32 u4b_tmp;
-       u8 delay = 5;
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
-       rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
-       rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
-       u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
-       while (u4b_tmp != 0 && delay > 0) {
-               rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
-               rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
-               rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
-               u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
-               delay--;
-       }
-       if (delay == 0) {
-               rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
-               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
-               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
-               rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
-               rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
-                       "Switch RF timeout !!!\n");
-               return;
-       }
-       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
-       rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
-}
-
 static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
                                            enum rf_pwrstate rfpwr_state)
 {
@@ -519,7 +489,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
                                jiffies_to_msecs(jiffies -
                                                 ppsc->last_awake_jiffies));
                        ppsc->last_sleep_jiffies = jiffies;
-                       _rtl92ce_phy_set_rf_sleep(hw);
+                       _rtl92c_phy_set_rf_sleep(hw);
                        break;
                }
        default:
index b53daf1..876c14d 100644 (file)
@@ -334,6 +334,7 @@ static const struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x04f2, 0xaff7, rtl92cu_hal_cfg)}, /*Xavi*/
        {RTL_USB_DEVICE(0x04f2, 0xaff9, rtl92cu_hal_cfg)}, /*Xavi*/
        {RTL_USB_DEVICE(0x04f2, 0xaffa, rtl92cu_hal_cfg)}, /*Xavi*/
+       {RTL_USB_DEVICE(0x0846, 0x9042, rtl92cu_hal_cfg)}, /*On Networks N150MA*/
 
        /****** 8188CUS Slim Combo ********/
        {RTL_USB_DEVICE(0x04f2, 0xaff8, rtl92cu_hal_cfg)}, /*Xavi*/
index f6bff0e..f3fe167 100644 (file)
@@ -872,7 +872,7 @@ static void rtl8821ae_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
        else
                falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail;
 
-       /*reset OFDM FA coutner*/
+       /*reset OFDM FA counter*/
        rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 1);
        rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 0);
        /* reset CCK FA counter*/
@@ -1464,7 +1464,7 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
        const u8 *delta_swing_table_idx_tup_b;
        const u8 *delta_swing_table_idx_tdown_b;
 
-       /*2. Initilization ( 7 steps in total )*/
+       /*2. Initialization ( 7 steps in total )*/
        rtl8812ae_get_delta_swing_table(hw,
                &delta_swing_table_idx_tup_a,
                &delta_swing_table_idx_tdown_a,
@@ -2502,7 +2502,7 @@ static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
        rtlpriv->dm.dbginfo.num_non_be_pkt = 0;
 
        /*===============================
-        * list paramter for different platform
+        * list parameter for different platform
         *===============================
         */
        pb_is_cur_rdl_state = &rtlpriv->dm.is_cur_rdlstate;
index 2551e22..cac053f 100644 (file)
@@ -211,6 +211,10 @@ static void rtw_coex_wl_ccklock_detect(struct rtw_dev *rtwdev)
 
        bool is_cck_lock_rate = false;
 
+       if (coex_stat->wl_coex_mode != COEX_WLINK_2G1PORT &&
+           coex_stat->wl_coex_mode != COEX_WLINK_2GFREE)
+               return;
+
        if (coex_dm->bt_status == COEX_BTSTATUS_INQ_PAGE ||
            coex_stat->bt_setup_link) {
                coex_stat->wl_cck_lock = false;
@@ -460,6 +464,29 @@ static void rtw_coex_gnt_workaround(struct rtw_dev *rtwdev, bool force, u8 mode)
        rtw_coex_set_gnt_fix(rtwdev);
 }
 
+static void rtw_coex_monitor_bt_ctr(struct rtw_dev *rtwdev)
+{
+       struct rtw_coex *coex = &rtwdev->coex;
+       struct rtw_coex_stat *coex_stat = &coex->stat;
+       u32 tmp;
+
+       tmp = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS);
+       coex_stat->hi_pri_tx = FIELD_GET(MASKLWORD, tmp);
+       coex_stat->hi_pri_rx = FIELD_GET(MASKHWORD, tmp);
+
+       tmp = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS_1);
+       coex_stat->lo_pri_tx = FIELD_GET(MASKLWORD, tmp);
+       coex_stat->lo_pri_rx = FIELD_GET(MASKHWORD, tmp);
+
+       rtw_write8(rtwdev, REG_BT_COEX_ENH_INTR_CTRL,
+                  BIT_R_GRANTALL_WLMASK | BIT_STATIS_BT_EN);
+
+       rtw_dbg(rtwdev, RTW_DBG_COEX,
+               "[BTCoex], Hi-Pri Rx/Tx: %d/%d, Lo-Pri Rx/Tx: %d/%d\n",
+               coex_stat->hi_pri_rx, coex_stat->hi_pri_tx,
+               coex_stat->lo_pri_rx, coex_stat->lo_pri_tx);
+}
+
 static void rtw_coex_monitor_bt_enable(struct rtw_dev *rtwdev)
 {
        struct rtw_chip_info *chip = rtwdev->chip;
@@ -780,7 +807,9 @@ static void rtw_coex_update_bt_link_info(struct rtw_dev *rtwdev)
 static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type)
 {
        struct rtw_chip_info *chip = rtwdev->chip;
+       struct rtw_efuse *efuse = &rtwdev->efuse;
        struct rtw_coex_dm *coex_dm = &rtwdev->coex.dm;
+       struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
        u8 link = 0;
        u8 center_chan = 0;
        u8 bw;
@@ -791,7 +820,9 @@ static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type)
        if (type != COEX_MEDIA_DISCONNECT)
                center_chan = rtwdev->hal.current_channel;
 
-       if (center_chan == 0) {
+       if (center_chan == 0 ||
+           (efuse->share_ant && center_chan <= 14 &&
+            coex_stat->wl_coex_mode != COEX_WLINK_2GFREE)) {
                link = 0;
                center_chan = 0;
                bw = 0;
@@ -930,6 +961,23 @@ static void rtw_coex_set_gnt_wl(struct rtw_dev *rtwdev, u8 state)
        rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, 0x0300, state);
 }
 
+static void rtw_coex_mimo_ps(struct rtw_dev *rtwdev, bool force, bool state)
+{
+       struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
+
+       if (!force && state == coex_stat->wl_mimo_ps)
+               return;
+
+       coex_stat->wl_mimo_ps = state;
+
+       rtw_set_txrx_1ss(rtwdev, state);
+
+       rtw_coex_update_wl_ch_info(rtwdev, (u8)coex_stat->wl_connected);
+
+       rtw_dbg(rtwdev, RTW_DBG_COEX,
+               "[BTCoex], %s(): state = %d\n", __func__, state);
+}
+
 static void rtw_btc_wltoggle_table_a(struct rtw_dev *rtwdev, bool force,
                                     u8 table_case)
 {
@@ -1106,7 +1154,8 @@ static void rtw_coex_set_tdma(struct rtw_dev *rtwdev, u8 byte1, u8 byte2,
 
                ps_type = COEX_PS_WIFI_NATIVE;
                rtw_coex_power_save_state(rtwdev, ps_type, 0x0, 0x0);
-       } else if (byte1 & BIT(4) && !(byte1 & BIT(5))) {
+       } else if ((byte1 & BIT(4) && !(byte1 & BIT(5))) ||
+                  coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
                rtw_dbg(rtwdev, RTW_DBG_COEX,
                        "[BTCoex], %s(): Force LPS (byte1 = 0x%x)\n", __func__,
                        byte1);
@@ -1802,6 +1851,54 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
        rtw_coex_tdma(rtwdev, false, tdma_case | slot_type);
 }
 
+static void rtw_coex_action_bt_game_hid(struct rtw_dev *rtwdev)
+{
+       struct rtw_coex *coex = &rtwdev->coex;
+       struct rtw_coex_stat *coex_stat = &coex->stat;
+       struct rtw_efuse *efuse = &rtwdev->efuse;
+       struct rtw_coex_dm *coex_dm = &coex->dm;
+       struct rtw_chip_info *chip = rtwdev->chip;
+       u8 table_case, tdma_case;
+
+       rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
+       rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+
+       if (efuse->share_ant) {
+               coex_stat->wl_coex_mode = COEX_WLINK_2GFREE;
+               if (coex_stat->bt_whck_test)
+                       table_case = 2;
+               else if (coex_stat->wl_linkscan_proc || coex_stat->bt_hid_exist)
+                       table_case = 33;
+               else if (coex_stat->bt_setup_link || coex_stat->bt_inq_page)
+                       table_case = 0;
+               else if (coex_stat->bt_a2dp_exist)
+                       table_case = 34;
+               else
+                       table_case = 33;
+
+               tdma_case = 0;
+       } else {
+               if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1]))
+                       tdma_case = 112;
+               else
+                       tdma_case = 113;
+
+               table_case = 121;
+       }
+
+       if (coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
+               if (coex_stat->wl_tput_dir == COEX_WL_TPUT_TX)
+                       rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_tx[6]);
+               else
+                       rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[5]);
+       } else {
+               rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+       }
+
+       rtw_coex_table(rtwdev, false, table_case);
+       rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
 static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev)
 {
        struct rtw_coex *coex = &rtwdev->coex;
@@ -1816,13 +1913,8 @@ static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev)
 
        if (efuse->share_ant) {
                /* Shared-Ant */
-               if (coex_stat->bt_multi_link) {
-                       table_case = 10;
-                       tdma_case = 17;
-               } else {
-                       table_case = 10;
-                       tdma_case = 5;
-               }
+               table_case = 10;
+               tdma_case = 5;
        } else {
                /* Non-Shared-Ant */
                if (coex_stat->bt_multi_link) {
@@ -2224,8 +2316,10 @@ static void rtw_coex_action_bt_a2dp_pan_hid(struct rtw_dev *rtwdev)
 
 static void rtw_coex_action_wl_under5g(struct rtw_dev *rtwdev)
 {
+       struct rtw_coex *coex = &rtwdev->coex;
        struct rtw_efuse *efuse = &rtwdev->efuse;
        struct rtw_chip_info *chip = rtwdev->chip;
+       struct rtw_coex_stat *coex_stat = &coex->stat;
        u8 table_case, tdma_case;
 
        rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2235,6 +2329,9 @@ static void rtw_coex_action_wl_under5g(struct rtw_dev *rtwdev)
 
        rtw_coex_write_scbd(rtwdev, COEX_SCBD_FIX2M, false);
 
+       if (coex_stat->bt_game_hid_exist && coex_stat->wl_linkscan_proc)
+               coex_stat->wl_coex_mode = COEX_WLINK_2GFREE;
+
        if (efuse->share_ant) {
                /* Shared-Ant */
                table_case = 0;
@@ -2278,6 +2375,7 @@ static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev)
        struct rtw_coex *coex = &rtwdev->coex;
        struct rtw_efuse *efuse = &rtwdev->efuse;
        struct rtw_chip_info *chip = rtwdev->chip;
+       struct rtw_coex_stat *coex_stat = &coex->stat;
        u8 table_case, tdma_case;
 
        if (coex->under_5g)
@@ -2286,7 +2384,6 @@ static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev)
        rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
 
        rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
-       rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
 
        if (efuse->share_ant) {
                /* Shared-Ant */
@@ -2298,6 +2395,16 @@ static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev)
                tdma_case = 100;
        }
 
+       if (coex_stat->bt_game_hid_exist) {
+               coex_stat->wl_coex_mode = COEX_WLINK_2GFREE;
+               if (coex_stat->wl_tput_dir == COEX_WL_TPUT_TX)
+                       rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_tx[6]);
+               else
+                       rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[5]);
+       } else {
+               rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+       }
+
        rtw_coex_table(rtwdev, false, table_case);
        rtw_coex_tdma(rtwdev, false, tdma_case);
 }
@@ -2422,6 +2529,7 @@ static void rtw_coex_action_wl_connected(struct rtw_dev *rtwdev)
 static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
 {
        struct rtw_coex *coex = &rtwdev->coex;
+       struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_coex_dm *coex_dm = &coex->dm;
        struct rtw_coex_stat *coex_stat = &coex->stat;
        bool rf4ce_en = false;
@@ -2494,6 +2602,11 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
                goto exit;
        }
 
+       if (coex_stat->bt_game_hid_exist && coex_stat->wl_connected) {
+               rtw_coex_action_bt_game_hid(rtwdev);
+               goto exit;
+       }
+
        if (coex_stat->bt_whck_test) {
                rtw_coex_action_bt_whql_test(rtwdev);
                goto exit;
@@ -2530,6 +2643,18 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
        }
 
 exit:
+
+       if (chip->wl_mimo_ps_support) {
+               if (coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
+                       if (coex_dm->reason == COEX_RSN_2GMEDIA)
+                               rtw_coex_mimo_ps(rtwdev, true, true);
+                       else
+                               rtw_coex_mimo_ps(rtwdev, false, true);
+               } else {
+                       rtw_coex_mimo_ps(rtwdev, false, false);
+               }
+       }
+
        rtw_coex_gnt_workaround(rtwdev, false, coex_stat->wl_coex_mode);
        rtw_coex_limited_wl(rtwdev);
 }
@@ -3139,6 +3264,135 @@ void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
        rtw_coex_run_coex(rtwdev, COEX_RSN_BTINFO);
 }
 
+#define COEX_BT_HIDINFO_MTK    0x46
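+/* ASCII name prefixes "Wir" and "Xbo", presumably the first bytes of the
+ * "Wireless Controller" (DualShock) and "Xbox" controller HID device names
+ */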
+static const u8 coex_bt_hidinfo_ps[] = {0x57, 0x69, 0x72};
+static const u8 coex_bt_hidinfo_xb[] = {0x58, 0x62, 0x6f};
+
+void rtw_coex_bt_hid_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
+{
+       struct rtw_coex *coex = &rtwdev->coex;
+       struct rtw_chip_info *chip = rtwdev->chip;
+       struct rtw_coex_stat *coex_stat = &coex->stat;
+       struct rtw_coex_hid *hidinfo;
+       struct rtw_coex_hid_info_a *hida;
+       struct rtw_coex_hid_handle_list *hl, *bhl;
+       u8 sub_id = buf[2], gamehid_cnt = 0, handle, i;
+       bool cur_game_hid_exist, complete;
+
+       if (!chip->wl_mimo_ps_support &&
+           (sub_id == COEX_BT_HIDINFO_LIST || sub_id == COEX_BT_HIDINFO_A))
+               return;
+
+       rtw_dbg(rtwdev, RTW_DBG_COEX,
+               "[BTCoex], HID info notify, sub_id = 0x%x\n", sub_id);
+
+       switch (sub_id) {
+       case COEX_BT_HIDINFO_LIST:
+               hl = &coex_stat->hid_handle_list;
+               bhl = (struct rtw_coex_hid_handle_list *)buf;
+               if (!memcmp(hl, bhl, sizeof(*hl)))
+                       return;
+               coex_stat->hid_handle_list = *bhl;
+               memset(&coex_stat->hid_info, 0, sizeof(coex_stat->hid_info));
+               for (i = 0; i < COEX_BT_HIDINFO_HANDLE_NUM; i++) {
+                       hidinfo = &coex_stat->hid_info[i];
+                       if (hl->handle[i] != COEX_BT_HIDINFO_NOTCON &&
+                           hl->handle[i] != 0)
+                               hidinfo->hid_handle = hl->handle[i];
+               }
+               break;
+       case COEX_BT_HIDINFO_A:
+               hida = (struct rtw_coex_hid_info_a *)buf;
+               handle = hida->handle;
+               for (i = 0; i < COEX_BT_HIDINFO_HANDLE_NUM; i++) {
+                       hidinfo = &coex_stat->hid_info[i];
+                       if (hidinfo->hid_handle == handle) {
+                               hidinfo->hid_vendor = hida->vendor;
+                               memcpy(hidinfo->hid_name, hida->name,
+                                      sizeof(hidinfo->hid_name));
+                               hidinfo->hid_info_completed = true;
+                               break;
+                       }
+               }
+               break;
+       }
+       for (i = 0; i < COEX_BT_HIDINFO_HANDLE_NUM; i++) {
+               hidinfo = &coex_stat->hid_info[i];
+               complete = hidinfo->hid_info_completed;
+               handle = hidinfo->hid_handle;
+               if (!complete || handle == COEX_BT_HIDINFO_NOTCON ||
+                   handle == 0 || handle >= COEX_BT_BLE_HANDLE_THRS) {
+                       hidinfo->is_game_hid = false;
+                       continue;
+               }
+
+               if (hidinfo->hid_vendor == COEX_BT_HIDINFO_MTK) {
+                       if ((memcmp(hidinfo->hid_name,
+                                   coex_bt_hidinfo_ps,
+                                   COEX_BT_HIDINFO_NAME)) == 0)
+                               hidinfo->is_game_hid = true;
+                       else if ((memcmp(hidinfo->hid_name,
+                                        coex_bt_hidinfo_xb,
+                                        COEX_BT_HIDINFO_NAME)) == 0)
+                               hidinfo->is_game_hid = true;
+                       else
+                               hidinfo->is_game_hid = false;
+               } else {
+                       hidinfo->is_game_hid = false;
+               }
+               if (hidinfo->is_game_hid)
+                       gamehid_cnt++;
+       }
+
+       if (gamehid_cnt > 0)
+               cur_game_hid_exist = true;
+       else
+               cur_game_hid_exist = false;
+
+       if (cur_game_hid_exist != coex_stat->bt_game_hid_exist) {
+               coex_stat->bt_game_hid_exist = cur_game_hid_exist;
+               rtw_dbg(rtwdev, RTW_DBG_COEX,
+                       "[BTCoex], HID info changed!bt_game_hid_exist = %d!\n",
+                       coex_stat->bt_game_hid_exist);
+               rtw_coex_run_coex(rtwdev, COEX_RSN_BTSTATUS);
+       }
+}
+
+void rtw_coex_query_bt_hid_list(struct rtw_dev *rtwdev)
+{
+       struct rtw_coex *coex = &rtwdev->coex;
+       struct rtw_chip_info *chip = rtwdev->chip;
+       struct rtw_coex_stat *coex_stat = &coex->stat;
+       struct rtw_coex_hid *hidinfo;
+       u8 i, handle;
+       bool complete;
+
+       if (!chip->wl_mimo_ps_support || coex_stat->wl_under_ips ||
+           (coex_stat->wl_under_lps && !coex_stat->wl_force_lps_ctrl))
+               return;
+
+       if (!coex_stat->bt_hid_exist &&
+           !((coex_stat->bt_info_lb2 & COEX_INFO_CONNECTION) &&
+             (coex_stat->hi_pri_tx + coex_stat->hi_pri_rx >
+              COEX_BT_GAMEHID_CNT)))
+               return;
+
+       rtw_fw_coex_query_hid_info(rtwdev, COEX_BT_HIDINFO_LIST, 0);
+
+       for (i = 0; i < COEX_BT_HIDINFO_HANDLE_NUM; i++) {
+               hidinfo = &coex_stat->hid_info[i];
+               complete = hidinfo->hid_info_completed;
+               handle = hidinfo->hid_handle;
+               if (handle == 0 || handle == COEX_BT_HIDINFO_NOTCON ||
+                   handle >= COEX_BT_BLE_HANDLE_THRS || complete)
+                       continue;
+
+               rtw_fw_coex_query_hid_info(rtwdev,
+                                          COEX_BT_HIDINFO_A,
+                                          handle);
+       }
+}
+
 void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
 {
        struct rtw_coex *coex = &rtwdev->coex;
@@ -3175,6 +3429,17 @@ void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev, u32 type)
        rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS);
 }
 
+void rtw_coex_wl_status_check(struct rtw_dev *rtwdev)
+{
+       struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
+
+       if ((coex_stat->wl_under_lps && !coex_stat->wl_force_lps_ctrl) ||
+           coex_stat->wl_under_ips)
+               return;
+
+       rtw_coex_monitor_bt_ctr(rtwdev);
+}
+
 void rtw_coex_bt_relink_work(struct work_struct *work)
 {
        struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
@@ -3637,6 +3902,7 @@ static const char *rtw_coex_get_wl_coex_mode(u8 coex_wl_link_mode)
        switch (coex_wl_link_mode) {
        case_WLINK(2G1PORT);
        case_WLINK(5G);
+       case_WLINK(2GFREE);
        default:
                return "Unknown";
        }
@@ -3658,7 +3924,6 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m)
        u16 score_board_WB, score_board_BW;
        u32 wl_reg_6c0, wl_reg_6c4, wl_reg_6c8, wl_reg_778, wl_reg_6cc;
        u32 lte_coex, bt_coex;
-       u32 bt_hi_pri, bt_lo_pri;
        int i;
 
        score_board_BW = rtw_coex_read_scbd(rtwdev);
@@ -3669,17 +3934,6 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m)
        wl_reg_6cc = rtw_read32(rtwdev, REG_BT_COEX_TABLE_H);
        wl_reg_778 = rtw_read8(rtwdev, REG_BT_STAT_CTRL);
 
-       bt_hi_pri = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS);
-       bt_lo_pri = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS_1);
-       rtw_write8(rtwdev, REG_BT_COEX_ENH_INTR_CTRL,
-                  BIT_R_GRANTALL_WLMASK | BIT_STATIS_BT_EN);
-
-       coex_stat->hi_pri_tx = FIELD_GET(MASKLWORD, bt_hi_pri);
-       coex_stat->hi_pri_rx = FIELD_GET(MASKHWORD, bt_hi_pri);
-
-       coex_stat->lo_pri_tx = FIELD_GET(MASKLWORD, bt_lo_pri);
-       coex_stat->lo_pri_rx = FIELD_GET(MASKHWORD, bt_lo_pri);
-
        sys_lte = rtw_read8(rtwdev, 0x73);
        lte_coex = rtw_coex_read_indirect_reg(rtwdev, 0x38);
        bt_coex = rtw_coex_read_indirect_reg(rtwdev, 0x54);
index fc61a0c..07fa7aa 100644 (file)
@@ -11,6 +11,7 @@
 
 #define COEX_MIN_DELAY         10 /* delay unit in ms */
 #define COEX_RFK_TIMEOUT       600 /* RFK timeout in ms */
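+/* hi_pri_tx + hi_pri_rx counter threshold for querying BT game-HID info */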
+#define COEX_BT_GAMEHID_CNT    800
 
 #define COEX_RF_OFF    0x0
 #define COEX_RF_ON     0x1
@@ -172,6 +173,7 @@ enum coex_bt_profile {
 enum coex_wl_link_mode {
        COEX_WLINK_2G1PORT      = 0x0,
        COEX_WLINK_5G           = 0x3,
+       COEX_WLINK_2GFREE       = 0x7,
        COEX_WLINK_MAX
 };
 
@@ -401,9 +403,12 @@ void rtw_coex_scan_notify(struct rtw_dev *rtwdev, u8 type);
 void rtw_coex_connect_notify(struct rtw_dev *rtwdev, u8 type);
 void rtw_coex_media_status_notify(struct rtw_dev *rtwdev, u8 type);
 void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length);
+void rtw_coex_bt_hid_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length);
 void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length);
 void rtw_coex_switchband_notify(struct rtw_dev *rtwdev, u8 type);
 void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev, u32 type);
+void rtw_coex_wl_status_check(struct rtw_dev *rtwdev);
+void rtw_coex_query_bt_hid_list(struct rtw_dev *rtwdev);
 void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m);
 
 static inline bool rtw_coex_disabled(struct rtw_dev *rtwdev)
index e429428..1a52ff5 100644 (file)
@@ -390,7 +390,7 @@ static ssize_t rtw_debugfs_set_h2c(struct file *filp,
                     &param[0], &param[1], &param[2], &param[3],
                     &param[4], &param[5], &param[6], &param[7]);
        if (num != 8) {
-               rtw_info(rtwdev, "invalid H2C command format for debug\n");
+               rtw_warn(rtwdev, "invalid H2C command format for debug\n");
                return -EINVAL;
        }
 
@@ -715,8 +715,10 @@ static int rtw_debugfs_get_phy_info(struct seq_file *m, void *v)
        seq_printf(m, "Current CH(fc) = %u\n", rtwdev->hal.current_channel);
        seq_printf(m, "Current BW = %u\n", rtwdev->hal.current_band_width);
        seq_printf(m, "Current IGI = 0x%x\n", dm_info->igi_history[0]);
-       seq_printf(m, "TP {Tx, Rx} = {%u, %u}Mbps\n\n",
+       seq_printf(m, "TP {Tx, Rx} = {%u, %u}Mbps\n",
                   stats->tx_throughput, stats->rx_throughput);
+       seq_printf(m, "1SS for TX and RX = %c\n\n", rtwdev->hal.txrx_1ss ?
+                  'Y' : 'N');
 
        seq_puts(m, "==========[Tx Phy Info]========\n");
        seq_puts(m, "[Tx Rate] = ");
index 61f8369..066792d 100644 (file)
@@ -23,6 +23,7 @@ enum rtw_debug_mask {
        RTW_DBG_PATH_DIV        = 0x00004000,
        RTW_DBG_ADAPTIVITY      = 0x00008000,
        RTW_DBG_HW_SCAN         = 0x00010000,
+       RTW_DBG_STATE           = 0x00020000,
 
        RTW_DBG_ALL             = 0xffffffff
 };
index 4c8e5ea..aa2aeb5 100644 (file)
@@ -233,6 +233,9 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
        case C2H_BT_INFO:
                rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
                break;
+       case C2H_BT_HID_INFO:
+               rtw_coex_bt_hid_info_notify(rtwdev, c2h->payload, len);
+               break;
        case C2H_WLAN_INFO:
                rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len);
                break;
@@ -538,6 +541,18 @@ void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
 }
 
+void rtw_fw_coex_query_hid_info(struct rtw_dev *rtwdev, u8 sub_id, u8 data)
+{
+       u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+       SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_HID_INFO);
+
+       SET_COEX_QUERY_HID_INFO_SUBID(h2c_pkt, sub_id);
+       SET_COEX_QUERY_HID_INFO_DATA1(h2c_pkt, data);
+
+       rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
 void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data)
 {
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};
@@ -2131,7 +2146,7 @@ void rtw_hw_scan_status_report(struct rtw_dev *rtwdev, struct sk_buff *skb)
        rtw_hw_scan_complete(rtwdev, vif, aborted);
 
        if (aborted)
-               rtw_info(rtwdev, "HW scan aborted with code: %d\n", rc);
+               rtw_dbg(rtwdev, RTW_DBG_HW_SCAN, "HW scan aborted with code: %d\n", rc);
 }
 
 void rtw_store_op_chan(struct rtw_dev *rtwdev)
index 654c3c2..b59d2cb 100644 (file)
@@ -47,6 +47,7 @@ enum rtw_c2h_cmd_id {
        C2H_CCX_TX_RPT = 0x03,
        C2H_BT_INFO = 0x09,
        C2H_BT_MP_INFO = 0x0b,
+       C2H_BT_HID_INFO = 0x45,
        C2H_RA_RPT = 0x0c,
        C2H_HW_FEATURE_REPORT = 0x19,
        C2H_WLAN_INFO = 0x27,
@@ -529,6 +530,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
 #define H2C_CMD_QUERY_BT_MP_INFO       0x67
 #define H2C_CMD_BT_WIFI_CONTROL                0x69
 #define H2C_CMD_WIFI_CALIBRATION       0x6d
+#define H2C_CMD_QUERY_BT_HID_INFO      0x73
 
 #define H2C_CMD_KEEP_ALIVE             0x03
 #define H2C_CMD_DISCONNECT_DECISION    0x04
@@ -681,6 +683,11 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
 #define SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, value)                              \
        le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16))
 
+#define SET_COEX_QUERY_HID_INFO_SUBID(h2c_pkt, value)                          \
+       le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_COEX_QUERY_HID_INFO_DATA1(h2c_pkt, value)                          \
+       le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+
 #define SET_KEEP_ALIVE_ENABLE(h2c_pkt, value)                                 \
        le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
 #define SET_KEEP_ALIVE_ADOPT(h2c_pkt, value)                                  \
@@ -780,6 +787,8 @@ void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl);
 void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable);
 void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
                           u8 para1, u8 para2, u8 para3, u8 para4, u8 para5);
+void rtw_fw_coex_query_hid_info(struct rtw_dev *rtwdev, u8 sub_id, u8 data);
+
 void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data);
 void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
 void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
index 647d266..5cdc54c 100644 (file)
@@ -208,7 +208,7 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
 
        mutex_unlock(&rtwdev->mutex);
 
-       rtw_info(rtwdev, "start vif %pM on port %d\n", vif->addr, rtwvif->port);
+       rtw_dbg(rtwdev, RTW_DBG_STATE, "start vif %pM on port %d\n", vif->addr, rtwvif->port);
        return 0;
 }
 
@@ -219,7 +219,7 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
        struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
        u32 config = 0;
 
-       rtw_info(rtwdev, "stop vif %pM on port %d\n", vif->addr, rtwvif->port);
+       rtw_dbg(rtwdev, RTW_DBG_STATE, "stop vif %pM on port %d\n", vif->addr, rtwvif->port);
 
        mutex_lock(&rtwdev->mutex);
 
@@ -245,8 +245,8 @@ static int rtw_ops_change_interface(struct ieee80211_hw *hw,
 {
        struct rtw_dev *rtwdev = hw->priv;
 
-       rtw_info(rtwdev, "change vif %pM (%d)->(%d), p2p (%d)->(%d)\n",
-                vif->addr, vif->type, type, vif->p2p, p2p);
+       rtw_dbg(rtwdev, RTW_DBG_STATE, "change vif %pM (%d)->(%d), p2p (%d)->(%d)\n",
+               vif->addr, vif->type, type, vif->p2p, p2p);
 
        rtw_ops_remove_interface(hw, vif);
 
index 2757aa0..8b9899e 100644 (file)
@@ -207,6 +207,9 @@ static void rtw_watch_dog_work(struct work_struct *work)
        else
                clear_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
 
+       rtw_coex_wl_status_check(rtwdev);
+       rtw_coex_query_bt_hid_list(rtwdev);
+
        if (busy_traffic != test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags))
                rtw_coex_wl_status_change_notify(rtwdev, 0);
 
@@ -314,8 +317,8 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
 
        rtwdev->sta_cnt++;
        rtwdev->beacon_loss = false;
-       rtw_info(rtwdev, "sta %pM joined with macid %d\n",
-                sta->addr, si->mac_id);
+       rtw_dbg(rtwdev, RTW_DBG_STATE, "sta %pM joined with macid %d\n",
+               sta->addr, si->mac_id);
 
        return 0;
 }
@@ -336,8 +339,8 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
        kfree(si->mask);
 
        rtwdev->sta_cnt--;
-       rtw_info(rtwdev, "sta %pM with macid %d left\n",
-                sta->addr, si->mac_id);
+       rtw_dbg(rtwdev, RTW_DBG_STATE, "sta %pM with macid %d left\n",
+               sta->addr, si->mac_id);
 }
 
 struct rtw_fwcd_hdr {
@@ -1135,7 +1138,7 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
                        ldpc_en = HT_LDPC_EN;
        }
 
-       if (efuse->hw_cap.nss == 1)
+       if (efuse->hw_cap.nss == 1 || rtwdev->hal.txrx_1ss)
                ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS;
 
        if (hal->current_band_type == RTW_BAND_5G) {
@@ -1570,6 +1573,37 @@ static void rtw_unset_supported_band(struct ieee80211_hw *hw,
        kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
 }
 
+static void rtw_vif_smps_iter(void *data, u8 *mac,
+                             struct ieee80211_vif *vif)
+{
+       struct rtw_dev *rtwdev = (struct rtw_dev *)data;
+
+       if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
+               return;
+
+       if (rtwdev->hal.txrx_1ss)
+               ieee80211_request_smps(vif, IEEE80211_SMPS_STATIC);
+       else
+               ieee80211_request_smps(vif, IEEE80211_SMPS_OFF);
+}
+
+void rtw_set_txrx_1ss(struct rtw_dev *rtwdev, bool txrx_1ss)
+{
+       struct rtw_chip_info *chip = rtwdev->chip;
+       struct rtw_hal *hal = &rtwdev->hal;
+
+       if (!chip->ops->config_txrx_mode || rtwdev->hal.txrx_1ss == txrx_1ss)
+               return;
+
+       rtwdev->hal.txrx_1ss = txrx_1ss;
+       if (txrx_1ss)
+               chip->ops->config_txrx_mode(rtwdev, BB_PATH_A, BB_PATH_A, false);
+       else
+               chip->ops->config_txrx_mode(rtwdev, hal->antenna_tx,
+                                           hal->antenna_rx, false);
+       rtw_iterate_vifs_atomic(rtwdev, rtw_vif_smps_iter, rtwdev);
+}
+
 static void __update_firmware_feature(struct rtw_dev *rtwdev,
                                      struct rtw_fw_state *fw)
 {
index 36e1e40..17815af 100644 (file)
@@ -874,6 +874,8 @@ struct rtw_chip_ops {
                               enum rtw_bb_path tx_path_1ss,
                               enum rtw_bb_path tx_path_cck,
                               bool is_tx2_path);
+       void (*config_txrx_mode)(struct rtw_dev *rtwdev, u8 tx_path,
+                                u8 rx_path, bool is_tx2_path);
 
        /* for coex */
        void (*coex_set_init)(struct rtw_dev *rtwdev);
@@ -1240,6 +1242,7 @@ struct rtw_chip_info {
        bool scbd_support;
        bool new_scbd10_def; /* true: fix 2M(8822c) */
        bool ble_hid_profile_support;
+       bool wl_mimo_ps_support;
        u8 pstdma_type; /* 0: LPSoff, 1:LPSon */
        u8 bt_rssi_type;
        u8 ant_isolation;
@@ -1352,6 +1355,42 @@ struct rtw_coex_dm {
 #define COEX_BTINFO_LENGTH_MAX 10
 #define COEX_BTINFO_LENGTH     7
 
+#define COEX_BT_HIDINFO_LIST   0x0
+#define COEX_BT_HIDINFO_A      0x1
+#define COEX_BT_HIDINFO_NAME   3
+
+#define COEX_BT_HIDINFO_LENGTH 6
+#define COEX_BT_HIDINFO_HANDLE_NUM     4
+#define COEX_BT_HIDINFO_C2H_HANDLE     0
+#define COEX_BT_HIDINFO_C2H_VENDOR     1
+#define COEX_BT_BLE_HANDLE_THRS        0x10
+#define COEX_BT_HIDINFO_NOTCON 0xff
+
+struct rtw_coex_hid {
+       u8 hid_handle;
+       u8 hid_vendor;
+       u8 hid_name[COEX_BT_HIDINFO_NAME];
+       bool hid_info_completed;
+       bool is_game_hid;
+};
+
+struct rtw_coex_hid_handle_list {
+       u8 cmd_id;
+       u8 len;
+       u8 subid;
+       u8 handle_cnt;
+       u8 handle[COEX_BT_HIDINFO_HANDLE_NUM];
+} __packed;
+
+struct rtw_coex_hid_info_a {
+       u8 cmd_id;
+       u8 len;
+       u8 subid;
+       u8 handle;
+       u8 vendor;
+       u8 name[COEX_BT_HIDINFO_NAME];
+} __packed;
+
 struct rtw_coex_stat {
        bool bt_disabled;
        bool bt_disabled_pre;
@@ -1382,6 +1421,8 @@ struct rtw_coex_stat {
        bool bt_slave;
        bool bt_418_hid_exist;
        bool bt_ble_hid_exist;
+       bool bt_game_hid_exist;
+       bool bt_hid_handle_cnt;
        bool bt_mailbox_reply;
 
        bool wl_under_lps;
@@ -1402,6 +1443,7 @@ struct rtw_coex_stat {
        bool wl_connecting;
        bool wl_slot_toggle;
        bool wl_slot_toggle_change; /* if toggle to no-toggle */
+       bool wl_mimo_ps;
 
        u32 bt_supported_version;
        u32 bt_supported_feature;
@@ -1459,6 +1501,9 @@ struct rtw_coex_stat {
 
        u32 darfrc;
        u32 darfrch;
+
+       struct rtw_coex_hid hid_info[COEX_BT_HIDINFO_HANDLE_NUM];
+       struct rtw_coex_hid_handle_list hid_handle_list;
 };
 
 struct rtw_coex {
@@ -1867,6 +1912,7 @@ struct rtw_hal {
        u32 antenna_tx;
        u32 antenna_rx;
        u8 bfee_sts_cap;
+       bool txrx_1ss;
 
        /* protect tx power section */
        struct mutex tx_power_mutex;
@@ -2123,5 +2169,5 @@ void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start);
 int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
                u32 fwcd_item);
 int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size);
-
+void rtw_set_txrx_1ss(struct rtw_dev *rtwdev, bool config_1ss);
 #endif
index 3fdbaf7..ad2b323 100644 (file)
@@ -2753,6 +2753,7 @@ struct rtw_chip_info rtw8723d_hw_spec = {
        .scbd_support = true,
        .new_scbd10_def = true,
        .ble_hid_profile_support = false,
+       .wl_mimo_ps_support = false,
        .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
        .bt_rssi_type = COEX_BTRSSI_RATIO,
        .ant_isolation = 15,
index b1f4afb..99eee12 100644 (file)
@@ -499,7 +499,7 @@ static s8 get_cck_rx_pwr(struct rtw_dev *rtwdev, u8 lna_idx, u8 vga_idx)
        }
 
        if (lna_idx >= lna_gain_table_size) {
-               rtw_info(rtwdev, "incorrect lna index (%d)\n", lna_idx);
+               rtw_warn(rtwdev, "incorrect lna index (%d)\n", lna_idx);
                return -120;
        }
 
@@ -1925,6 +1925,7 @@ struct rtw_chip_info rtw8821c_hw_spec = {
        .scbd_support = true,
        .new_scbd10_def = false,
        .ble_hid_profile_support = false,
+       .wl_mimo_ps_support = false,
        .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
        .bt_rssi_type = COEX_BTRSSI_RATIO,
        .ant_isolation = 15,
index dd4fbb8..eee7bf0 100644 (file)
@@ -1012,12 +1012,12 @@ static int rtw8822b_set_antenna(struct rtw_dev *rtwdev,
                antenna_tx, antenna_rx);
 
        if (!rtw8822b_check_rf_path(antenna_tx)) {
-               rtw_info(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
+               rtw_warn(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
                return -EINVAL;
        }
 
        if (!rtw8822b_check_rf_path(antenna_rx)) {
-               rtw_info(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
+               rtw_warn(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
                return -EINVAL;
        }
 
@@ -2554,6 +2554,7 @@ struct rtw_chip_info rtw8822b_hw_spec = {
        .scbd_support = true,
        .new_scbd10_def = false,
        .ble_hid_profile_support = false,
+       .wl_mimo_ps_support = false,
        .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
        .bt_rssi_type = COEX_BTRSSI_RATIO,
        .ant_isolation = 15,
index 35c46e5..cd74607 100644 (file)
@@ -2798,7 +2798,7 @@ static int rtw8822c_set_antenna(struct rtw_dev *rtwdev,
        case BB_PATH_AB:
                break;
        default:
-               rtw_info(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
+               rtw_warn(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
                return -EINVAL;
        }
 
@@ -2808,7 +2808,7 @@ static int rtw8822c_set_antenna(struct rtw_dev *rtwdev,
        case BB_PATH_AB:
                break;
        default:
-               rtw_info(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
+               rtw_warn(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
                return -EINVAL;
        }
 
@@ -2996,19 +2996,34 @@ static void rtw8822c_coex_cfg_gnt_fix(struct rtw_dev *rtwdev)
         * enable "DAC off if GNT_WL = 0" for non-shared-antenna
         * disable 0x1c30[22] = 0,
         * enable: 0x1c30[22] = 1, 0x1c38[12] = 0, 0x1c38[28] = 1
-        *
-        * disable WL-S1 BB chage RF mode if GNT_BT
+        */
+       if (coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
+               rtw_write8_mask(rtwdev, REG_ANAPAR + 2,
+                               BIT_ANAPAR_BTPS >> 16, 0);
+       } else {
+               rtw_write8_mask(rtwdev, REG_ANAPAR + 2,
+                               BIT_ANAPAR_BTPS >> 16, 1);
+               rtw_write8_mask(rtwdev, REG_RSTB_SEL + 1,
+                               BIT_DAC_OFF_ENABLE, 0);
+               rtw_write8_mask(rtwdev, REG_RSTB_SEL + 3,
+                               BIT_DAC_OFF_ENABLE, 1);
+       }
+
+       /* disable WL-S1 BB change RF mode if GNT_BT
         * since RF TRx mask can do it
         */
-       rtw_write8_mask(rtwdev, REG_ANAPAR + 2, BIT_ANAPAR_BTPS >> 16, 1);
-       rtw_write8_mask(rtwdev, REG_RSTB_SEL + 1, BIT_DAC_OFF_ENABLE, 0);
-       rtw_write8_mask(rtwdev, REG_RSTB_SEL + 3, BIT_DAC_OFF_ENABLE, 1);
-       rtw_write8_mask(rtwdev, REG_IGN_GNTBT4, BIT_PI_IGNORE_GNT_BT, 1);
+       rtw_write8_mask(rtwdev, REG_IGN_GNTBT4,
+                       BIT_PI_IGNORE_GNT_BT, 1);
 
        /* disable WL-S0 BB change RF mode if wifi is at 5G,
         * or antenna path is separated
         */
-       if (coex_stat->wl_coex_mode == COEX_WLINK_5G ||
+       if (coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
+               rtw_write8_mask(rtwdev, REG_IGN_GNT_BT1,
+                               BIT_PI_IGNORE_GNT_BT, 1);
+               rtw_write8_mask(rtwdev, REG_NOMASK_TXBT,
+                               BIT_NOMASK_TXBT_ENABLE, 1);
+       } else if (coex_stat->wl_coex_mode == COEX_WLINK_5G ||
            coex->under_5g || !efuse->share_ant) {
                if (coex_stat->kt_ver >= 3) {
                        rtw_write8_mask(rtwdev, REG_IGN_GNT_BT1,
@@ -4962,6 +4977,7 @@ static struct rtw_chip_ops rtw8822c_ops = {
        .cfo_init               = rtw8822c_cfo_init,
        .cfo_track              = rtw8822c_cfo_track,
        .config_tx_path         = rtw8822c_config_tx_path,
+       .config_txrx_mode       = rtw8822c_config_trx_mode,
 
        .coex_set_init          = rtw8822c_coex_cfg_init,
        .coex_set_ant_switch    = NULL,
@@ -5007,6 +5023,8 @@ static const struct coex_table_para table_sant_8822c[] = {
        {0x66556aaa, 0x6a5a6aaa}, /*case-30*/
        {0xffffffff, 0x5aaa5aaa},
        {0x56555555, 0x5a5a5aaa},
+       {0xdaffdaff, 0xdaffdaff},
+       {0xddffddff, 0xddffddff},
 };
 
 /* Non-Shared-Antenna Coex Table */
@@ -5107,7 +5125,8 @@ static const struct coex_rf_para rf_para_tx_8822c[] = {
        {8, 17, true, 4},
        {7, 18, true, 4},
        {6, 19, true, 4},
-       {5, 20, true, 4}
+       {5, 20, true, 4},
+       {0, 21, true, 4}   /* for game hid */
 };
 
 static const struct coex_rf_para rf_para_rx_8822c[] = {
@@ -5116,7 +5135,8 @@ static const struct coex_rf_para rf_para_rx_8822c[] = {
        {3, 24, true, 5},
        {2, 26, true, 5},
        {1, 27, true, 5},
-       {0, 28, true, 5}
+       {0, 28, true, 5},
+       {0, 28, true, 5}   /* for game hid */
 };
 
 static_assert(ARRAY_SIZE(rf_para_tx_8822c) == ARRAY_SIZE(rf_para_rx_8822c));
@@ -5354,11 +5374,12 @@ struct rtw_chip_info rtw8822c_hw_spec = {
        .wowlan_stub = &rtw_wowlan_stub_8822c,
        .max_sched_scan_ssids = 4,
 #endif
-       .coex_para_ver = 0x2103181c,
-       .bt_desired_ver = 0x1c,
+       .coex_para_ver = 0x22020720,
+       .bt_desired_ver = 0x20,
        .scbd_support = true,
        .new_scbd10_def = true,
        .ble_hid_profile_support = true,
+       .wl_mimo_ps_support = true,
        .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
        .bt_rssi_type = COEX_BTRSSI_DBM,
        .ant_isolation = 15,
index 3383726..c472f15 100644 (file)
@@ -91,10 +91,10 @@ int rtw_set_sar_specs(struct rtw_dev *rtwdev,
                        return -EINVAL;
 
                power = sar->sub_specs[i].power;
-               rtw_info(rtwdev, "On freq %u to %u, set SAR %d in 1/%lu dBm\n",
-                        rtw_common_sar_freq_ranges[idx].start_freq,
-                        rtw_common_sar_freq_ranges[idx].end_freq,
-                        power, BIT(RTW_COMMON_SAR_FCT));
+               rtw_dbg(rtwdev, RTW_DBG_REGD, "On freq %u to %u, set SAR %d in 1/%lu dBm\n",
+                       rtw_common_sar_freq_ranges[idx].start_freq,
+                       rtw_common_sar_freq_ranges[idx].end_freq,
+                       power, BIT(RTW_COMMON_SAR_FCT));
 
                for (j = 0; j < RTW_RF_PATH_MAX; j++) {
                        for (k = 0; k < RTW_RATE_SECTION_MAX; k++) {
index efcc1b0..94d1089 100644 (file)
@@ -353,7 +353,7 @@ static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
 
        bw = si->bw_mode;
        rate_id = si->rate_id;
-       stbc = si->stbc_en;
+       stbc = rtwdev->hal.txrx_1ss ? false : si->stbc_en;
        ldpc = si->ldpc_en;
 
 out:
index 07f2671..6845839 100644 (file)
@@ -1478,7 +1478,7 @@ static void _set_gnt_wl(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
                }
        }
 
-       rtw89_mac_cfg_gnt(rtwdev, &dm->gnt);
+       rtw89_chip_mac_cfg_gnt(rtwdev, &dm->gnt);
 }
 
 #define BTC_TDMA_WLROLE_MAX 2
@@ -2233,7 +2233,7 @@ static void _set_gnt_bt(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
                }
        }
 
-       rtw89_mac_cfg_gnt(rtwdev, &dm->gnt);
+       rtw89_chip_mac_cfg_gnt(rtwdev, &dm->gnt);
 }
 
 static void _set_bt_plut(struct rtw89_dev *rtwdev, u8 phy_map,
@@ -2300,7 +2300,7 @@ static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
 
        switch (type) {
        case BTC_ANT_WPOWERON:
-               rtw89_mac_cfg_ctrl_path(rtwdev, false);
+               rtw89_chip_cfg_ctrl_path(rtwdev, false);
                break;
        case BTC_ANT_WINIT:
                if (bt->enable.now) {
@@ -2310,21 +2310,21 @@ static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
                        _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
                        _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
                }
-               rtw89_mac_cfg_ctrl_path(rtwdev, true);
+               rtw89_chip_cfg_ctrl_path(rtwdev, true);
                _set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_BT, BTC_PLT_BT);
                break;
        case BTC_ANT_WONLY:
                _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
                _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
-               rtw89_mac_cfg_ctrl_path(rtwdev, true);
+               rtw89_chip_cfg_ctrl_path(rtwdev, true);
                _set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
                break;
        case BTC_ANT_WOFF:
-               rtw89_mac_cfg_ctrl_path(rtwdev, false);
+               rtw89_chip_cfg_ctrl_path(rtwdev, false);
                _set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
                break;
        case BTC_ANT_W2G:
-               rtw89_mac_cfg_ctrl_path(rtwdev, true);
+               rtw89_chip_cfg_ctrl_path(rtwdev, true);
                if (rtwdev->dbcc_en) {
                        for (i = 0; i < RTW89_PHY_MAX; i++) {
                                b2g = (wl_dinfo->real_band[i] == RTW89_BAND_2G);
@@ -2352,32 +2352,32 @@ static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
                }
                break;
        case BTC_ANT_W5G:
-               rtw89_mac_cfg_ctrl_path(rtwdev, true);
+               rtw89_chip_cfg_ctrl_path(rtwdev, true);
                _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
                _set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW);
                _set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
                break;
        case BTC_ANT_W25G:
-               rtw89_mac_cfg_ctrl_path(rtwdev, true);
+               rtw89_chip_cfg_ctrl_path(rtwdev, true);
                _set_gnt_wl(rtwdev, phy_map, BTC_GNT_HW);
                _set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW);
                _set_bt_plut(rtwdev, BTC_PHY_ALL,
                             BTC_PLT_GNT_WL, BTC_PLT_GNT_WL);
                break;
        case BTC_ANT_FREERUN:
-               rtw89_mac_cfg_ctrl_path(rtwdev, true);
+               rtw89_chip_cfg_ctrl_path(rtwdev, true);
                _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
                _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI);
                _set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
                break;
        case BTC_ANT_WRFK:
-               rtw89_mac_cfg_ctrl_path(rtwdev, true);
+               rtw89_chip_cfg_ctrl_path(rtwdev, true);
                _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
                _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
                _set_bt_plut(rtwdev, phy_map, BTC_PLT_NONE, BTC_PLT_NONE);
                break;
        case BTC_ANT_BRFK:
-               rtw89_mac_cfg_ctrl_path(rtwdev, false);
+               rtw89_chip_cfg_ctrl_path(rtwdev, false);
                _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_LO);
                _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI);
                _set_bt_plut(rtwdev, phy_map, BTC_PLT_NONE, BTC_PLT_NONE);
@@ -4623,12 +4623,12 @@ static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
        ver_hotfix = FIELD_GET(GENMASK(15, 8), chip->wlcx_desired);
        seq_printf(m, "(%s, desired:%d.%d.%d), ",
                   (wl->ver_info.fw_coex >= chip->wlcx_desired ?
-                  "Match" : "Mis-Match"), ver_main, ver_sub, ver_hotfix);
+                  "Match" : "Mismatch"), ver_main, ver_sub, ver_hotfix);
 
        seq_printf(m, "BT_FW_coex:%d(%s, desired:%d)\n",
                   bt->ver_info.fw_coex,
                   (bt->ver_info.fw_coex >= chip->btcx_desired ?
-                  "Match" : "Mis-Match"), chip->btcx_desired);
+                  "Match" : "Mismatch"), chip->btcx_desired);
 
        if (bt->enable.now && bt->ver_info.fw == 0)
                rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, true);
@@ -5075,7 +5075,7 @@ static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m)
        seq_printf(m, "leak_ap:%d, fw_offload:%s%s\n", dm->leak_ap,
                   (BTC_CX_FW_OFFLOAD ? "Y" : "N"),
                   (dm->wl_fw_cx_offload == BTC_CX_FW_OFFLOAD ?
-                   "" : "(Mis-Match!!)"));
+                   "" : "(Mismatch!!)"));
 
        if (dm->rf_trx_para.wl_tx_power == 0xff)
                seq_printf(m,
index a9544b0..bcefc96 100644 (file)
@@ -232,6 +232,7 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
        u8 center_chan;
        u8 bandwidth = RTW89_CHANNEL_WIDTH_20;
        u8 primary_chan_idx = 0;
+       u32 offset;
        u8 band;
        u8 subband;
 
@@ -256,23 +257,16 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
                }
                break;
        case NL80211_CHAN_WIDTH_80:
-               bandwidth = RTW89_CHANNEL_WIDTH_80;
+       case NL80211_CHAN_WIDTH_160:
+               bandwidth = nl_to_rtw89_bandwidth(width);
                if (primary_freq > center_freq) {
-                       if (primary_freq - center_freq == 10) {
-                               primary_chan_idx = RTW89_SC_20_UPPER;
-                               center_chan -= 2;
-                       } else {
-                               primary_chan_idx = RTW89_SC_20_UPMOST;
-                               center_chan -= 6;
-                       }
+                       offset = (primary_freq - center_freq - 10) / 20;
+                       primary_chan_idx = RTW89_SC_20_UPPER + offset * 2;
+                       center_chan -= 2 + offset * 4;
                } else {
-                       if (center_freq - primary_freq == 10) {
-                               primary_chan_idx = RTW89_SC_20_LOWER;
-                               center_chan += 2;
-                       } else {
-                               primary_chan_idx = RTW89_SC_20_LOWEST;
-                               center_chan += 6;
-                       }
+                       offset = (center_freq - primary_freq - 10) / 20;
+                       primary_chan_idx = RTW89_SC_20_LOWER + offset * 2;
+                       center_chan += 2 + offset * 4;
                }
                break;
        default:
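
The hunk above replaces the four hard-coded 80 MHz cases with closed-form arithmetic that extends to 160 MHz: every extra 20 MHz between the primary and center frequency adds 2 to the subchannel index and 4 to the channel-number correction. A minimal userspace sketch of the math (enum values mirrored from the rtw89_sc_offset extension later in this patch; not driver code):

#include <stdio.h>

enum { SC_20_UPPER = 1, SC_20_LOWER = 2 }; /* mirrored from rtw89_sc_offset */

int main(void)
{
        unsigned int primary_freq = 5180, center_freq = 5250; /* 160 MHz: ch 36 in ch 50 */
        unsigned int center_chan = 36; /* starts as the primary channel number */
        unsigned int offset, pri_ch_idx;

        if (primary_freq > center_freq) {
                offset = (primary_freq - center_freq - 10) / 20;
                pri_ch_idx = SC_20_UPPER + offset * 2;
                center_chan -= 2 + offset * 4;
        } else {
                offset = (center_freq - primary_freq - 10) / 20;
                pri_ch_idx = SC_20_LOWER + offset * 2;
                center_chan += 2 + offset * 4;
        }
        /* prints pri_ch_idx=8 (RTW89_SC_20_LOW3X) center_chan=50 */
        printf("pri_ch_idx=%u center_chan=%u\n", pri_ch_idx, center_chan);
        return 0;
}

For a lower-edge primary in a 160 MHz channel the offset is 3, which is why the rtw89_sc_offset enum below gains the UP2X/LOW2X/UP3X/LOW3X entries.
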
@@ -293,23 +287,63 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
                break;
        }
 
-       switch (center_chan) {
+       switch (band) {
        default:
-       case 1 ... 14:
-               subband = RTW89_CH_2G;
-               break;
-       case 36 ... 64:
-               subband = RTW89_CH_5G_BAND_1;
+       case RTW89_BAND_2G:
+               switch (center_chan) {
+               default:
+               case 1 ... 14:
+                       subband = RTW89_CH_2G;
+                       break;
+               }
                break;
-       case 100 ... 144:
-               subband = RTW89_CH_5G_BAND_3;
+       case RTW89_BAND_5G:
+               switch (center_chan) {
+               default:
+               case 36 ... 64:
+                       subband = RTW89_CH_5G_BAND_1;
+                       break;
+               case 100 ... 144:
+                       subband = RTW89_CH_5G_BAND_3;
+                       break;
+               case 149 ... 177:
+                       subband = RTW89_CH_5G_BAND_4;
+                       break;
+               }
                break;
-       case 149 ... 177:
-               subband = RTW89_CH_5G_BAND_4;
+       case RTW89_BAND_6G:
+               switch (center_chan) {
+               default:
+               case 1 ... 29:
+                       subband = RTW89_CH_6G_BAND_IDX0;
+                       break;
+               case 33 ... 61:
+                       subband = RTW89_CH_6G_BAND_IDX1;
+                       break;
+               case 65 ... 93:
+                       subband = RTW89_CH_6G_BAND_IDX2;
+                       break;
+               case 97 ... 125:
+                       subband = RTW89_CH_6G_BAND_IDX3;
+                       break;
+               case 129 ... 157:
+                       subband = RTW89_CH_6G_BAND_IDX4;
+                       break;
+               case 161 ... 189:
+                       subband = RTW89_CH_6G_BAND_IDX5;
+                       break;
+               case 193 ... 221:
+                       subband = RTW89_CH_6G_BAND_IDX6;
+                       break;
+               case 225 ... 253:
+                       subband = RTW89_CH_6G_BAND_IDX7;
+                       break;
+               }
                break;
        }
 
        chan_param->center_chan = center_chan;
+       chan_param->center_freq = center_freq;
        chan_param->primary_chan = channel->hw_value;
        chan_param->bandwidth = bandwidth;
        chan_param->pri_ch_idx = primary_chan_idx;
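
The subband selection above becomes a band-first nested switch because channel numbers alias across bands once 6 GHz is supported: 5 GHz channel 36 and 6 GHz channel 36 are different subbands, so center_chan alone no longer identifies one. A coarse stand-in for the lookup (ranges mirrored from the hunk, collapsed for brevity):

#include <stdio.h>

enum band { BAND_2G, BAND_5G, BAND_6G };

static const char *subband(enum band b, unsigned int ch)
{
        switch (b) {
        case BAND_5G: return ch <= 64 ? "CH_5G_BAND_1" : "CH_5G_BAND_3/4";
        case BAND_6G: return ch <= 29 ? "CH_6G_BAND_IDX0" : "CH_6G_BAND_IDX1+";
        default:      return "CH_2G";
        }
}

int main(void)
{
        printf("5G ch36 -> %s, 6G ch36 -> %s\n",
               subband(BAND_5G, 36), subband(BAND_6G, 36));
        return 0;
}
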
@@ -338,7 +372,9 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev)
 
        hal->current_band_width = bandwidth;
        hal->current_channel = center_chan;
+       hal->current_freq = ch_param.center_freq;
        hal->prev_primary_channel = hal->current_primary_channel;
+       hal->prev_band_type = hal->current_band_type;
        hal->current_primary_channel = ch_param.primary_chan;
        hal->current_band_type = ch_param.band_type;
        hal->current_subband = ch_param.subband_type;
@@ -719,6 +755,22 @@ rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev,
        return PACKET_MAX;
 }
 
+static void
+rtw89_core_tx_wake(struct rtw89_dev *rtwdev,
+                  struct rtw89_core_tx_request *tx_req)
+{
+       if (!rtwdev->fw.tx_wake)
+               return;
+
+       if (!test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
+               return;
+
+       if (tx_req->tx_type != RTW89_CORE_TX_TYPE_MGMT)
+               return;
+
+       rtw89_mac_notify_wake(rtwdev);
+}
+
 static void
 rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
                               struct rtw89_core_tx_request *tx_req)
@@ -817,6 +869,8 @@ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
        rtw89_traffic_stats_accu(rtwdev, &rtwdev->stats, skb, true);
        rtw89_traffic_stats_accu(rtwdev, &rtwvif->stats, skb, true);
        rtw89_core_tx_update_desc_info(rtwdev, &tx_req);
+       rtw89_core_tx_wake(rtwdev, &tx_req);
+
        ret = rtw89_hci_tx_write(rtwdev, &tx_req);
        if (ret) {
                rtw89_err(rtwdev, "failed to transmit skb to HCI\n");
@@ -1126,13 +1180,7 @@ static bool rtw89_core_rx_ppdu_match(struct rtw89_dev *rtwdev,
                rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode);
        }
 
-       if (desc_info->bw == RTW89_CHANNEL_WIDTH_80)
-               bw = RATE_INFO_BW_80;
-       else if (desc_info->bw == RTW89_CHANNEL_WIDTH_40)
-               bw = RATE_INFO_BW_40;
-       else
-               bw = RATE_INFO_BW_20;
-
+       bw = rtw89_hw_to_rate_info_bw(desc_info->bw);
        gi_ltf = rtw89_rxdesc_to_nl_he_gi(rtwdev, desc_info, false);
        ret = rtwdev->ppdu_sts.curr_rx_ppdu_cnt[band] == desc_info->ppdu_cnt &&
              status->rate_idx == rate_idx &&
@@ -1213,6 +1261,15 @@ static void rtw89_core_hw_to_sband_rate(struct ieee80211_rx_status *rx_status)
        if (rx_status->band == NL80211_BAND_2GHZ ||
            rx_status->encoding != RX_ENC_LEGACY)
                return;
+
+       /* The freq of some control frames (ACKs here) is reported wrong due
+        * to FW notify timing; set the lowest rate to keep the index in range.
+        */
+       if (rx_status->rate_idx < RTW89_HW_RATE_OFDM6) {
+               rx_status->rate_idx = 0;
+               return;
+       }
+
        /* No 4 CCK rates for non-2G */
        rx_status->rate_idx -= 4;
 }
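
Context for the CCK guard above: in the hw-rate numbering the first four indices are the 2 GHz-only CCK rates and OFDM6 sits right after them, so legacy frames on 5/6 GHz shift down by 4 before indexing mac80211's per-band rate table; a misreported ACK can carry a CCK index on a non-2G band and wrap the u8 on subtraction. A standalone model of the adjustment (RTW89_HW_RATE_OFDM6 assumed to be 4, per that enum layout):

#include <stdio.h>

#define HW_RATE_OFDM6 4 /* assumed first OFDM index in the hw-rate enum */

/* non-2GHz legacy frames: a CCK index can only come from a misreported
 * control frame, so fall back to rate 0 instead of wrapping on the
 * subtraction; otherwise shift past the four CCK entries. */
static unsigned int hw_to_sband_rate(unsigned int rate_idx, int is_2ghz)
{
        if (is_2ghz)
                return rate_idx;
        if (rate_idx < HW_RATE_OFDM6)
                return 0;
        return rate_idx - 4;
}

int main(void)
{
        printf("%u %u\n", hw_to_sband_rate(2, 0), hw_to_sband_rate(7, 0));
        return 0;
}
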
@@ -1389,6 +1446,7 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
                                        struct ieee80211_rx_status *rx_status)
 {
        struct ieee80211_hw *hw = rtwdev->hw;
+       struct rtw89_hal *hal = &rtwdev->hal;
        u16 data_rate;
        u8 data_rate_mode;
 
@@ -1396,6 +1454,13 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
        rx_status->freq = hw->conf.chandef.chan->center_freq;
        rx_status->band = hw->conf.chandef.chan->band;
 
+       if (rtwdev->scanning && rtwdev->fw.scan_offload) {
+               rx_status->freq =
+                       ieee80211_channel_to_frequency(hal->current_channel,
+                                                      hal->current_band_type);
+               rx_status->band = rtwdev->hal.current_band_type;
+       }
+
        if (desc_info->icv_err || desc_info->crc32_err)
                rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
 
@@ -1403,12 +1468,7 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
            !(desc_info->sw_dec || desc_info->icv_err))
                rx_status->flag |= RX_FLAG_DECRYPTED;
 
-       if (desc_info->bw == RTW89_CHANNEL_WIDTH_80)
-               rx_status->bw = RATE_INFO_BW_80;
-       else if (desc_info->bw == RTW89_CHANNEL_WIDTH_40)
-               rx_status->bw = RATE_INFO_BW_40;
-       else
-               rx_status->bw = RATE_INFO_BW_20;
+       rx_status->bw = rtw89_hw_to_rate_info_bw(desc_info->bw);
 
        data_rate = desc_info->data_rate;
        data_rate_mode = GET_DATA_RATE_MODE(data_rate);
@@ -1641,11 +1701,12 @@ static void rtw89_core_txq_push(struct rtw89_dev *rtwdev,
        unsigned long i;
        int ret;
 
+       rcu_read_lock();
        for (i = 0; i < frame_cnt; i++) {
                skb = ieee80211_tx_dequeue_ni(rtwdev->hw, txq);
                if (!skb) {
                        rtw89_debug(rtwdev, RTW89_DBG_TXRX, "dequeue a NULL skb\n");
-                       return;
+                       goto out;
                }
                rtw89_core_txq_check_agg(rtwdev, rtwtxq, skb);
                ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, NULL);
@@ -1655,6 +1716,8 @@ static void rtw89_core_txq_push(struct rtw89_dev *rtwdev,
                        break;
                }
        }
+out:
+       rcu_read_unlock();
 }
 
 static u32 rtw89_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, u8 tid)
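
The txq_push change above appears to exist because the dequeue path touches RCU-protected station data, so the whole loop is bracketed by rcu_read_lock()/rcu_read_unlock(); the old early return on a NULL skb is converted to goto out so the unlock always runs. Minimal shape of the single-exit unlock idiom (userspace stand-ins, not the RCU API):

#include <stdio.h>

static void take(void)    { puts("lock");   }
static void release(void) { puts("unlock"); }

/* single-exit unlock: the early exit funnels through the same label
 * as the normal path, as in the goto-out conversion above */
static void push(int frame_cnt)
{
        take();
        for (int i = 0; i < frame_cnt; i++) {
                if (i == 2)          /* stands in for "dequeued a NULL skb" */
                        goto out;
        }
out:
        release();
}

int main(void)
{
        push(5);
        return 0;
}
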
@@ -1730,6 +1793,16 @@ static void rtw89_core_txq_schedule(struct rtw89_dev *rtwdev, u8 ac, bool *reinv
        ieee80211_txq_schedule_end(hw, ac);
 }
 
+static void rtw89_ips_work(struct work_struct *work)
+{
+       struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+                                               ips_work);
+
+       mutex_lock(&rtwdev->mutex);
+       rtw89_enter_ips(rtwdev);
+       mutex_unlock(&rtwdev->mutex);
+}
+
 static void rtw89_core_txq_work(struct work_struct *w)
 {
        struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev, txq_work);
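
rtw89_ips_work() above is the deferred half of the scan-complete path added later in this patch: IPS entry needs the device mutex, so it is queued as a work item and the callback recovers its rtw89_dev with container_of(). A self-contained illustration of that recovery (simplified macro; the kernel's version adds a type check):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct rtw_dev_model {
        int id;
        struct work_struct ips_work;
};

static void ips_work_fn(struct work_struct *work)
{
        struct rtw_dev_model *d =
                container_of(work, struct rtw_dev_model, ips_work);
        printf("enter IPS on dev %d\n", d->id);
}

int main(void)
{
        struct rtw_dev_model dev = { .id = 7 };
        ips_work_fn(&dev.ips_work); /* the queue only ever sees &ips_work */
        return 0;
}
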
@@ -2197,9 +2270,14 @@ static void rtw89_init_ht_cap(struct rtw89_dev *rtwdev,
 static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
                               struct ieee80211_sta_vht_cap *vht_cap)
 {
-       static const __le16 highest[RF_PATH_MAX] = {
+       static const __le16 highest_bw80[RF_PATH_MAX] = {
                cpu_to_le16(433), cpu_to_le16(867), cpu_to_le16(1300), cpu_to_le16(1733),
        };
+       static const __le16 highest_bw160[RF_PATH_MAX] = {
+               cpu_to_le16(867), cpu_to_le16(1733), cpu_to_le16(2600), cpu_to_le16(3467),
+       };
+       const struct rtw89_chip_info *chip = rtwdev->chip;
+       const __le16 *highest = chip->support_bw160 ? highest_bw160 : highest_bw80;
        struct rtw89_hal *hal = &rtwdev->hal;
        u16 tx_mcs_map = 0, rx_mcs_map = 0;
        u8 sts_cap = 3;
@@ -2228,6 +2306,9 @@ static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
        vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
                        IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
        vht_cap->cap |= sts_cap << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+       if (chip->support_bw160)
+               vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+                               IEEE80211_VHT_CAP_SHORT_GI_160;
        vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rx_mcs_map);
        vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(tx_mcs_map);
        vht_cap->vht_mcs.rx_highest = highest[hal->rx_nss - 1];
@@ -2298,8 +2379,15 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
                                  IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
                if (i == NL80211_IFTYPE_STATION)
                        mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
-               phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
-                                 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+               if (band == NL80211_BAND_2GHZ) {
+                       phy_cap_info[0] =
+                               IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+               } else {
+                       phy_cap_info[0] =
+                               IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+                       if (chip->support_bw160)
+                               phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+               }
                phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
                                  IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
                                  IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
@@ -2328,6 +2416,9 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
                phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
                                  IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI |
                                  IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996;
+               if (chip->support_bw160)
+                       phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+                                          IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
                phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
                                  IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
                                  IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
@@ -2338,6 +2429,22 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
                        phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
                he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map);
                he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(mcs_map);
+               if (chip->support_bw160) {
+                       he_cap->he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(mcs_map);
+                       he_cap->he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(mcs_map);
+               }
+
+               if (band == NL80211_BAND_6GHZ) {
+                       __le16 capa;
+
+                       capa = le16_encode_bits(IEEE80211_HT_MPDU_DENSITY_NONE,
+                                               IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
+                              le16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
+                                               IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
+                              le16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+                                               IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
+                       iftype_data[idx].he_6ghz_capa.capa = capa;
+               }
 
                idx++;
        }
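
The 6 GHz capability word above is packed with le16_encode_bits(), which shifts each value into the bitfield selected by its mask. A standalone model of the packing (field masks are illustrative stand-ins, not the IEEE80211_HE_6GHZ_CAP_* definitions; relies on GCC/Clang __builtin_ctz):

#include <stdio.h>
#include <stdint.h>

/* FIELD_PREP-style packing: shift the value to the mask's low bit */
static uint16_t encode_bits(uint16_t val, uint16_t mask)
{
        return (uint16_t)((val << __builtin_ctz(mask)) & mask);
}

int main(void)
{
        /* illustrative field layout: bits 0-2, 3-5, 6-8 of the capa word */
        uint16_t capa = encode_bits(0, 0x0007)   /* min MPDU start spacing */
                      | encode_bits(7, 0x0038)   /* max A-MPDU length exp  */
                      | encode_bits(2, 0x01c0);  /* max MPDU length        */
        printf("capa=0x%04x\n", capa);           /* 0x00b8 */
        return 0;
}
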
@@ -2526,10 +2633,16 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
 {
        struct rtw89_btc *btc = &rtwdev->btc;
        int ret;
+       u8 band;
 
        INIT_LIST_HEAD(&rtwdev->ba_list);
        INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
        INIT_LIST_HEAD(&rtwdev->early_h2c_list);
+       for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+               if (!(rtwdev->chip->support_bands & BIT(band)))
+                       continue;
+               INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]);
+       }
        INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
        INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
        INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
@@ -2540,11 +2653,13 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
        INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
        rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
        spin_lock_init(&rtwdev->ba_lock);
+       spin_lock_init(&rtwdev->rpwm_lock);
        mutex_init(&rtwdev->mutex);
        mutex_init(&rtwdev->rf_mutex);
        rtwdev->total_sta_assoc = 0;
 
        INIT_WORK(&rtwdev->c2h_work, rtw89_fw_c2h_work);
+       INIT_WORK(&rtwdev->ips_work, rtw89_ips_work);
        skb_queue_head_init(&rtwdev->c2h_queue);
        rtw89_core_ppdu_sts_init(rtwdev);
        rtw89_traffic_stats_init(rtwdev, &rtwdev->stats);
@@ -2580,12 +2695,48 @@ void rtw89_core_deinit(struct rtw89_dev *rtwdev)
 }
 EXPORT_SYMBOL(rtw89_core_deinit);
 
+void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+                          const u8 *mac_addr, bool hw_scan)
+{
+       struct rtw89_hal *hal = &rtwdev->hal;
+
+       rtwdev->scanning = true;
+       rtw89_leave_lps(rtwdev);
+       if (hw_scan && rtwvif->net_type == RTW89_NET_TYPE_NO_LINK)
+               rtw89_leave_ips(rtwdev);
+
+       ether_addr_copy(rtwvif->mac_addr, mac_addr);
+       rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, hal->current_band_type);
+       rtw89_chip_rfk_scan(rtwdev, true);
+       rtw89_hci_recalc_int_mit(rtwdev);
+
+       rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, mac_addr);
+}
+
+void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
+                             struct ieee80211_vif *vif, bool hw_scan)
+{
+       struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+       ether_addr_copy(rtwvif->mac_addr, vif->addr);
+       rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
+
+       rtw89_chip_rfk_scan(rtwdev, false);
+       rtw89_btc_ntfy_scan_finish(rtwdev, RTW89_PHY_0);
+
+       rtwdev->scanning = false;
+       rtwdev->dig.bypass_dig = true;
+       if (hw_scan && rtwvif->net_type == RTW89_NET_TYPE_NO_LINK)
+               ieee80211_queue_work(rtwdev->hw, &rtwdev->ips_work);
+}
+
 static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev)
 {
+       const struct rtw89_chip_info *chip = rtwdev->chip;
        u8 cv;
 
        cv = rtw89_read32_mask(rtwdev, R_AX_SYS_CFG1, B_AX_CHIP_VER_MASK);
-       if (cv <= CHIP_CBV) {
+       if (chip->chip_id == RTL8852A && cv <= CHIP_CBV) {
                if (rtw89_read32(rtwdev, R_AX_GPIO0_7_FUNC_SEL) == RTW89_R32_DEAD)
                        cv = CHIP_CAV;
                else
@@ -2695,6 +2846,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
        ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
        ieee80211_hw_set(hw, SUPPORTS_PS);
        ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+       ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
 
        hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
                                     BIT(NL80211_IFTYPE_AP);
@@ -2703,6 +2855,9 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
 
        hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
 
+       hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
+       hw->wiphy->max_scan_ie_len = RTW89_SCANOFLD_MAX_IE_LEN;
+
        wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
 
        ret = rtw89_core_set_supported_band(rtwdev);
index deb91f6..7717221 100644 (file)
@@ -13,6 +13,7 @@
 #include <net/mac80211.h>
 
 struct rtw89_dev;
+struct rtw89_pci_info;
 
 extern const struct ieee80211_ops rtw89_ops;
 
@@ -61,6 +62,15 @@ enum rtw89_subband {
        RTW89_CH_5G_BAND_3 = 3,
        RTW89_CH_5G_BAND_4 = 4,
 
+       RTW89_CH_6G_BAND_IDX0, /* Low */
+       RTW89_CH_6G_BAND_IDX1, /* Low */
+       RTW89_CH_6G_BAND_IDX2, /* Mid */
+       RTW89_CH_6G_BAND_IDX3, /* Mid */
+       RTW89_CH_6G_BAND_IDX4, /* High */
+       RTW89_CH_6G_BAND_IDX5, /* High */
+       RTW89_CH_6G_BAND_IDX6, /* Ultra-high */
+       RTW89_CH_6G_BAND_IDX7, /* Ultra-high */
+
        RTW89_SUBBAND_NR,
 };
 
@@ -362,6 +372,25 @@ enum rtw89_hw_rate {
  */
 #define RTW89_5G_CH_NUM 53
 
+/* 6G channels,
+ * 1, 3, 5, 7, 9, 11, 13, 15,
+ * 17, 19, 21, 23, 25, 27, 29, 33,
+ * 35, 37, 39, 41, 43, 45, 47, 49,
+ * 51, 53, 55, 57, 59, 61, 65, 67,
+ * 69, 71, 73, 75, 77, 79, 81, 83,
+ * 85, 87, 89, 91, 93, 97, 99, 101,
+ * 103, 105, 107, 109, 111, 113, 115, 117,
+ * 119, 121, 123, 125, 129, 131, 133, 135,
+ * 137, 139, 141, 143, 145, 147, 149, 151,
+ * 153, 155, 157, 161, 163, 165, 167, 169,
+ * 171, 173, 175, 177, 179, 181, 183, 185,
+ * 187, 189, 193, 195, 197, 199, 201, 203,
+ * 205, 207, 209, 211, 213, 215, 217, 219,
+ * 221, 225, 227, 229, 231, 233, 235, 237,
+ * 239, 241, 243, 245, 247, 249, 251, 253,
+ */
+#define RTW89_6G_CH_NUM 120
+
 enum rtw89_rate_section {
        RTW89_RS_CCK,
        RTW89_RS_OFDM,
@@ -544,7 +573,8 @@ enum rtw89_ps_mode {
 };
 
 #define RTW89_2G_BW_NUM (RTW89_CHANNEL_WIDTH_40 + 1)
-#define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_80 + 1)
+#define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
+#define RTW89_6G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
 #define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_80 + 1)
 
 enum rtw89_ru_bandwidth {
@@ -560,12 +590,17 @@ enum rtw89_sc_offset {
        RTW89_SC_20_LOWER       = 2,
        RTW89_SC_20_UPMOST      = 3,
        RTW89_SC_20_LOWEST      = 4,
+       RTW89_SC_20_UP2X        = 5,
+       RTW89_SC_20_LOW2X       = 6,
+       RTW89_SC_20_UP3X        = 7,
+       RTW89_SC_20_LOW3X       = 8,
        RTW89_SC_40_UPPER       = 9,
        RTW89_SC_40_LOWER       = 10,
 };
 
 struct rtw89_channel_params {
        u8 center_chan;
+       u32 center_freq;
        u8 primary_chan;
        u8 bandwidth;
        u8 pri_ch_idx;
@@ -574,7 +609,7 @@ struct rtw89_channel_params {
 };
 
 struct rtw89_channel_help_params {
-       u16 tx_en;
+       u32 tx_en;
 };
 
 struct rtw89_port_reg {
@@ -1957,6 +1992,8 @@ struct rtw89_vif {
        struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS];
        struct rtw89_traffic_stats stats;
        struct rtw89_phy_rate_pattern rate_pattern;
+       struct cfg80211_scan_request *scan_req;
+       struct ieee80211_scan_ies *scan_ies;
 };
 
 enum rtw89_lv1_rcvy_step {
@@ -2028,7 +2065,15 @@ struct rtw89_chip_ops {
                           struct ieee80211_rx_status *status);
        void (*bb_ctrl_btc_preagc)(struct rtw89_dev *rtwdev, bool bt_en);
        void (*set_txpwr_ul_tb_offset)(struct rtw89_dev *rtwdev,
-                                      s16 pw_ofst, enum rtw89_mac_idx mac_idx);
+                                      s8 pw_ofst, enum rtw89_mac_idx mac_idx);
+       int (*pwr_on_func)(struct rtw89_dev *rtwdev);
+       int (*pwr_off_func)(struct rtw89_dev *rtwdev);
+       int (*cfg_ctrl_path)(struct rtw89_dev *rtwdev, bool wl);
+       int (*mac_cfg_gnt)(struct rtw89_dev *rtwdev,
+                          const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
+       int (*stop_sch_tx)(struct rtw89_dev *rtwdev, u8 mac_idx,
+                          u32 *tx_en, enum rtw89_sch_tx_sel sel);
+       int (*resume_sch_tx)(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
 
        void (*btc_set_rfe)(struct rtw89_dev *rtwdev);
        void (*btc_init_cfg)(struct rtw89_dev *rtwdev);
@@ -2149,6 +2194,7 @@ struct rtw89_ple_quota {
        u16 bb_rpt;
        u16 wd_rel;
        u16 cpu_io;
+       u16 tx_rpt;
 };
 
 struct rtw89_dle_mem {
@@ -2189,6 +2235,8 @@ struct rtw89_phy_table {
        const struct rtw89_reg2_def *regs;
        u32 n_regs;
        enum rtw89_rf_path rf_path;
+       void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
+                      enum rtw89_rf_path rf_path, void *data);
 };
 
 struct rtw89_txpwr_table {
@@ -2198,6 +2246,21 @@ struct rtw89_txpwr_table {
                     const struct rtw89_txpwr_table *tbl);
 };
 
+struct rtw89_page_regs {
+       u32 hci_fc_ctrl;
+       u32 ch_page_ctrl;
+       u32 ach_page_ctrl;
+       u32 ach_page_info;
+       u32 pub_page_info3;
+       u32 pub_page_ctrl1;
+       u32 pub_page_ctrl2;
+       u32 pub_page_info1;
+       u32 pub_page_info2;
+       u32 wp_page_ctrl1;
+       u32 wp_page_ctrl2;
+       u32 wp_page_info1;
+};
+
 struct rtw89_chip_info {
        enum rtw89_core_chip_id chip_id;
        const struct rtw89_chip_ops *ops;
@@ -2209,6 +2272,7 @@ struct rtw89_chip_info {
        const struct rtw89_dle_mem *dle_mem;
        u32 rf_base_addr[2];
        u8 support_bands;
+       bool support_bw160;
        u8 rf_path_num;
        u8 tx_nss;
        u8 rx_nss;
@@ -2220,6 +2284,8 @@ struct rtw89_chip_info {
        u32 physical_efuse_size;
        u32 logical_efuse_size;
        u32 limit_efuse_size;
+       u32 dav_phy_efuse_size;
+       u32 dav_log_efuse_size;
        u32 phycap_addr;
        u32 phycap_size;
 
@@ -2236,10 +2302,15 @@ struct rtw89_chip_info {
        const s8 (*txpwr_lmt_5g)[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
                                [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
                                [RTW89_REGD_NUM][RTW89_5G_CH_NUM];
+       const s8 (*txpwr_lmt_6g)[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
+                               [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+                               [RTW89_REGD_NUM][RTW89_6G_CH_NUM];
        const s8 (*txpwr_lmt_ru_2g)[RTW89_RU_NUM][RTW89_NTX_NUM]
                                   [RTW89_REGD_NUM][RTW89_2G_CH_NUM];
        const s8 (*txpwr_lmt_ru_5g)[RTW89_RU_NUM][RTW89_NTX_NUM]
                                   [RTW89_REGD_NUM][RTW89_5G_CH_NUM];
+       const s8 (*txpwr_lmt_ru_6g)[RTW89_RU_NUM][RTW89_NTX_NUM]
+                                  [RTW89_REGD_NUM][RTW89_6G_CH_NUM];
 
        u8 txpwr_factor_rf;
        u8 txpwr_factor_mac;
@@ -2262,10 +2333,24 @@ struct rtw89_chip_info {
        u8 rf_para_dlink_num;
        const struct rtw89_btc_rf_trx_para *rf_para_dlink;
        u8 ps_mode_supported;
+
+       u32 hci_func_en_addr;
+       u32 h2c_ctrl_reg;
+       const u32 *h2c_regs;
+       u32 c2h_ctrl_reg;
+       const u32 *c2h_regs;
+       const struct rtw89_page_regs *page_regs;
+       const struct rtw89_reg_def *dcfo_comp;
+       u8 dcfo_comp_sft;
+};
+
+union rtw89_bus_info {
+       const struct rtw89_pci_info *pci;
 };
 
 struct rtw89_driver_info {
        const struct rtw89_chip_info *chip;
+       union rtw89_bus_info bus;
 };
 
 enum rtw89_hcifc_mode {
@@ -2333,6 +2418,8 @@ struct rtw89_fw_info {
        struct rtw89_fw_suit wowlan;
        bool fw_log_enable;
        bool old_ht_ra_format;
+       bool scan_offload;
+       bool tx_wake;
 };
 
 struct rtw89_cam_info {
@@ -2369,10 +2456,12 @@ struct rtw89_hal {
        u32 rx_fltr;
        u8 cv;
        u8 current_channel;
+       u32 current_freq;
        u8 prev_primary_channel;
        u8 current_primary_channel;
        enum rtw89_subband current_subband;
        u8 current_band_width;
+       u8 prev_band_type;
        u8 current_band_type;
        u32 sw_amsdu_max_size;
        u32 antenna_tx;
@@ -2383,6 +2472,7 @@ struct rtw89_hal {
 };
 
 #define RTW89_MAX_MAC_ID_NUM 128
+#define RTW89_MAX_PKT_OFLD_NUM 255
 
 enum rtw89_flags {
        RTW89_FLAG_POWERON,
@@ -2578,22 +2668,30 @@ struct rtw89_cfo_tracking_info {
        s32 residual_cfo_acc;
        u8 phy_cfotrk_state;
        u8 phy_cfotrk_cnt;
+       bool divergence_lock_en;
+       u8 x_cap_lb;
+       u8 x_cap_ub;
+       u8 lock_cnt;
 };
 
 /* 2GL, 2GH, 5GL1, 5GL2, 5GM1, 5GM2, 5GH1, 5GH2 */
 #define TSSI_TRIM_CH_GROUP_NUM 8
+#define TSSI_TRIM_CH_GROUP_NUM_6G 16
 
 #define TSSI_CCK_CH_GROUP_NUM 6
 #define TSSI_MCS_2G_CH_GROUP_NUM 5
 #define TSSI_MCS_5G_CH_GROUP_NUM 14
+#define TSSI_MCS_6G_CH_GROUP_NUM 32
 #define TSSI_MCS_CH_GROUP_NUM \
        (TSSI_MCS_2G_CH_GROUP_NUM + TSSI_MCS_5G_CH_GROUP_NUM)
 
 struct rtw89_tssi_info {
        u8 thermal[RF_PATH_MAX];
        s8 tssi_trim[RF_PATH_MAX][TSSI_TRIM_CH_GROUP_NUM];
+       s8 tssi_trim_6g[RF_PATH_MAX][TSSI_TRIM_CH_GROUP_NUM_6G];
        s8 tssi_cck[RF_PATH_MAX][TSSI_CCK_CH_GROUP_NUM];
        s8 tssi_mcs[RF_PATH_MAX][TSSI_MCS_CH_GROUP_NUM];
+       s8 tssi_6g_mcs[RF_PATH_MAX][TSSI_MCS_6G_CH_GROUP_NUM];
        s8 extra_ofst[RF_PATH_MAX];
        bool tssi_tracking_check[RF_PATH_MAX];
        u8 default_txagc_offset[RF_PATH_MAX];
@@ -2791,12 +2889,23 @@ struct rtw89_early_h2c {
        u16 h2c_len;
 };
 
+struct rtw89_hw_scan_info {
+       struct ieee80211_vif *scanning_vif;
+       struct list_head pkt_list[NUM_NL80211_BANDS];
+       u8 op_pri_ch;
+       u8 op_chan;
+       u8 op_bw;
+       u8 op_band;
+};
+
 struct rtw89_dev {
        struct ieee80211_hw *hw;
        struct device *dev;
 
        bool dbcc_en;
+       struct rtw89_hw_scan_info scan_info;
        const struct rtw89_chip_info *chip;
+       const struct rtw89_pci_info *pci_info;
        struct rtw89_hal hal;
        struct rtw89_mac_info mac;
        struct rtw89_fw_info fw;
@@ -2817,11 +2926,14 @@ struct rtw89_dev {
        /* txqs to setup ba session */
        struct list_head ba_list;
        struct work_struct ba_work;
+       /* used to protect rpwm */
+       spinlock_t rpwm_lock;
 
        struct rtw89_cam_info cam_info;
 
        struct sk_buff_head c2h_queue;
        struct work_struct c2h_work;
+       struct work_struct ips_work;
 
        struct list_head early_h2c_list;
 
@@ -2830,6 +2942,7 @@ struct rtw89_dev {
        DECLARE_BITMAP(hw_port, RTW89_PORT_NUM);
        DECLARE_BITMAP(mac_id_map, RTW89_MAX_MAC_ID_NUM);
        DECLARE_BITMAP(flags, NUM_OF_RTW89_FLAGS);
+       DECLARE_BITMAP(pkt_offload, RTW89_MAX_PKT_OFLD_NUM);
 
        struct rtw89_phy_stat phystat;
        struct rtw89_dack_info dack;
@@ -2869,7 +2982,7 @@ struct rtw89_dev {
        int napi_budget_countdown;
 
        /* HCI related data, keep last */
-       u8 priv[0] __aligned(sizeof(void *));
+       u8 priv[] __aligned(sizeof(void *));
 };
 
 static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
@@ -3150,6 +3263,37 @@ static inline struct rtw89_sta *sta_to_rtwsta_safe(struct ieee80211_sta *sta)
        return sta ? (struct rtw89_sta *)sta->drv_priv : NULL;
 }
 
+static inline u8 rtw89_hw_to_rate_info_bw(enum rtw89_bandwidth hw_bw)
+{
+       if (hw_bw == RTW89_CHANNEL_WIDTH_160)
+               return RATE_INFO_BW_160;
+       else if (hw_bw == RTW89_CHANNEL_WIDTH_80)
+               return RATE_INFO_BW_80;
+       else if (hw_bw == RTW89_CHANNEL_WIDTH_40)
+               return RATE_INFO_BW_40;
+       else
+               return RATE_INFO_BW_20;
+}
+
+static inline
+enum rtw89_bandwidth nl_to_rtw89_bandwidth(enum nl80211_chan_width width)
+{
+       switch (width) {
+       default:
+               WARN(1, "Unsupported bandwidth %d\n", width);
+               fallthrough;
+       case NL80211_CHAN_WIDTH_20_NOHT:
+       case NL80211_CHAN_WIDTH_20:
+               return RTW89_CHANNEL_WIDTH_20;
+       case NL80211_CHAN_WIDTH_40:
+               return RTW89_CHANNEL_WIDTH_40;
+       case NL80211_CHAN_WIDTH_80:
+               return RTW89_CHANNEL_WIDTH_80;
+       case NL80211_CHAN_WIDTH_160:
+               return RTW89_CHANNEL_WIDTH_160;
+       }
+}
+
 static inline
 struct rtw89_addr_cam_entry *rtw89_get_addr_cam_of(struct rtw89_vif *rtwvif,
                                                   struct rtw89_sta *rtwsta)
@@ -3329,6 +3473,39 @@ static inline void rtw89_ctrl_btg(struct rtw89_dev *rtwdev, bool btg)
                chip->ops->ctrl_btg(rtwdev, btg);
 }
 
+static inline
+void rtw89_chip_mac_cfg_gnt(struct rtw89_dev *rtwdev,
+                           const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
+{
+       const struct rtw89_chip_info *chip = rtwdev->chip;
+
+       chip->ops->mac_cfg_gnt(rtwdev, gnt_cfg);
+}
+
+static inline void rtw89_chip_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl)
+{
+       const struct rtw89_chip_info *chip = rtwdev->chip;
+
+       chip->ops->cfg_ctrl_path(rtwdev, wl);
+}
+
+static inline
+int rtw89_chip_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
+                          u32 *tx_en, enum rtw89_sch_tx_sel sel)
+{
+       const struct rtw89_chip_info *chip = rtwdev->chip;
+
+       return chip->ops->stop_sch_tx(rtwdev, mac_idx, tx_en, sel);
+}
+
+static inline
+int rtw89_chip_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
+{
+       const struct rtw89_chip_info *chip = rtwdev->chip;
+
+       return chip->ops->resume_sch_tx(rtwdev, mac_idx, tx_en);
+}
+
 static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
 {
        __le16 fc = hdr->frame_control;
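
These wrappers are what the _set_gnt_wl()/_set_ant() hunks at the top of this series switched to: instead of calling rtw89_mac_cfg_gnt() and rtw89_mac_cfg_ctrl_path() directly, coex code dispatches through the per-chip ops table so another chip can supply different register sequences. A minimal sketch of the indirection (illustrative types, not the driver's):

#include <stdio.h>

struct dev;

struct chip_ops {
        void (*cfg_ctrl_path)(struct dev *d, int wl);
};

struct dev {
        const struct chip_ops *ops;
};

static void chip_a_cfg_ctrl_path(struct dev *d, int wl)
{
        printf("chip A ctrl path, wl=%d\n", wl);
}

static const struct chip_ops chip_a_ops = {
        .cfg_ctrl_path = chip_a_cfg_ctrl_path,
};

/* wrapper in the style of rtw89_chip_cfg_ctrl_path() */
static void chip_cfg_ctrl_path(struct dev *d, int wl)
{
        d->ops->cfg_ctrl_path(d, wl);
}

int main(void)
{
        struct dev d = { .ops = &chip_a_ops };
        chip_cfg_ctrl_path(&d, 1);
        return 0;
}
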
@@ -3415,5 +3592,9 @@ void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
 int rtw89_core_start(struct rtw89_dev *rtwdev);
 void rtw89_core_stop(struct rtw89_dev *rtwdev);
 void rtw89_core_update_beacon_work(struct work_struct *work);
+void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+                          const u8 *mac_addr, bool hw_scan);
+void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
+                             struct ieee80211_vif *vif, bool hw_scan);
 
 #endif
index f14b726..1745815 100644 (file)
@@ -23,6 +23,7 @@ enum rtw89_debug_mask {
        RTW89_DBG_FW = BIT(12),
        RTW89_DBG_BTC = BIT(13),
        RTW89_DBG_BF = BIT(14),
+       RTW89_DBG_HW_SCAN = BIT(15),
 };
 
 enum rtw89_debug_mac_reg_sel {
index c0b80f3..7bd4f85 100644 (file)
@@ -4,6 +4,7 @@
 
 #include "debug.h"
 #include "efuse.h"
+#include "mac.h"
 #include "reg.h"
 
 enum rtw89_efuse_bank {
@@ -16,6 +17,9 @@ static int rtw89_switch_efuse_bank(struct rtw89_dev *rtwdev,
 {
        u8 val;
 
+       if (rtwdev->chip->chip_id != RTL8852A)
+               return 0;
+
        val = rtw89_read32_mask(rtwdev, R_AX_EFUSE_CTRL_1,
                                B_AX_EF_CELL_SEL_MASK);
        if (bank == val)
@@ -32,14 +36,61 @@ static int rtw89_switch_efuse_bank(struct rtw89_dev *rtwdev,
        return -EBUSY;
 }
 
-static int rtw89_dump_physical_efuse_map(struct rtw89_dev *rtwdev, u8 *map,
-                                        u32 dump_addr, u32 dump_size)
+static void rtw89_enable_otp_burst_mode(struct rtw89_dev *rtwdev, bool en)
+{
+       if (en)
+               rtw89_write32_set(rtwdev, R_AX_EFUSE_CTRL_1_V1, B_AX_EF_BURST);
+       else
+               rtw89_write32_clr(rtwdev, R_AX_EFUSE_CTRL_1_V1, B_AX_EF_BURST);
+}
+
+static void rtw89_enable_efuse_pwr_cut_ddv(struct rtw89_dev *rtwdev)
+{
+       enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+       struct rtw89_hal *hal = &rtwdev->hal;
+
+       if (chip_id == RTL8852A)
+               return;
+
+       rtw89_write8_set(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+       rtw89_write16_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B14);
+
+       fsleep(1000);
+
+       rtw89_write16_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B15);
+       rtw89_write16_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_ISO_EB2CORE);
+       if (chip_id == RTL8852B && hal->cv == CHIP_CAV)
+               rtw89_enable_otp_burst_mode(rtwdev, true);
+}
+
+static void rtw89_disable_efuse_pwr_cut_ddv(struct rtw89_dev *rtwdev)
+{
+       enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+       struct rtw89_hal *hal = &rtwdev->hal;
+
+       if (chip_id == RTL8852A)
+               return;
+
+       if (chip_id == RTL8852B && hal->cv == CHIP_CAV)
+               rtw89_enable_otp_burst_mode(rtwdev, false);
+
+       rtw89_write16_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_ISO_EB2CORE);
+       rtw89_write16_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B15);
+
+       fsleep(1000);
+
+       rtw89_write16_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B14);
+       rtw89_write8_clr(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+}
+
+static int rtw89_dump_physical_efuse_map_ddv(struct rtw89_dev *rtwdev, u8 *map,
+                                            u32 dump_addr, u32 dump_size)
 {
        u32 efuse_ctl;
        u32 addr;
        int ret;
 
-       rtw89_switch_efuse_bank(rtwdev, RTW89_EFUSE_BANK_WIFI);
+       rtw89_enable_efuse_pwr_cut_ddv(rtwdev);
 
        for (addr = dump_addr; addr < dump_addr + dump_size; addr++) {
                efuse_ctl = u32_encode_bits(addr, B_AX_EF_ADDR_MASK);
@@ -54,6 +105,74 @@ static int rtw89_dump_physical_efuse_map(struct rtw89_dev *rtwdev, u8 *map,
                *map++ = (u8)(efuse_ctl & 0xff);
        }
 
+       rtw89_disable_efuse_pwr_cut_ddv(rtwdev);
+
+       return 0;
+}
+
+static int rtw89_dump_physical_efuse_map_dav(struct rtw89_dev *rtwdev, u8 *map,
+                                            u32 dump_addr, u32 dump_size)
+{
+       u32 addr;
+       u8 val8;
+       int err;
+       int ret;
+
+       for (addr = dump_addr; addr < dump_addr + dump_size; addr++) {
+               ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, 0x40, FULL_BIT_MASK);
+               if (ret)
+                       return ret;
+               ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_LOW_ADDR,
+                                             addr & 0xff, XTAL_SI_LOW_ADDR_MASK);
+               if (ret)
+                       return ret;
+               ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, addr >> 8,
+                                             XTAL_SI_HIGH_ADDR_MASK);
+               if (ret)
+                       return ret;
+               ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, 0,
+                                             XTAL_SI_MODE_SEL_MASK);
+               if (ret)
+                       return ret;
+
+               ret = read_poll_timeout_atomic(rtw89_mac_read_xtal_si, err,
+                                              !err && (val8 & XTAL_SI_RDY),
+                                              1, 10000, false,
+                                              rtwdev, XTAL_SI_CTRL, &val8);
+               if (ret) {
+                       rtw89_warn(rtwdev, "failed to read dav efuse\n");
+                       return ret;
+               }
+
+               ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_READ_VAL, &val8);
+               if (ret)
+                       return ret;
+               *map++ = val8;
+       }
+
+       return 0;
+}
+
+static int rtw89_dump_physical_efuse_map(struct rtw89_dev *rtwdev, u8 *map,
+                                        u32 dump_addr, u32 dump_size, bool dav)
+{
+       int ret;
+
+       if (!map || dump_size == 0)
+               return 0;
+
+       rtw89_switch_efuse_bank(rtwdev, RTW89_EFUSE_BANK_WIFI);
+
+       if (dav) {
+               ret = rtw89_dump_physical_efuse_map_dav(rtwdev, map, dump_addr, dump_size);
+               if (ret)
+                       return ret;
+       } else {
+               ret = rtw89_dump_physical_efuse_map_ddv(rtwdev, map, dump_addr, dump_size);
+               if (ret)
+                       return ret;
+       }
+
        return 0;
 }
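
The DAV read above goes through the XTAL_SI indirect interface and waits on XTAL_SI_RDY with read_poll_timeout_atomic(), i.e. bounded busy-polling of a ready bit. A userspace model of that loop (poll budget and ready bit are stand-ins):

#include <stdio.h>

static int reads;

static int read_ctrl(unsigned char *val) /* stands in for the SI read */
{
        *val = (++reads >= 3) ? 0x80 : 0; /* "ready" after three polls */
        return 0;
}

static int poll_ready(unsigned char ready_bit, int max_polls)
{
        unsigned char v;
        int i;

        for (i = 0; i < max_polls; i++)
                if (!read_ctrl(&v) && (v & ready_bit))
                        return 0;
        return -1; /* the kernel helper returns -ETIMEDOUT here */
}

int main(void)
{
        printf("ret=%d after %d reads\n", poll_ready(0x80, 10000), reads);
        return 0;
}
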
 
@@ -78,6 +197,9 @@ static int rtw89_dump_logical_efuse_map(struct rtw89_dev *rtwdev, u8 *phy_map,
        u8 word_en;
        int i;
 
+       if (!phy_map)
+               return 0;
+
        while (phy_idx < physical_size - sec_ctrl_size) {
                hdr1 = phy_map[phy_idx];
                hdr2 = phy_map[phy_idx + 1];
@@ -109,8 +231,13 @@ int rtw89_parse_efuse_map(struct rtw89_dev *rtwdev)
 {
        u32 phy_size = rtwdev->chip->physical_efuse_size;
        u32 log_size = rtwdev->chip->logical_efuse_size;
+       u32 dav_phy_size = rtwdev->chip->dav_phy_efuse_size;
+       u32 dav_log_size = rtwdev->chip->dav_log_efuse_size;
+       u32 full_log_size = log_size + dav_log_size;
        u8 *phy_map = NULL;
        u8 *log_map = NULL;
+       u8 *dav_phy_map = NULL;
+       u8 *dav_log_map = NULL;
        int ret;
 
        if (rtw89_read16(rtwdev, R_AX_SYS_WL_EFUSE_CTRL) & B_AX_AUTOLOAD_SUS)
@@ -119,27 +246,41 @@ int rtw89_parse_efuse_map(struct rtw89_dev *rtwdev)
                rtw89_warn(rtwdev, "failed to check efuse autoload\n");
 
        phy_map = kmalloc(phy_size, GFP_KERNEL);
-       log_map = kmalloc(log_size, GFP_KERNEL);
+       log_map = kmalloc(full_log_size, GFP_KERNEL);
+       if (dav_phy_size && dav_log_size) {
+               dav_phy_map = kmalloc(dav_phy_size, GFP_KERNEL);
+               dav_log_map = log_map + log_size;
+       }
 
-       if (!phy_map || !log_map) {
+       if (!phy_map || !log_map || (dav_phy_size && !dav_phy_map)) {
                ret = -ENOMEM;
                goto out_free;
        }
 
-       ret = rtw89_dump_physical_efuse_map(rtwdev, phy_map, 0, phy_size);
+       ret = rtw89_dump_physical_efuse_map(rtwdev, phy_map, 0, phy_size, false);
        if (ret) {
                rtw89_warn(rtwdev, "failed to dump efuse physical map\n");
                goto out_free;
        }
+       ret = rtw89_dump_physical_efuse_map(rtwdev, dav_phy_map, 0, dav_phy_size, true);
+       if (ret) {
+               rtw89_warn(rtwdev, "failed to dump efuse dav physical map\n");
+               goto out_free;
+       }
 
-       memset(log_map, 0xff, log_size);
+       memset(log_map, 0xff, full_log_size);
        ret = rtw89_dump_logical_efuse_map(rtwdev, phy_map, log_map);
        if (ret) {
                rtw89_warn(rtwdev, "failed to dump efuse logical map\n");
                goto out_free;
        }
+       ret = rtw89_dump_logical_efuse_map(rtwdev, dav_phy_map, dav_log_map);
+       if (ret) {
+               rtw89_warn(rtwdev, "failed to dump efuse dav logical map\n");
+               goto out_free;
+       }
 
-       rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "log_map: ", log_map, log_size);
+       rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "log_map: ", log_map, full_log_size);
 
        ret = rtwdev->chip->ops->read_efuse(rtwdev, log_map);
        if (ret) {
@@ -148,6 +289,7 @@ int rtw89_parse_efuse_map(struct rtw89_dev *rtwdev)
        }
 
 out_free:
+       kfree(dav_phy_map);
        kfree(log_map);
        kfree(phy_map);
 
@@ -169,7 +311,7 @@ int rtw89_parse_phycap_map(struct rtw89_dev *rtwdev)
                return -ENOMEM;
 
        ret = rtw89_dump_physical_efuse_map(rtwdev, phycap_map,
-                                           phycap_addr, phycap_size);
+                                           phycap_addr, phycap_size, false);
        if (ret) {
                rtw89_warn(rtwdev, "failed to dump phycap map\n");
                goto out_free;
index 7fa60fd..6deaf8e 100644 (file)
@@ -201,6 +201,14 @@ static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
        if (chip->chip_id == RTL8852A &&
            RTW89_FW_SUIT_VER_CODE(fw_suit) <= RTW89_FW_VER_CODE(0, 13, 29, 0))
                rtwdev->fw.old_ht_ra_format = true;
+
+       if (chip->chip_id == RTL8852A &&
+           RTW89_FW_SUIT_VER_CODE(fw_suit) >= RTW89_FW_VER_CODE(0, 13, 35, 0))
+               rtwdev->fw.scan_offload = true;
+
+       if (chip->chip_id == RTL8852A &&
+           RTW89_FW_SUIT_VER_CODE(fw_suit) >= RTW89_FW_VER_CODE(0, 13, 35, 0))
+               rtwdev->fw.tx_wake = true;
 }
 
 int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
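
Both new feature flags above gate on RTW89_FW_SUIT_VER_CODE comparisons, which work because the major/minor/sub/idx quadruple is packed into a single integer so an ordinary >= compares releases. A sketch of the idea (the shift layout here is assumed, not taken from the header):

#include <stdio.h>
#include <stdint.h>

static uint32_t ver_code(uint8_t major, uint8_t minor, uint8_t sub, uint8_t idx)
{
        return ((uint32_t)major << 24) | ((uint32_t)minor << 16) |
               ((uint32_t)sub << 8) | idx;
}

int main(void)
{
        uint32_t fw = ver_code(0, 13, 36, 0);

        printf("scan_offload=%d\n", fw >= ver_code(0, 13, 35, 0)); /* 1 */
        return 0;
}
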
@@ -1467,6 +1475,198 @@ fail:
        return -EBUSY;
 }
 
+#define H2C_LEN_PKT_OFLD 4
+int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
+{
+       struct sk_buff *skb;
+       u8 *cmd;
+
+       skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_PKT_OFLD);
+       if (!skb) {
+               rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
+               return -ENOMEM;
+       }
+       skb_put(skb, H2C_LEN_PKT_OFLD);
+       cmd = skb->data;
+
+       RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id);
+       RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL);
+
+       rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+                             H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+                             H2C_FUNC_PACKET_OFLD, 1, 1,
+                             H2C_LEN_PKT_OFLD);
+
+       if (rtw89_h2c_tx(rtwdev, skb, false)) {
+               rtw89_err(rtwdev, "failed to send h2c\n");
+               goto fail;
+       }
+
+       return 0;
+fail:
+       dev_kfree_skb_any(skb);
+
+       return -EBUSY;
+}
+
+int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
+                                struct sk_buff *skb_ofld)
+{
+       struct sk_buff *skb;
+       u8 *cmd;
+       u8 alloc_id;
+
+       alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload,
+                                             RTW89_MAX_PKT_OFLD_NUM);
+       if (alloc_id == RTW89_MAX_PKT_OFLD_NUM)
+               return -ENOSPC;
+
+       *id = alloc_id;
+
+       skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_PKT_OFLD + skb_ofld->len);
+       if (!skb) {
+               rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
+               return -ENOMEM;
+       }
+       skb_put(skb, H2C_LEN_PKT_OFLD);
+       cmd = skb->data;
+
+       RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id);
+       RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD);
+       RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len);
+       skb_put_data(skb, skb_ofld->data, skb_ofld->len);
+
+       rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+                             H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+                             H2C_FUNC_PACKET_OFLD, 1, 1,
+                             H2C_LEN_PKT_OFLD + skb_ofld->len);
+
+       if (rtw89_h2c_tx(rtwdev, skb, false)) {
+               rtw89_err(rtwdev, "failed to send h2c\n");
+               goto fail;
+       }
+
+       return 0;
+fail:
+       dev_kfree_skb_any(skb);
+
+       return -EBUSY;
+}
+
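
rtw89_fw_h2c_add_pkt_offload() above leans on the pkt_offload bitmap declared earlier in this patch: rtw89_core_acquire_bit_map() hands out the first clear bit and returns RTW89_MAX_PKT_OFLD_NUM as the "full" sentinel, which the -ENOSPC check relies on. A hedged standalone model of that allocator:

#include <stdio.h>

#define MAX_PKT_OFLD_NUM 255

static unsigned long bitmap[(MAX_PKT_OFLD_NUM + 63) / 64];

static unsigned int acquire_bit_map(void)
{
        unsigned int i;

        for (i = 0; i < MAX_PKT_OFLD_NUM; i++)
                if (!(bitmap[i / 64] & (1UL << (i % 64)))) {
                        bitmap[i / 64] |= 1UL << (i % 64);
                        return i;
                }
        return MAX_PKT_OFLD_NUM; /* full: caller turns this into -ENOSPC */
}

static void release_bit_map(unsigned int i)
{
        bitmap[i / 64] &= ~(1UL << (i % 64));
}

int main(void)
{
        unsigned int a = acquire_bit_map(), b = acquire_bit_map();

        release_bit_map(a);
        printf("a=%u b=%u reused=%u\n", a, b, acquire_bit_map());
        return 0;
}
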
+#define H2C_LEN_SCAN_LIST_OFFLOAD 4
+int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
+                                  struct list_head *chan_list)
+{
+       struct rtw89_mac_chinfo *ch_info;
+       struct sk_buff *skb;
+       int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
+       u8 *cmd;
+
+       skb = rtw89_fw_h2c_alloc_skb_with_hdr(skb_len);
+       if (!skb) {
+               rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
+               return -ENOMEM;
+       }
+       skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD);
+       cmd = skb->data;
+
+       RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len);
+       /* in units of 4 bytes */
+       RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4);
+
+       list_for_each_entry(ch_info, chan_list, list) {
+               cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE);
+
+               RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period);
+               RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time);
+               RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch);
+               RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch);
+               RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw);
+               RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action);
+               RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt);
+               RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt);
+               RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data);
+               RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band);
+               RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id);
+               RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch);
+               RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null);
+               RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num);
+               RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]);
+               RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]);
+               RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]);
+               RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]);
+               RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]);
+               RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]);
+               RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]);
+               RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]);
+       }
+
+       rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+                             H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+                             H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
+
+       if (rtw89_h2c_tx(rtwdev, skb, false)) {
+               rtw89_err(rtwdev, "failed to send h2c\n");
+               goto fail;
+       }
+
+       return 0;
+fail:
+       dev_kfree_skb_any(skb);
+
+       return -EBUSY;
+}
+
+#define H2C_LEN_SCAN_OFFLOAD 20
+int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
+                             struct rtw89_scan_option *option,
+                             struct rtw89_vif *rtwvif)
+{
+       struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+       struct sk_buff *skb;
+       u8 *cmd;
+
+       skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_SCAN_OFFLOAD);
+       if (!skb) {
+               rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
+               return -ENOMEM;
+       }
+       skb_put(skb, H2C_LEN_SCAN_OFFLOAD);
+       cmd = skb->data;
+
+       RTW89_SET_FWCMD_SCANOFLD_MACID(cmd, rtwvif->mac_id);
+       RTW89_SET_FWCMD_SCANOFLD_PORT_ID(cmd, rtwvif->port);
+       RTW89_SET_FWCMD_SCANOFLD_BAND(cmd, RTW89_PHY_0);
+       RTW89_SET_FWCMD_SCANOFLD_OPERATION(cmd, option->enable);
+       RTW89_SET_FWCMD_SCANOFLD_NOTIFY_END(cmd, true);
+       RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_MODE(cmd, option->target_ch_mode);
+       RTW89_SET_FWCMD_SCANOFLD_START_MODE(cmd, RTW89_SCAN_IMMEDIATE);
+       RTW89_SET_FWCMD_SCANOFLD_SCAN_TYPE(cmd, RTW89_SCAN_ONCE);
+       if (option->target_ch_mode) {
+               RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BW(cmd, scan_info->op_bw);
+               RTW89_SET_FWCMD_SCANOFLD_TARGET_PRI_CH(cmd,
+                                                      scan_info->op_pri_ch);
+               RTW89_SET_FWCMD_SCANOFLD_TARGET_CENTRAL_CH(cmd,
+                                                          scan_info->op_chan);
+       }
+
+       rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+                             H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+                             H2C_FUNC_SCANOFLD, 1, 1,
+                             H2C_LEN_SCAN_OFFLOAD);
+
+       if (rtw89_h2c_tx(rtwdev, skb, false)) {
+               rtw89_err(rtwdev, "failed to send h2c\n");
+               goto fail;
+       }
+
+       return 0;
+fail:
+       dev_kfree_skb_any(skb);
+
+       return -EBUSY;
+}
+
 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
                        struct rtw89_fw_h2c_rf_reg_info *info,
                        u16 len, u8 page)
@@ -1632,15 +1832,13 @@ void rtw89_fw_c2h_work(struct work_struct *work)
 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
                                  struct rtw89_mac_h2c_info *info)
 {
-       static const u32 h2c_reg[RTW89_H2CREG_MAX] = {
-               R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1,
-               R_AX_H2CREG_DATA2, R_AX_H2CREG_DATA3
-       };
+       const struct rtw89_chip_info *chip = rtwdev->chip;
+       const u32 *h2c_reg = chip->h2c_regs;
        u8 i, val, len;
        int ret;
 
        ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
-                               rtwdev, R_AX_H2CREG_CTRL);
+                               rtwdev, chip->h2c_ctrl_reg);
        if (ret) {
                rtw89_warn(rtwdev, "FW does not process h2c registers\n");
                return ret;
@@ -1654,7 +1852,7 @@ static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
        for (i = 0; i < RTW89_H2CREG_MAX; i++)
                rtw89_write32(rtwdev, h2c_reg[i], info->h2creg[i]);
 
-       rtw89_write8(rtwdev, R_AX_H2CREG_CTRL, B_AX_H2CREG_TRIGGER);
+       rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);
 
        return 0;
 }
@@ -1662,10 +1860,8 @@ static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
                                 struct rtw89_mac_c2h_info *info)
 {
-       static const u32 c2h_reg[RTW89_C2HREG_MAX] = {
-               R_AX_C2HREG_DATA0, R_AX_C2HREG_DATA1,
-               R_AX_C2HREG_DATA2, R_AX_C2HREG_DATA3
-       };
+       const struct rtw89_chip_info *chip = rtwdev->chip;
+       const u32 *c2h_reg = chip->c2h_regs;
        u32 ret;
        u8 i, val;
 
@@ -1673,7 +1869,7 @@ static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
 
        ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
                                       RTW89_C2H_TIMEOUT, false, rtwdev,
-                                      R_AX_C2HREG_CTRL);
+                                      chip->c2h_ctrl_reg);
        if (ret) {
                rtw89_warn(rtwdev, "c2h reg timeout\n");
                return ret;
@@ -1682,7 +1878,7 @@ static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
        for (i = 0; i < RTW89_C2HREG_MAX; i++)
                info->c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);
 
-       rtw89_write8(rtwdev, R_AX_C2HREG_CTRL, 0);
+       rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);
 
        info->id = RTW89_GET_C2H_HDR_FUNC(*info->c2hreg);
        info->content_len = (RTW89_GET_C2H_HDR_LEN(*info->c2hreg) << 2) -
@@ -1739,3 +1935,322 @@ void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
 
        rtw89_fw_prog_cnt_dump(rtwdev);
 }
+
+static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
+{
+       struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
+       struct rtw89_pktofld_info *info, *tmp;
+       u8 idx;
+
+       for (idx = RTW89_BAND_2G; idx < NUM_NL80211_BANDS; idx++) {
+               if (!(rtwdev->chip->support_bands & BIT(idx)))
+                       continue;
+
+               list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
+                       rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
+                       rtw89_core_release_bit_map(rtwdev->pkt_offload,
+                                                  info->id);
+                       list_del(&info->list);
+                       kfree(info);
+               }
+       }
+}
+
+static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
+                                    struct rtw89_vif *rtwvif,
+                                    struct sk_buff *skb)
+{
+       struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+       struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
+       struct rtw89_pktofld_info *info;
+       struct sk_buff *new;
+       int ret = 0;
+       u8 band;
+
+       for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+               if (!(rtwdev->chip->support_bands & BIT(band)))
+                       continue;
+
+               new = skb_copy(skb, GFP_KERNEL);
+               if (!new) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               skb_put_data(new, ies->ies[band], ies->len[band]);
+               skb_put_data(new, ies->common_ies, ies->common_ie_len);
+
+               info = kzalloc(sizeof(*info), GFP_KERNEL);
+               if (!info) {
+                       ret = -ENOMEM;
+                       kfree_skb(new);
+                       goto out;
+               }
+
+               list_add_tail(&info->list, &scan_info->pkt_list[band]);
+               ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
+               if (ret)
+                       goto out;
+
+               kfree_skb(new);
+       }
+out:
+       return ret;
+}
+
+static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
+                                         struct rtw89_vif *rtwvif)
+{
+       struct cfg80211_scan_request *req = rtwvif->scan_req;
+       struct sk_buff *skb;
+       u8 num = req->n_ssids, i;
+       int ret;
+
+       for (i = 0; i < num; i++) {
+               skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
+                                            req->ssids[i].ssid,
+                                            req->ssids[i].ssid_len,
+                                            req->ie_len);
+               if (!skb)
+                       return -ENOMEM;
+
+               ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb);
+               kfree_skb(skb);
+
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
+                                  int ssid_num,
+                                  struct rtw89_mac_chinfo *ch_info)
+{
+       struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+       struct rtw89_pktofld_info *info;
+       u8 band, probe_count = 0;
+
+       ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
+       ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
+       ch_info->bw = RTW89_SCAN_WIDTH;
+       ch_info->tx_pkt = true;
+       ch_info->cfg_tx_pwr = false;
+       ch_info->tx_pwr_idx = 0;
+       ch_info->tx_null = false;
+       ch_info->pause_data = false;
+
+       if (ssid_num) {
+               ch_info->num_pkt = ssid_num;
+               band = ch_info->ch_band;
+
+               list_for_each_entry(info, &scan_info->pkt_list[band], list) {
+                       ch_info->probe_id = info->id;
+                       ch_info->pkt_id[probe_count] = info->id;
+                       if (++probe_count >= ssid_num)
+                               break;
+               }
+               if (probe_count != ssid_num)
+                       rtw89_err(rtwdev, "SSID num differs from list len\n");
+       }
+
+       switch (chan_type) {
+       case RTW89_CHAN_OPERATE:
+               ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
+               ch_info->central_ch = scan_info->op_chan;
+               ch_info->pri_ch = scan_info->op_pri_ch;
+               ch_info->ch_band = scan_info->op_band;
+               ch_info->bw = scan_info->op_bw;
+               ch_info->tx_null = true;
+               ch_info->num_pkt = 0;
+               break;
+       case RTW89_CHAN_DFS:
+               ch_info->period = min_t(u8, ch_info->period,
+                                       RTW89_DFS_CHAN_TIME);
+               ch_info->dwell_time = RTW89_DWELL_TIME;
+               break;
+       case RTW89_CHAN_ACTIVE:
+               break;
+       default:
+               rtw89_err(rtwdev, "Channel type out of bounds\n");
+       }
+}
+
+static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
+                                      struct rtw89_vif *rtwvif)
+{
+       struct cfg80211_scan_request *req = rtwvif->scan_req;
+       struct rtw89_mac_chinfo *ch_info, *tmp;
+       struct ieee80211_channel *channel;
+       struct list_head chan_list;
+       bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
+       int list_len = req->n_channels, off_chan_time = 0;
+       enum rtw89_chan_type type;
+       int ret = 0, i;
+
+       INIT_LIST_HEAD(&chan_list);
+       for (i = 0; i < req->n_channels; i++) {
+               channel = req->channels[i];
+               ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
+               if (!ch_info) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               ch_info->period = req->duration_mandatory ?
+                                 req->duration : RTW89_CHANNEL_TIME;
+               ch_info->ch_band = channel->band;
+               ch_info->central_ch = channel->hw_value;
+               ch_info->pri_ch = channel->hw_value;
+               ch_info->rand_seq_num = random_seq;
+
+               if (channel->flags &
+                   (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
+                       type = RTW89_CHAN_DFS;
+               else
+                       type = RTW89_CHAN_ACTIVE;
+               rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info);
+
+               if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK &&
+                   off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
+                       tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+                       if (!tmp) {
+                               ret = -ENOMEM;
+                               kfree(ch_info);
+                               goto out;
+                       }
+
+                       type = RTW89_CHAN_OPERATE;
+                       tmp->period = req->duration_mandatory ?
+                                     req->duration : RTW89_CHANNEL_TIME;
+                       rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp);
+                       list_add_tail(&tmp->list, &chan_list);
+                       off_chan_time = 0;
+                       list_len++;
+               }
+               list_add_tail(&ch_info->list, &chan_list);
+               off_chan_time += ch_info->period;
+       }
+       rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
+
+out:
+       list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
+               list_del(&ch_info->list);
+               kfree(ch_info);
+       }
+
+       return ret;
+}
+
+static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
+                                  struct rtw89_vif *rtwvif)
+{
+       int ret;
+
+       ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif);
+       if (ret) {
+               rtw89_err(rtwdev, "Update probe request failed\n");
+               goto out;
+       }
+       ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif);
+out:
+       return ret;
+}
+
+void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+                        struct ieee80211_scan_request *scan_req)
+{
+       struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+       struct cfg80211_scan_request *req = &scan_req->req;
+       u8 mac_addr[ETH_ALEN];
+
+       rtwdev->scan_info.scanning_vif = vif;
+       rtwvif->scan_ies = &scan_req->ies;
+       rtwvif->scan_req = req;
+       ieee80211_stop_queues(rtwdev->hw);
+
+       if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
+               get_random_mask_addr(mac_addr, req->mac_addr,
+                                    req->mac_addr_mask);
+       else
+               ether_addr_copy(mac_addr, vif->addr);
+       rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true);
+
+       rtwdev->hal.rx_fltr &= ~B_AX_A_BCN_CHK_EN;
+       rtwdev->hal.rx_fltr &= ~B_AX_A_BC;
+       rtwdev->hal.rx_fltr &= ~B_AX_A_A1_MATCH;
+       rtw89_write32_mask(rtwdev,
+                          rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
+                          B_AX_RX_FLTR_CFG_MASK,
+                          rtwdev->hal.rx_fltr);
+}
+
+void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+                           bool aborted)
+{
+       struct cfg80211_scan_info info = {
+               .aborted = aborted,
+       };
+       struct rtw89_vif *rtwvif;
+
+       if (!vif)
+               return;
+
+       rtwdev->hal.rx_fltr |= B_AX_A_BCN_CHK_EN;
+       rtwdev->hal.rx_fltr |= B_AX_A_BC;
+       rtwdev->hal.rx_fltr |= B_AX_A_A1_MATCH;
+       rtw89_write32_mask(rtwdev,
+                          rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
+                          B_AX_RX_FLTR_CFG_MASK,
+                          rtwdev->hal.rx_fltr);
+
+       rtw89_core_scan_complete(rtwdev, vif, true);
+       ieee80211_scan_completed(rtwdev->hw, &info);
+       ieee80211_wake_queues(rtwdev->hw);
+
+       rtw89_release_pkt_list(rtwdev);
+       rtwvif = (struct rtw89_vif *)vif->drv_priv;
+       rtwvif->scan_req = NULL;
+       rtwvif->scan_ies = NULL;
+       rtwdev->scan_info.scanning_vif = NULL;
+}
+
+void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
+{
+       rtw89_hw_scan_offload(rtwdev, vif, false);
+       rtw89_hw_scan_complete(rtwdev, vif, true);
+}
+
+int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+                         bool enable)
+{
+       struct rtw89_scan_option opt = {0};
+       struct rtw89_vif *rtwvif;
+       int ret = 0;
+
+       rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
+       if (!rtwvif)
+               return -EINVAL;
+
+       opt.enable = enable;
+       opt.target_ch_mode = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK;
+       if (enable) {
+               ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif);
+               if (ret)
+                       goto out;
+       }
+       rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
+out:
+       return ret;
+}
+
+void rtw89_store_op_chan(struct rtw89_dev *rtwdev)
+{
+       struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+       struct rtw89_hal *hal = &rtwdev->hal;
+
+       scan_info->op_pri_ch = hal->current_primary_channel;
+       scan_info->op_chan = hal->current_channel;
+       scan_info->op_bw = hal->current_band_width;
+       scan_info->op_band = hal->current_band_type;
+}
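
The scan entry points added above are designed to back mac80211's hw_scan/cancel_hw_scan callbacks. A minimal sketch of that wiring, assuming hypothetical rtw89_ops_* wrapper names and omitting the driver's locking and busy/offload-capability checks:

static int rtw89_ops_hw_scan(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif,
                             struct ieee80211_scan_request *req)
{
        struct rtw89_dev *rtwdev = hw->priv;
        int ret;

        rtw89_hw_scan_start(rtwdev, vif, req);
        ret = rtw89_hw_scan_offload(rtwdev, vif, true);
        if (ret)
                rtw89_hw_scan_abort(rtwdev, vif); /* roll back on failure */

        return ret;
}

static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw,
                                     struct ieee80211_vif *vif)
{
        struct rtw89_dev *rtwdev = hw->priv;

        rtw89_hw_scan_abort(rtwdev, vif);
}
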
index 83f4eaa..ed8609b 100644
@@ -123,6 +123,27 @@ enum rtw89_fw_log_comp {
        RTW89_FW_LOG_COMP_MCC = 20,
 };
 
+enum rtw89_pkt_offload_op {
+       RTW89_PKT_OFLD_OP_ADD,
+       RTW89_PKT_OFLD_OP_DEL,
+       RTW89_PKT_OFLD_OP_READ,
+};
+
+enum rtw89_scanofld_notify_reason {
+       RTW89_SCAN_DWELL_NOTIFY,
+       RTW89_SCAN_PRE_TX_NOTIFY,
+       RTW89_SCAN_POST_TX_NOTIFY,
+       RTW89_SCAN_ENTER_CH_NOTIFY,
+       RTW89_SCAN_LEAVE_CH_NOTIFY,
+       RTW89_SCAN_END_SCAN_NOTIFY,
+};
+
+enum rtw89_chan_type {
+       RTW89_CHAN_OPERATE = 0,
+       RTW89_CHAN_ACTIVE,
+       RTW89_CHAN_DFS,
+};
+
 #define FWDL_SECTION_MAX_NUM 10
 #define FWDL_SECTION_CHKSUM_LEN        8
 #define FWDL_SECTION_PER_PKT_LEN 2020
@@ -156,6 +177,50 @@ struct rtw89_h2creg_sch_tx_en {
        u16 rsvd:15;
 } __packed;
 
+#define RTW89_CHANNEL_TIME 45
+#define RTW89_DFS_CHAN_TIME 105
+#define RTW89_OFF_CHAN_TIME 100
+#define RTW89_DWELL_TIME 20
+#define RTW89_SCAN_WIDTH 0
+#define RTW89_SCANOFLD_MAX_SSID 8
+#define RTW89_SCANOFLD_MAX_IE_LEN 512
+#define RTW89_SCANOFLD_PKT_NONE 0xFF
+#define RTW89_SCANOFLD_DEBUG_MASK 0x1F
+#define RTW89_MAC_CHINFO_SIZE 20
+
+struct rtw89_mac_chinfo {
+       u8 period;
+       u8 dwell_time;
+       u8 central_ch;
+       u8 pri_ch;
+       u8 bw:3;
+       u8 notify_action:5;
+       u8 num_pkt:4;
+       u8 tx_pkt:1;
+       u8 pause_data:1;
+       u8 ch_band:2;
+       u8 probe_id;
+       u8 dfs_ch:1;
+       u8 tx_null:1;
+       u8 rand_seq_num:1;
+       u8 cfg_tx_pwr:1;
+       u8 rsvd0:4;
+       u8 pkt_id[RTW89_SCANOFLD_MAX_SSID];
+       u16 tx_pwr_idx;
+       u8 rsvd1;
+       struct list_head list;
+};
+
+struct rtw89_scan_option {
+       bool enable;
+       bool target_ch_mode;
+};
+
+struct rtw89_pktofld_info {
+       struct list_head list;
+       u8 id;
+};
+
 static inline void RTW89_SET_FWCMD_RA_IS_DIS(void *cmd, u32 val)
 {
        le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(0));
@@ -1436,6 +1501,14 @@ enum rtw89_btc_cxdrvinfo {
        CXDRVINFO_MAX,
 };
 
+enum rtw89_scan_mode {
+       RTW89_SCAN_IMMEDIATE,
+};
+
+enum rtw89_scan_type {
+       RTW89_SCAN_ONCE,
+};
+
 static inline void RTW89_SET_FWCMD_CXHDR_TYPE(void *cmd, u8 val)
 {
        u8p_replace_bits((u8 *)(cmd) + 0, val, GENMASK(7, 0));
@@ -1706,6 +1779,242 @@ static inline void RTW89_SET_FWCMD_CXRFK_TYPE(void *cmd, u32 val)
        le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(17, 10));
 }
 
+static inline void RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(10, 8));
+}
+
+static inline void RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(31, 16));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_CH_NUM(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PERIOD(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_DWELL(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_CENTER_CH(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PRI_CH(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_BW(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(2, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_ACTION(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(7, 3));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_NUM_PKT(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(11, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_TX(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(12));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(13));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_BAND(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(15, 14));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT_ID(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_DFS(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(24));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_TX_NULL(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(25));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_RANDOM(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(26));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_CFG_TX(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(27));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT0(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT1(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT2(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT3(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT4(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT5(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT6(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT7(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_POWER_IDX(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 16), val, GENMASK(15, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_MACID(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_NORM_CY(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_PORT_ID(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(18, 16));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_BAND(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, BIT(19));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_OPERATION(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(21, 20));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BAND(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(23, 22));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_NOTIFY_END(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_MODE(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(1));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_START_MODE(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(2));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_SCAN_TYPE(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(4, 3));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BW(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(7, 5));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_PRI_CH(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_CENTRAL_CH(void *cmd,
+                                                             u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_PROBE_REQ_PKT_ID(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_NORM_PD(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(15, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_SLOW_PD(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TSF_HIGH(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TSF_SLOW(void *cmd, u32 val)
+{
+       le32p_replace_bits((__le32 *)((u8 *)(cmd) + 16), val, GENMASK(31, 0));
+}
+
 #define RTW89_C2H_HEADER_LEN 8
 
 #define RTW89_GET_C2H_CATEGORY(c2h) \
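
Each RTW89_SET_FWCMD_CHINFO_* helper above writes one field of a little-endian H2C word via le32p_replace_bits(). A hedged sketch of how one RTW89_MAC_CHINFO_SIZE-byte channel entry could be serialized from struct rtw89_mac_chinfo with these helpers (the in-tree serializer lives in fw.c; this function name is illustrative):

static void example_fill_chinfo(void *cmd, const struct rtw89_mac_chinfo *ch)
{
        RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch->period);
        RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch->dwell_time);
        RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch->central_ch);
        RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch->pri_ch);
        RTW89_SET_FWCMD_CHINFO_BW(cmd, ch->bw);
        RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch->notify_action);
        RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch->num_pkt);
        RTW89_SET_FWCMD_CHINFO_TX(cmd, ch->tx_pkt);
        RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch->pause_data);
        RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch->ch_band);
        RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch->probe_id);
        RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch->dfs_ch);
        RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch->tx_null);
        RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch->rand_seq_num);
        RTW89_SET_FWCMD_CHINFO_CFG_TX(cmd, ch->cfg_tx_pwr);
        RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch->pkt_id[0]);
        RTW89_SET_FWCMD_CHINFO_POWER_IDX(cmd, ch->tx_pwr_idx);
}
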
@@ -1762,6 +2071,26 @@ static inline void RTW89_SET_FWCMD_CXRFK_TYPE(void *cmd, u32 val)
 #define RTW89_MK_HT_RATE(nss, mcs) (FIELD_PREP(GENMASK(4, 3), nss) | \
                                    FIELD_PREP(GENMASK(2, 0), mcs))
 
+#define RTW89_GET_MAC_C2H_PKTOFLD_ID(c2h) \
+       le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 0))
+#define RTW89_GET_MAC_C2H_PKTOFLD_OP(c2h) \
+       le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(10, 8))
+#define RTW89_GET_MAC_C2H_PKTOFLD_LEN(c2h) \
+       le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 16))
+
+#define RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h) \
+       le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 0))
+#define RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h) \
+       le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(19, 16))
+#define RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h) \
+       le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(23, 20))
+#define RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h) \
+       le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(3, 0))
+#define RTW89_GET_MAC_C2H_SCANOFLD_AIR_DENSITY(c2h) \
+       le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(7, 4))
+#define RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h) \
+       le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(25, 24))
+
 #define RTW89_FW_HDR_SIZE 32
 #define RTW89_FW_SECTION_HDR_SIZE 16
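
The RTW89_GET_MAC_C2H_PKTOFLD_* accessors above are the read side of the packet-offload exchange; the SCANOFLD ones are consumed by the scan C2H handler added in mac.c below. A small illustrative decode, with a hypothetical function name:

static void example_parse_pktofld_rsp(const void *c2h_data)
{
        u8 id = RTW89_GET_MAC_C2H_PKTOFLD_ID(c2h_data);
        u8 op = RTW89_GET_MAC_C2H_PKTOFLD_OP(c2h_data);
        u16 len = RTW89_GET_MAC_C2H_PKTOFLD_LEN(c2h_data);

        pr_debug("pkt offload rsp: id=%u op=%u len=%u\n", id, op, len);
}
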
 
@@ -1842,9 +2171,12 @@ struct rtw89_fw_h2c_rf_reg_info {
 
 /* CLASS 9 - FW offload */
 #define H2C_CL_MAC_FW_OFLD             0x9
+#define H2C_FUNC_PACKET_OFLD           0x1
 #define H2C_FUNC_MAC_MACID_PAUSE       0x8
 #define H2C_FUNC_USR_EDCA              0xF
 #define H2C_FUNC_OFLD_CFG              0x14
+#define H2C_FUNC_ADD_SCANOFLD_CH       0x16
+#define H2C_FUNC_SCANOFLD              0x17
 
 /* CLASS 10 - Security CAM */
 #define H2C_CL_MAC_SEC_CAM             0xa
@@ -1900,6 +2232,14 @@ int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev);
 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev);
 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev);
 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id);
+int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
+                                struct sk_buff *skb_ofld);
+int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
+                                  struct list_head *chan_list);
+int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
+                             struct rtw89_scan_option *opt,
+                             struct rtw89_vif *vif);
 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
                        struct rtw89_fw_h2c_rf_reg_info *info,
                        u16 len, u8 page);
@@ -1922,5 +2262,15 @@ int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
                     struct rtw89_mac_c2h_info *c2h_info);
 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable);
 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev);
+void rtw89_store_op_chan(struct rtw89_dev *rtwdev);
+void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+                        struct ieee80211_scan_request *req);
+void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+                           bool aborted);
+int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+                         bool enable);
+void rtw89_hw_scan_status_report(struct rtw89_dev *rtwdev, struct sk_buff *skb);
+void rtw89_hw_scan_chan_switch(struct rtw89_dev *rtwdev, struct sk_buff *skb);
+void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
 
 #endif
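
The mac.c changes that follow replace hard-coded R_AX_* page/flow-control registers with lookups through a per-chip rtw89_page_regs table. A plausible 8852A-style instance, assuming the field names used by the new accessors (each value is the constant the old code wrote directly):

static const struct rtw89_page_regs example_page_regs = {
        .hci_fc_ctrl    = R_AX_HCI_FC_CTRL,
        .ch_page_ctrl   = R_AX_CH_PAGE_CTRL,
        .ach_page_ctrl  = R_AX_ACH0_PAGE_CTRL,
        .ach_page_info  = R_AX_ACH0_PAGE_INFO,
        .pub_page_info1 = R_AX_PUB_PAGE_INFO1,
        .pub_page_info2 = R_AX_PUB_PAGE_INFO2,
        .pub_page_info3 = R_AX_PUB_PAGE_INFO3,
        .pub_page_ctrl1 = R_AX_PUB_PAGE_CTRL1,
        .pub_page_ctrl2 = R_AX_PUB_PAGE_CTRL2,
        .wp_page_ctrl1  = R_AX_WP_PAGE_CTRL1,
        .wp_page_ctrl2  = R_AX_WP_PAGE_CTRL2,
        .wp_page_info1  = R_AX_WP_PAGE_INFO1,
};
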
index 031e6e9..5e554bd 100644
@@ -569,6 +569,8 @@ static int hfc_pub_cfg_chk(struct rtw89_dev *rtwdev)
 
 static int hfc_ch_ctrl(struct rtw89_dev *rtwdev, u8 ch)
 {
+       const struct rtw89_chip_info *chip = rtwdev->chip;
+       const struct rtw89_page_regs *regs = chip->page_regs;
        struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
        const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
        int ret = 0;
@@ -588,13 +590,15 @@ static int hfc_ch_ctrl(struct rtw89_dev *rtwdev, u8 ch)
        val = u32_encode_bits(cfg[ch].min, B_AX_MIN_PG_MASK) |
              u32_encode_bits(cfg[ch].max, B_AX_MAX_PG_MASK) |
              (cfg[ch].grp ? B_AX_GRP : 0);
-       rtw89_write32(rtwdev, R_AX_ACH0_PAGE_CTRL + ch * 4, val);
+       rtw89_write32(rtwdev, regs->ach_page_ctrl + ch * 4, val);
 
        return 0;
 }
 
 static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch)
 {
+       const struct rtw89_chip_info *chip = rtwdev->chip;
+       const struct rtw89_page_regs *regs = chip->page_regs;
        struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
        struct rtw89_hfc_ch_info *info = param->ch_info;
        const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
@@ -608,7 +612,7 @@ static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch)
        if (ch > RTW89_DMA_H2C)
                return -EINVAL;
 
-       val = rtw89_read32(rtwdev, R_AX_ACH0_PAGE_INFO + ch * 4);
+       val = rtw89_read32(rtwdev, regs->ach_page_info + ch * 4);
        info[ch].aval = u32_get_bits(val, B_AX_AVAL_PG_MASK);
        if (ch < RTW89_DMA_H2C)
                info[ch].used = u32_get_bits(val, B_AX_USE_PG_MASK);
@@ -620,6 +624,8 @@ static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch)
 
 static int hfc_pub_ctrl(struct rtw89_dev *rtwdev)
 {
+       const struct rtw89_chip_info *chip = rtwdev->chip;
+       const struct rtw89_page_regs *regs = chip->page_regs;
        const struct rtw89_hfc_pub_cfg *cfg = &rtwdev->mac.hfc_param.pub_cfg;
        u32 val;
        int ret;
@@ -634,16 +640,18 @@ static int hfc_pub_ctrl(struct rtw89_dev *rtwdev)
 
        val = u32_encode_bits(cfg->grp0, B_AX_PUBPG_G0_MASK) |
              u32_encode_bits(cfg->grp1, B_AX_PUBPG_G1_MASK);
-       rtw89_write32(rtwdev, R_AX_PUB_PAGE_CTRL1, val);
+       rtw89_write32(rtwdev, regs->pub_page_ctrl1, val);
 
        val = u32_encode_bits(cfg->wp_thrd, B_AX_WP_THRD_MASK);
-       rtw89_write32(rtwdev, R_AX_WP_PAGE_CTRL2, val);
+       rtw89_write32(rtwdev, regs->wp_page_ctrl2, val);
 
        return 0;
 }
 
 static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
 {
+       const struct rtw89_chip_info *chip = rtwdev->chip;
+       const struct rtw89_page_regs *regs = chip->page_regs;
        struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
        struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
        struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
@@ -655,20 +663,20 @@ static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
        if (ret)
                return ret;
 
-       val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO1);
+       val = rtw89_read32(rtwdev, regs->pub_page_info1);
        info->g0_used = u32_get_bits(val, B_AX_G0_USE_PG_MASK);
        info->g1_used = u32_get_bits(val, B_AX_G1_USE_PG_MASK);
-       val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO3);
+       val = rtw89_read32(rtwdev, regs->pub_page_info3);
        info->g0_aval = u32_get_bits(val, B_AX_G0_AVAL_PG_MASK);
        info->g1_aval = u32_get_bits(val, B_AX_G1_AVAL_PG_MASK);
        info->pub_aval =
-               u32_get_bits(rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO2),
+               u32_get_bits(rtw89_read32(rtwdev, regs->pub_page_info2),
                             B_AX_PUB_AVAL_PG_MASK);
        info->wp_aval =
-               u32_get_bits(rtw89_read32(rtwdev, R_AX_WP_PAGE_INFO1),
+               u32_get_bits(rtw89_read32(rtwdev, regs->wp_page_info1),
                             B_AX_WP_AVAL_PG_MASK);
 
-       val = rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL);
+       val = rtw89_read32(rtwdev, regs->hci_fc_ctrl);
        param->en = val & B_AX_HCI_FC_EN ? 1 : 0;
        param->h2c_en = val & B_AX_HCI_FC_CH12_EN ? 1 : 0;
        param->mode = u32_get_bits(val, B_AX_HCI_FC_MODE_MASK);
@@ -681,21 +689,21 @@ static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
        prec_cfg->wp_ch811_full_cond =
                u32_get_bits(val, B_AX_HCI_FC_WP_CH811_FULL_COND_MASK);
 
-       val = rtw89_read32(rtwdev, R_AX_CH_PAGE_CTRL);
+       val = rtw89_read32(rtwdev, regs->ch_page_ctrl);
        prec_cfg->ch011_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH011_MASK);
        prec_cfg->h2c_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH12_MASK);
 
-       val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_CTRL2);
+       val = rtw89_read32(rtwdev, regs->pub_page_ctrl2);
        pub_cfg->pub_max = u32_get_bits(val, B_AX_PUBPG_ALL_MASK);
 
-       val = rtw89_read32(rtwdev, R_AX_WP_PAGE_CTRL1);
+       val = rtw89_read32(rtwdev, regs->wp_page_ctrl1);
        prec_cfg->wp_ch07_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH07_MASK);
        prec_cfg->wp_ch811_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH811_MASK);
 
-       val = rtw89_read32(rtwdev, R_AX_WP_PAGE_CTRL2);
+       val = rtw89_read32(rtwdev, regs->wp_page_ctrl2);
        pub_cfg->wp_thrd = u32_get_bits(val, B_AX_WP_THRD_MASK);
 
-       val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_CTRL1);
+       val = rtw89_read32(rtwdev, regs->pub_page_ctrl1);
        pub_cfg->grp0 = u32_get_bits(val, B_AX_PUBPG_G0_MASK);
        pub_cfg->grp1 = u32_get_bits(val, B_AX_PUBPG_G1_MASK);
 
@@ -708,20 +716,24 @@ static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
 
 static void hfc_h2c_cfg(struct rtw89_dev *rtwdev)
 {
+       const struct rtw89_chip_info *chip = rtwdev->chip;
+       const struct rtw89_page_regs *regs = chip->page_regs;
        struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
        const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
        u32 val;
 
        val = u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK);
-       rtw89_write32(rtwdev, R_AX_CH_PAGE_CTRL, val);
+       rtw89_write32(rtwdev, regs->ch_page_ctrl, val);
 
-       rtw89_write32_mask(rtwdev, R_AX_HCI_FC_CTRL,
+       rtw89_write32_mask(rtwdev, regs->hci_fc_ctrl,
                           B_AX_HCI_FC_CH12_FULL_COND_MASK,
                           prec_cfg->h2c_full_cond);
 }
 
 static void hfc_mix_cfg(struct rtw89_dev *rtwdev)
 {
+       const struct rtw89_chip_info *chip = rtwdev->chip;
+       const struct rtw89_page_regs *regs = chip->page_regs;
        struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
        const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
        const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
@@ -729,18 +741,18 @@ static void hfc_mix_cfg(struct rtw89_dev *rtwdev)
 
        val = u32_encode_bits(prec_cfg->ch011_prec, B_AX_PREC_PAGE_CH011_MASK) |
              u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK);
-       rtw89_write32(rtwdev, R_AX_CH_PAGE_CTRL, val);
+       rtw89_write32(rtwdev, regs->ch_page_ctrl, val);
 
        val = u32_encode_bits(pub_cfg->pub_max, B_AX_PUBPG_ALL_MASK);
-       rtw89_write32(rtwdev, R_AX_PUB_PAGE_CTRL2, val);
+       rtw89_write32(rtwdev, regs->pub_page_ctrl2, val);
 
        val = u32_encode_bits(prec_cfg->wp_ch07_prec,
                              B_AX_PREC_PAGE_WP_CH07_MASK) |
              u32_encode_bits(prec_cfg->wp_ch811_prec,
                              B_AX_PREC_PAGE_WP_CH811_MASK);
-       rtw89_write32(rtwdev, R_AX_WP_PAGE_CTRL1, val);
+       rtw89_write32(rtwdev, regs->wp_page_ctrl1, val);
 
-       val = u32_replace_bits(rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL),
+       val = u32_replace_bits(rtw89_read32(rtwdev, regs->hci_fc_ctrl),
                               param->mode, B_AX_HCI_FC_MODE_MASK);
        val = u32_replace_bits(val, prec_cfg->ch011_full_cond,
                               B_AX_HCI_FC_WD_FULL_COND_MASK);
@@ -750,21 +762,23 @@ static void hfc_mix_cfg(struct rtw89_dev *rtwdev)
                               B_AX_HCI_FC_WP_CH07_FULL_COND_MASK);
        val = u32_replace_bits(val, prec_cfg->wp_ch811_full_cond,
                               B_AX_HCI_FC_WP_CH811_FULL_COND_MASK);
-       rtw89_write32(rtwdev, R_AX_HCI_FC_CTRL, val);
+       rtw89_write32(rtwdev, regs->hci_fc_ctrl, val);
 }
 
 static void hfc_func_en(struct rtw89_dev *rtwdev, bool en, bool h2c_en)
 {
+       const struct rtw89_chip_info *chip = rtwdev->chip;
+       const struct rtw89_page_regs *regs = chip->page_regs;
        struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
        u32 val;
 
-       val = rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL);
+       val = rtw89_read32(rtwdev, regs->hci_fc_ctrl);
        param->en = en;
        param->h2c_en = h2c_en;
        val = en ? (val | B_AX_HCI_FC_EN) : (val & ~B_AX_HCI_FC_EN);
        val = h2c_en ? (val | B_AX_HCI_FC_CH12_EN) :
                         (val & ~B_AX_HCI_FC_CH12_EN);
-       rtw89_write32(rtwdev, R_AX_HCI_FC_CTRL, val);
+       rtw89_write32(rtwdev, regs->hci_fc_ctrl, val);
 }
 
 static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en)
@@ -917,23 +931,31 @@ rtw89_mac_get_req_pwr_state(struct rtw89_dev *rtwdev)
 }
 
 static void rtw89_mac_send_rpwm(struct rtw89_dev *rtwdev,
-                               enum rtw89_rpwm_req_pwr_state req_pwr_state)
+                               enum rtw89_rpwm_req_pwr_state req_pwr_state,
+                               bool notify_wake)
 {
        u16 request;
 
+       spin_lock_bh(&rtwdev->rpwm_lock);
+
        request = rtw89_read16(rtwdev, R_AX_RPWM);
        request ^= request | PS_RPWM_TOGGLE;
-
-       rtwdev->mac.rpwm_seq_num = (rtwdev->mac.rpwm_seq_num + 1) &
-                                  RPWM_SEQ_NUM_MAX;
-       request |= FIELD_PREP(PS_RPWM_SEQ_NUM, rtwdev->mac.rpwm_seq_num);
-
        request |= req_pwr_state;
 
-       if (req_pwr_state < RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED)
-               request |= PS_RPWM_ACK;
+       if (notify_wake) {
+               request |= PS_RPWM_NOTIFY_WAKE;
+       } else {
+               rtwdev->mac.rpwm_seq_num = (rtwdev->mac.rpwm_seq_num + 1) &
+                                           RPWM_SEQ_NUM_MAX;
+               request |= FIELD_PREP(PS_RPWM_SEQ_NUM,
+                                     rtwdev->mac.rpwm_seq_num);
 
+               if (req_pwr_state < RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED)
+                       request |= PS_RPWM_ACK;
+       }
        rtw89_write16(rtwdev, rtwdev->hci.rpwm_addr, request);
+
+       spin_unlock_bh(&rtwdev->rpwm_lock);
 }
 
 static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev,
@@ -993,7 +1015,7 @@ void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
        else
                state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE;
 
-       rtw89_mac_send_rpwm(rtwdev, state);
+       rtw89_mac_send_rpwm(rtwdev, state, false);
        ret = read_poll_timeout_atomic(rtw89_mac_check_cpwm_state, ret, !ret,
                                       1000, 15000, false, rtwdev, state);
        if (ret)
@@ -1001,19 +1023,31 @@ void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
                          enter ? "entering" : "leaving");
 }
 
+void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev)
+{
+       enum rtw89_rpwm_req_pwr_state state;
+
+       state = rtw89_mac_get_req_pwr_state(rtwdev);
+       rtw89_mac_send_rpwm(rtwdev, state, true);
+}
+
 static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
 {
 #define PWR_ACT 1
        const struct rtw89_chip_info *chip = rtwdev->chip;
        const struct rtw89_pwr_cfg * const *cfg_seq;
+       int (*cfg_func)(struct rtw89_dev *rtwdev);
        struct rtw89_hal *hal = &rtwdev->hal;
        int ret;
        u8 val;
 
-       if (on)
+       if (on) {
                cfg_seq = chip->pwr_on_seq;
-       else
+               cfg_func = chip->ops->pwr_on_func;
+       } else {
                cfg_seq = chip->pwr_off_seq;
+               cfg_func = chip->ops->pwr_off_func;
+       }
 
        if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags))
                __rtw89_leave_ps_mode(rtwdev);
@@ -1024,7 +1058,7 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
                return -EBUSY;
        }
 
-       ret = rtw89_mac_pwr_seq(rtwdev, cfg_seq);
+       ret = cfg_func ? cfg_func(rtwdev) : rtw89_mac_pwr_seq(rtwdev, cfg_seq);
        if (ret)
                return ret;
 
@@ -1094,18 +1128,31 @@ static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
 
 static int dmac_func_en(struct rtw89_dev *rtwdev)
 {
+       enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
        u32 val32;
 
-       val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MAC_SEC_EN |
-                B_AX_DISPATCHER_EN | B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
-                B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN | B_AX_STA_SCH_EN |
-                B_AX_TXPKT_CTRL_EN | B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN);
+       if (chip_id == RTL8852C)
+               val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN |
+                        B_AX_MAC_SEC_EN | B_AX_DISPATCHER_EN |
+                        B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
+                        B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN |
+                        B_AX_STA_SCH_EN | B_AX_TXPKT_CTRL_EN |
+                        B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN |
+                        B_AX_DMAC_CRPRT | B_AX_H_AXIDMA_EN);
+       else
+               val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN |
+                        B_AX_MAC_SEC_EN | B_AX_DISPATCHER_EN |
+                        B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
+                        B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN |
+                        B_AX_STA_SCH_EN | B_AX_TXPKT_CTRL_EN |
+                        B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN |
+                        B_AX_DMAC_CRPRT);
        rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val32);
 
        val32 = (B_AX_MAC_SEC_CLK_EN | B_AX_DISPATCHER_CLK_EN |
                 B_AX_DLE_CPUIO_CLK_EN | B_AX_PKT_IN_CLK_EN |
                 B_AX_STA_SCH_CLK_EN | B_AX_TXPKT_CTRL_CLK_EN |
-                B_AX_WD_RLS_CLK_EN);
+                B_AX_WD_RLS_CLK_EN | B_AX_BBRPT_CLK_EN);
        rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32);
 
        return 0;
@@ -1113,7 +1160,11 @@ static int dmac_func_en(struct rtw89_dev *rtwdev)
 
 static int chip_func_en(struct rtw89_dev *rtwdev)
 {
-       rtw89_write32_set(rtwdev, R_AX_SPSLDO_ON_CTRL0, B_AX_OCP_L1_MASK);
+       enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+
+       if (chip_id == RTL8852A)
+               rtw89_write32_set(rtwdev, R_AX_SPSLDO_ON_CTRL0,
+                                 B_AX_OCP_L1_MASK);
 
        return 0;
 }
@@ -1149,6 +1200,18 @@ const struct rtw89_dle_size rtw89_wde_size4 = {
 };
 EXPORT_SYMBOL(rtw89_wde_size4);
 
+/* 8852C DLFW */
+const struct rtw89_dle_size rtw89_wde_size18 = {
+       RTW89_WDE_PG_64, 0, 2048,
+};
+EXPORT_SYMBOL(rtw89_wde_size18);
+
+/* 8852C PCIE SCC */
+const struct rtw89_dle_size rtw89_wde_size19 = {
+       RTW89_WDE_PG_64, 3328, 0,
+};
+EXPORT_SYMBOL(rtw89_wde_size19);
+
 /* PCIE */
 const struct rtw89_dle_size rtw89_ple_size0 = {
        RTW89_PLE_PG_128, 1520, 16,
@@ -1161,6 +1224,18 @@ const struct rtw89_dle_size rtw89_ple_size4 = {
 };
 EXPORT_SYMBOL(rtw89_ple_size4);
 
+/* 8852C DLFW */
+const struct rtw89_dle_size rtw89_ple_size18 = {
+       RTW89_PLE_PG_128, 2544, 16,
+};
+EXPORT_SYMBOL(rtw89_ple_size18);
+
+/* 8852C PCIE SCC */
+const struct rtw89_dle_size rtw89_ple_size19 = {
+       RTW89_PLE_PG_128, 1904, 16,
+};
+EXPORT_SYMBOL(rtw89_ple_size19);
+
 /* PCIE 64 */
 const struct rtw89_wde_quota rtw89_wde_qt0 = {
        3792, 196, 0, 107,
@@ -1173,6 +1248,18 @@ const struct rtw89_wde_quota rtw89_wde_qt4 = {
 };
 EXPORT_SYMBOL(rtw89_wde_qt4);
 
+/* 8852C DLFW */
+const struct rtw89_wde_quota rtw89_wde_qt17 = {
+       0, 0, 0, 0,
+};
+EXPORT_SYMBOL(rtw89_wde_qt17);
+
+/* 8852C PCIE SCC */
+const struct rtw89_wde_quota rtw89_wde_qt18 = {
+       3228, 60, 0, 40,
+};
+EXPORT_SYMBOL(rtw89_wde_qt18);
+
 /* PCIE SCC */
 const struct rtw89_ple_quota rtw89_ple_qt4 = {
        264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,
@@ -1191,6 +1278,30 @@ const struct rtw89_ple_quota rtw89_ple_qt13 = {
 };
 EXPORT_SYMBOL(rtw89_ple_qt13);
 
+/* DLFW 52C */
+const struct rtw89_ple_quota rtw89_ple_qt44 = {
+       0, 0, 16, 256, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+EXPORT_SYMBOL(rtw89_ple_qt44);
+
+/* DLFW 52C */
+const struct rtw89_ple_quota rtw89_ple_qt45 = {
+       0, 0, 32, 256, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+EXPORT_SYMBOL(rtw89_ple_qt45);
+
+/* 8852C PCIE SCC */
+const struct rtw89_ple_quota rtw89_ple_qt46 = {
+       525, 0, 16, 20, 13, 13, 178, 0, 32, 62, 8, 16,
+};
+EXPORT_SYMBOL(rtw89_ple_qt46);
+
+/* 8852C PCIE SCC */
+const struct rtw89_ple_quota rtw89_ple_qt47 = {
+       525, 0, 32, 20, 1034, 13, 1199, 0, 1053, 62, 160, 1037,
+};
+EXPORT_SYMBOL(rtw89_ple_qt47);
+
 static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev,
                                                   enum rtw89_qta_mode mode)
 {
@@ -1345,6 +1456,8 @@ static void ple_quota_cfg(struct rtw89_dev *rtwdev,
        SET_QUOTA(bb_rpt, PLE, 8);
        SET_QUOTA(wd_rel, PLE, 9);
        SET_QUOTA(cpu_io, PLE, 10);
+       if (rtwdev->chip->chip_id == RTL8852C)
+               SET_QUOTA(tx_rpt, PLE, 11);
 }
 
 #undef SET_QUOTA
@@ -1432,6 +1545,43 @@ error:
        return ret;
 }
 
+static int preload_init_set(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx,
+                           enum rtw89_qta_mode mode)
+{
+       u32 reg, max_preld_size, min_rsvd_size;
+
+       max_preld_size = (mac_idx == RTW89_MAC_0 ?
+                         PRELD_B0_ENT_NUM : PRELD_B1_ENT_NUM) * PRELD_AMSDU_SIZE;
+       reg = mac_idx == RTW89_MAC_0 ?
+             R_AX_TXPKTCTL_B0_PRELD_CFG0 : R_AX_TXPKTCTL_B1_PRELD_CFG0;
+       rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_USEMAXSZ_MASK, max_preld_size);
+       rtw89_write32_set(rtwdev, reg, B_AX_B0_PRELD_FEN);
+
+       min_rsvd_size = PRELD_AMSDU_SIZE;
+       reg = mac_idx == RTW89_MAC_0 ?
+             R_AX_TXPKTCTL_B0_PRELD_CFG1 : R_AX_TXPKTCTL_B1_PRELD_CFG1;
+       rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_NXT_TXENDWIN_MASK, PRELD_NEXT_WND);
+       rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_NXT_RSVMINSZ_MASK, min_rsvd_size);
+
+       return 0;
+}
+
+static bool is_qta_poh(struct rtw89_dev *rtwdev)
+{
+       return rtwdev->hci.type == RTW89_HCI_TYPE_PCIE;
+}
+
+static int preload_init(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx,
+                       enum rtw89_qta_mode mode)
+{
+       const struct rtw89_chip_info *chip = rtwdev->chip;
+
+       if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B || !is_qta_poh(rtwdev))
+               return 0;
+
+       return preload_init_set(rtwdev, mac_idx, mode);
+}
+
 static bool dle_is_txq_empty(struct rtw89_dev *rtwdev)
 {
        u32 msk32;
@@ -1539,6 +1689,12 @@ static int dmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
                return ret;
        }
 
+       ret = preload_init(rtwdev, RTW89_MAC_0, rtwdev->mac.qta_mode);
+       if (ret) {
+               rtw89_err(rtwdev, "[ERR]preload init %d\n", ret);
+               return ret;
+       }
+
        ret = hfc_init(rtwdev, true, true, true);
        if (ret) {
                rtw89_err(rtwdev, "[ERR]HCI FC init %d\n", ret);
@@ -2090,8 +2246,26 @@ static int rtw89_set_hw_sch_tx_en(struct rtw89_dev *rtwdev, u8 mac_idx,
        return 0;
 }
 
+static int rtw89_set_hw_sch_tx_en_v1(struct rtw89_dev *rtwdev, u8 mac_idx,
+                                    u32 tx_en, u32 tx_en_mask)
+{
+       u32 reg = rtw89_mac_reg_by_idx(R_AX_CTN_DRV_TXEN, mac_idx);
+       u32 val;
+       int ret;
+
+       ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+       if (ret)
+               return ret;
+
+       val = rtw89_read32(rtwdev, reg);
+       val = (val & ~tx_en_mask) | (tx_en & tx_en_mask);
+       rtw89_write32(rtwdev, reg, val);
+
+       return 0;
+}
+
 int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
-                         u16 *tx_en, enum rtw89_sch_tx_sel sel)
+                         u32 *tx_en, enum rtw89_sch_tx_sel sel)
 {
        int ret;
 
@@ -2100,7 +2274,8 @@ int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
 
        switch (sel) {
        case RTW89_SCH_TX_SEL_ALL:
-               ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 0xffff);
+               ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0,
+                                            B_AX_CTN_TXEN_ALL_MASK);
                if (ret)
                        return ret;
                break;
@@ -2117,7 +2292,8 @@ int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
                        return ret;
                break;
        case RTW89_SCH_TX_SEL_MACID:
-               ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 0xffff);
+               ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0,
+                                            B_AX_CTN_TXEN_ALL_MASK);
                if (ret)
                        return ret;
                break;
@@ -2129,11 +2305,52 @@ int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
 }
 EXPORT_SYMBOL(rtw89_mac_stop_sch_tx);
 
-int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u16 tx_en)
+int rtw89_mac_stop_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx,
+                            u32 *tx_en, enum rtw89_sch_tx_sel sel)
+{
+       int ret;
+
+       *tx_en = rtw89_read32(rtwdev,
+                             rtw89_mac_reg_by_idx(R_AX_CTN_DRV_TXEN, mac_idx));
+
+       switch (sel) {
+       case RTW89_SCH_TX_SEL_ALL:
+               ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, 0,
+                                               B_AX_CTN_TXEN_ALL_MASK_V1);
+               if (ret)
+                       return ret;
+               break;
+       case RTW89_SCH_TX_SEL_HIQ:
+               ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx,
+                                               0, B_AX_CTN_TXEN_HGQ);
+               if (ret)
+                       return ret;
+               break;
+       case RTW89_SCH_TX_SEL_MG0:
+               ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx,
+                                               0, B_AX_CTN_TXEN_MGQ);
+               if (ret)
+                       return ret;
+               break;
+       case RTW89_SCH_TX_SEL_MACID:
+               ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, 0,
+                                               B_AX_CTN_TXEN_ALL_MASK_V1);
+               if (ret)
+                       return ret;
+               break;
+       default:
+               return 0;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_stop_sch_tx_v1);
+
+int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
 {
        int ret;
 
-       ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, tx_en, 0xffff);
+       ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, tx_en, B_AX_CTN_TXEN_ALL_MASK);
        if (ret)
                return ret;
 
@@ -2141,6 +2358,19 @@ int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u16 tx_en)
 }
 EXPORT_SYMBOL(rtw89_mac_resume_sch_tx);
 
+int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
+{
+       int ret;
+
+       ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, tx_en,
+                                       B_AX_CTN_TXEN_ALL_MASK_V1);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_resume_sch_tx_v1);
+
 static u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len,
                                 bool wd)
 {
@@ -2303,9 +2533,9 @@ static int band1_enable(struct rtw89_dev *rtwdev)
        int ret, i;
        u32 sleep_bak[4] = {0};
        u32 pause_bak[4] = {0};
-       u16 tx_en;
+       u32 tx_en;
 
-       ret = rtw89_mac_stop_sch_tx(rtwdev, 0, &tx_en, RTW89_SCH_TX_SEL_ALL);
+       ret = rtw89_chip_stop_sch_tx(rtwdev, 0, &tx_en, RTW89_SCH_TX_SEL_ALL);
        if (ret) {
                rtw89_err(rtwdev, "[ERR]stop sch tx %d\n", ret);
                return ret;
@@ -2335,7 +2565,7 @@ static int band1_enable(struct rtw89_dev *rtwdev)
                rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, pause_bak[i]);
        }
 
-       ret = rtw89_mac_resume_sch_tx(rtwdev, 0, tx_en);
+       ret = rtw89_chip_resume_sch_tx(rtwdev, 0, tx_en);
        if (ret) {
                rtw89_err(rtwdev, "[ERR]CMAC1 resume sch tx %d\n", ret);
                return ret;
@@ -2529,7 +2759,11 @@ static void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev)
        clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
 
        rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN);
+       rtw89_write32_clr(rtwdev, R_AX_WCPU_FW_CTRL, B_AX_WCPU_FWDL_EN |
+                         B_AX_H2C_PATH_RDY | B_AX_FWDL_PATH_RDY);
        rtw89_write32_clr(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN);
+       rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+       rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
 }
 
 static int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason,
@@ -2599,7 +2833,9 @@ static int rtw89_mac_fw_dl_pre_init(struct rtw89_dev *rtwdev)
 
 static void rtw89_mac_hci_func_en(struct rtw89_dev *rtwdev)
 {
-       rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN,
+       const struct rtw89_chip_info *chip = rtwdev->chip;
+
+       rtw89_write32_set(rtwdev, chip->hci_func_en_addr,
                          B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN);
 }
 
@@ -3129,6 +3365,57 @@ rtw89_mac_c2h_macid_pause(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len
 {
 }
 
+static bool rtw89_is_op_chan(struct rtw89_dev *rtwdev, u8 band, u8 channel)
+{
+       struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+
+       return band == scan_info->op_band && channel == scan_info->op_pri_ch;
+}
+
+static void
+rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+                          u32 len)
+{
+       struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
+       struct rtw89_hal *hal = &rtwdev->hal;
+       u8 reason, status, tx_fail, band;
+       u16 chan;
+
+       tx_fail = RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h->data);
+       status = RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h->data);
+       chan = RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h->data);
+       reason = RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h->data);
+       band = RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h->data);
+
+       if (!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))
+               band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
+
+       rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
+                   "band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d\n",
+                   band, chan, reason, status, tx_fail);
+
+       switch (reason) {
+       case RTW89_SCAN_LEAVE_CH_NOTIFY:
+               if (rtw89_is_op_chan(rtwdev, band, chan))
+                       ieee80211_stop_queues(rtwdev->hw);
+               return;
+       case RTW89_SCAN_END_SCAN_NOTIFY:
+               rtw89_hw_scan_complete(rtwdev, vif, false);
+               break;
+       case RTW89_SCAN_ENTER_CH_NOTIFY:
+               if (rtw89_is_op_chan(rtwdev, band, chan))
+                       ieee80211_wake_queues(rtwdev->hw);
+               break;
+       default:
+               return;
+       }
+
+       hal->prev_band_type = hal->current_band_type;
+       hal->prev_primary_channel = hal->current_channel;
+       hal->current_channel = chan;
+       hal->current_band_type = band;
+}
+
 static void
 rtw89_mac_c2h_rec_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
 {
@@ -3172,6 +3459,7 @@ void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
        [RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = NULL,
        [RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL,
        [RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause,
+       [RTW89_MAC_C2H_FUNC_SCANOFLD_RSP] = rtw89_mac_c2h_scanofld_rsp,
 };
 
 static
@@ -3407,29 +3695,32 @@ EXPORT_SYMBOL(rtw89_mac_coex_init);
 int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
                      const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
 {
-       u32 val, ret;
+       u32 val = 0, ret;
+
+       if (gnt_cfg->band[0].gnt_bt)
+               val |= B_AX_GNT_BT_RFC_S0_SW_VAL | B_AX_GNT_BT_BB_S0_SW_VAL;
+
+       if (gnt_cfg->band[0].gnt_bt_sw_en)
+               val |= B_AX_GNT_BT_RFC_S0_SW_CTRL | B_AX_GNT_BT_BB_S0_SW_CTRL;
+
+       if (gnt_cfg->band[0].gnt_wl)
+               val |= B_AX_GNT_WL_RFC_S0_SW_VAL | B_AX_GNT_WL_BB_S0_SW_VAL;
+
+       if (gnt_cfg->band[0].gnt_wl_sw_en)
+               val |= B_AX_GNT_WL_RFC_S0_SW_CTRL | B_AX_GNT_WL_BB_S0_SW_CTRL;
+
+       if (gnt_cfg->band[1].gnt_bt)
+               val |= B_AX_GNT_BT_RFC_S1_SW_VAL | B_AX_GNT_BT_BB_S1_SW_VAL;
+
+       if (gnt_cfg->band[1].gnt_bt_sw_en)
+               val |= B_AX_GNT_BT_RFC_S1_SW_CTRL | B_AX_GNT_BT_BB_S1_SW_CTRL;
+
+       if (gnt_cfg->band[1].gnt_wl)
+               val |= B_AX_GNT_WL_RFC_S1_SW_VAL | B_AX_GNT_WL_BB_S1_SW_VAL;
+
+       if (gnt_cfg->band[1].gnt_wl_sw_en)
+               val |= B_AX_GNT_WL_RFC_S1_SW_CTRL | B_AX_GNT_WL_BB_S1_SW_CTRL;
 
-       ret = rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val);
-       if (ret) {
-               rtw89_err(rtwdev, "Read LTE fail!\n");
-               return ret;
-       }
-       val = (gnt_cfg->band[0].gnt_bt ?
-              B_AX_GNT_BT_RFC_S0_SW_VAL | B_AX_GNT_BT_BB_S0_SW_VAL : 0) |
-             (gnt_cfg->band[0].gnt_bt_sw_en ?
-              B_AX_GNT_BT_RFC_S0_SW_CTRL | B_AX_GNT_BT_BB_S0_SW_CTRL : 0) |
-             (gnt_cfg->band[0].gnt_wl ?
-              B_AX_GNT_WL_RFC_S0_SW_VAL | B_AX_GNT_WL_BB_S0_SW_VAL : 0) |
-             (gnt_cfg->band[0].gnt_wl_sw_en ?
-              B_AX_GNT_WL_RFC_S0_SW_CTRL | B_AX_GNT_WL_BB_S0_SW_CTRL : 0) |
-             (gnt_cfg->band[1].gnt_bt ?
-              B_AX_GNT_BT_RFC_S1_SW_VAL | B_AX_GNT_BT_BB_S1_SW_VAL : 0) |
-             (gnt_cfg->band[1].gnt_bt_sw_en ?
-              B_AX_GNT_BT_RFC_S1_SW_CTRL | B_AX_GNT_BT_BB_S1_SW_CTRL : 0) |
-             (gnt_cfg->band[1].gnt_wl ?
-              B_AX_GNT_WL_RFC_S1_SW_VAL | B_AX_GNT_WL_BB_S1_SW_VAL : 0) |
-             (gnt_cfg->band[1].gnt_wl_sw_en ?
-              B_AX_GNT_WL_RFC_S1_SW_CTRL | B_AX_GNT_WL_BB_S1_SW_CTRL : 0);
        ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_1, val);
        if (ret) {
                rtw89_err(rtwdev, "Write LTE fail!\n");
@@ -3438,6 +3729,54 @@ int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
 
        return 0;
 }
+EXPORT_SYMBOL(rtw89_mac_cfg_gnt);
+
+int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev,
+                        const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
+{
+       u32 val = 0;
+
+       if (gnt_cfg->band[0].gnt_bt)
+               val |= B_AX_GNT_BT_RFC_S0_VAL | B_AX_GNT_BT_RX_VAL |
+                      B_AX_GNT_BT_TX_VAL;
+       else
+               val |= B_AX_WL_ACT_VAL;
+
+       if (gnt_cfg->band[0].gnt_bt_sw_en)
+               val |= B_AX_GNT_BT_RFC_S0_SWCTRL | B_AX_GNT_BT_RX_SWCTRL |
+                      B_AX_GNT_BT_TX_SWCTRL | B_AX_WL_ACT_SWCTRL;
+
+       if (gnt_cfg->band[0].gnt_wl)
+               val |= B_AX_GNT_WL_RFC_S0_VAL | B_AX_GNT_WL_RX_VAL |
+                      B_AX_GNT_WL_TX_VAL | B_AX_GNT_WL_BB_VAL;
+
+       if (gnt_cfg->band[0].gnt_wl_sw_en)
+               val |= B_AX_GNT_WL_RFC_S0_SWCTRL | B_AX_GNT_WL_RX_SWCTRL |
+                      B_AX_GNT_WL_TX_SWCTRL | B_AX_GNT_WL_BB_SWCTRL;
+
+       if (gnt_cfg->band[1].gnt_bt)
+               val |= B_AX_GNT_BT_RFC_S1_VAL | B_AX_GNT_BT_RX_VAL |
+                      B_AX_GNT_BT_TX_VAL;
+       else
+               val |= B_AX_WL_ACT_VAL;
+
+       if (gnt_cfg->band[1].gnt_bt_sw_en)
+               val |= B_AX_GNT_BT_RFC_S1_SWCTRL | B_AX_GNT_BT_RX_SWCTRL |
+                      B_AX_GNT_BT_TX_SWCTRL | B_AX_WL_ACT_SWCTRL;
+
+       if (gnt_cfg->band[1].gnt_wl)
+               val |= B_AX_GNT_WL_RFC_S1_VAL | B_AX_GNT_WL_RX_VAL |
+                      B_AX_GNT_WL_TX_VAL | B_AX_GNT_WL_BB_VAL;
+
+       if (gnt_cfg->band[1].gnt_wl_sw_en)
+               val |= B_AX_GNT_WL_RFC_S1_SWCTRL | B_AX_GNT_WL_RX_SWCTRL |
+                      B_AX_GNT_WL_TX_SWCTRL | B_AX_GNT_WL_BB_SWCTRL;
+
+       rtw89_write32(rtwdev, R_AX_GNT_SW_CTRL, val);
+
+       return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_cfg_gnt_v1);
 
 int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
 {
@@ -3497,6 +3836,28 @@ int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl)
 
        return 0;
 }
+EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path);
+
+int rtw89_mac_cfg_ctrl_path_v1(struct rtw89_dev *rtwdev, bool wl)
+{
+       struct rtw89_btc *btc = &rtwdev->btc;
+       struct rtw89_btc_dm *dm = &btc->dm;
+       struct rtw89_mac_ax_gnt *g = dm->gnt.band;
+       int i;
+
+       if (wl)
+               return 0;
+
+       for (i = 0; i < RTW89_PHY_MAX; i++) {
+               g[i].gnt_bt_sw_en = 1;
+               g[i].gnt_bt = 1;
+               g[i].gnt_wl_sw_en = 1;
+               g[i].gnt_wl = 0;
+       }
+
+       return rtw89_mac_cfg_gnt_v1(rtwdev, &dm->gnt);
+}
+EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path_v1);
 
 bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev)
 {
@@ -3900,3 +4261,51 @@ int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev,
 
        return 0;
 }
+
+int rtw89_mac_write_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask)
+{
+       u32 val32;
+       int ret;
+
+       val32 = FIELD_PREP(B_AX_WL_XTAL_SI_ADDR_MASK, offset) |
+               FIELD_PREP(B_AX_WL_XTAL_SI_DATA_MASK, val) |
+               FIELD_PREP(B_AX_WL_XTAL_SI_BITMASK_MASK, mask) |
+               FIELD_PREP(B_AX_WL_XTAL_SI_MODE_MASK, XTAL_SI_NORMAL_WRITE) |
+               FIELD_PREP(B_AX_WL_XTAL_SI_CMD_POLL, 1);
+       rtw89_write32(rtwdev, R_AX_WLAN_XTAL_SI_CTRL, val32);
+
+       ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_WL_XTAL_SI_CMD_POLL),
+                               50, 50000, false, rtwdev, R_AX_WLAN_XTAL_SI_CTRL);
+       if (ret) {
+               rtw89_warn(rtwdev, "xtal si not ready(W): offset=%x val=%x mask=%x\n",
+                          offset, val, mask);
+               return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_write_xtal_si);
+
+int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val)
+{
+       u32 val32;
+       int ret;
+
+       val32 = FIELD_PREP(B_AX_WL_XTAL_SI_ADDR_MASK, offset) |
+               FIELD_PREP(B_AX_WL_XTAL_SI_DATA_MASK, 0x00) |
+               FIELD_PREP(B_AX_WL_XTAL_SI_BITMASK_MASK, 0x00) |
+               FIELD_PREP(B_AX_WL_XTAL_SI_MODE_MASK, XTAL_SI_NORMAL_READ) |
+               FIELD_PREP(B_AX_WL_XTAL_SI_CMD_POLL, 1);
+       rtw89_write32(rtwdev, R_AX_WLAN_XTAL_SI_CTRL, val32);
+
+       ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_WL_XTAL_SI_CMD_POLL),
+                               50, 50000, false, rtwdev, R_AX_WLAN_XTAL_SI_CTRL);
+       if (ret) {
+               rtw89_warn(rtwdev, "xtal si not ready(R): offset=%x\n", offset);
+               return ret;
+       }
+
+       *val = rtw89_read8(rtwdev, R_AX_WLAN_XTAL_SI_CTRL + 1);
+
+       return 0;
+}
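Both xtal SI accessors funnel through one indirect-access register: a write latches address, data, bitmask, and a self-clearing poll bit, and a read returns the byte at R_AX_WLAN_XTAL_SI_CTRL + 1 once the poll bit drops. A usage sketch against the XTAL_SI_* offsets declared in mac.h further down (the bit choice is illustrative only):

    u8 cv;
    int ret;

    /* field write: only the bits in the mask argument are touched */
    ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL,
                                  XTAL_SI_PON_EI | XTAL_SI_PON_WEI,
                                  XTAL_SI_PON_EI | XTAL_SI_PON_WEI);
    if (!ret)
            ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_CV, &cv);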
index e5db0a2..b797667 100644
@@ -301,6 +301,7 @@ enum rtw89_mac_c2h_ofld_func {
        RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP,
        RTW89_MAC_C2H_FUNC_BCN_RESEND,
        RTW89_MAC_C2H_FUNC_MACID_PAUSE,
+       RTW89_MAC_C2H_FUNC_SCANOFLD_RSP = 0x9,
        RTW89_MAC_C2H_FUNC_OFLD_MAX,
 };
 
@@ -674,13 +675,23 @@ enum mac_ax_err_info {
 extern const struct rtw89_hfc_prec_cfg rtw89_hfc_preccfg_pcie;
 extern const struct rtw89_dle_size rtw89_wde_size0;
 extern const struct rtw89_dle_size rtw89_wde_size4;
+extern const struct rtw89_dle_size rtw89_wde_size18;
+extern const struct rtw89_dle_size rtw89_wde_size19;
 extern const struct rtw89_dle_size rtw89_ple_size0;
 extern const struct rtw89_dle_size rtw89_ple_size4;
+extern const struct rtw89_dle_size rtw89_ple_size18;
+extern const struct rtw89_dle_size rtw89_ple_size19;
 extern const struct rtw89_wde_quota rtw89_wde_qt0;
 extern const struct rtw89_wde_quota rtw89_wde_qt4;
+extern const struct rtw89_wde_quota rtw89_wde_qt17;
+extern const struct rtw89_wde_quota rtw89_wde_qt18;
 extern const struct rtw89_ple_quota rtw89_ple_qt4;
 extern const struct rtw89_ple_quota rtw89_ple_qt5;
 extern const struct rtw89_ple_quota rtw89_ple_qt13;
+extern const struct rtw89_ple_quota rtw89_ple_qt44;
+extern const struct rtw89_ple_quota rtw89_ple_qt45;
+extern const struct rtw89_ple_quota rtw89_ple_qt46;
+extern const struct rtw89_ple_quota rtw89_ple_qt47;
 
 static inline u32 rtw89_mac_reg_by_idx(u32 reg_base, u8 band)
 {
@@ -780,24 +791,31 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
                          u32 len, u8 class, u8 func);
 int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev);
 int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
-                         u16 *tx_en, enum rtw89_sch_tx_sel sel);
-int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u16 tx_en);
+                         u32 *tx_en, enum rtw89_sch_tx_sel sel);
+int rtw89_mac_stop_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx,
+                            u32 *tx_en, enum rtw89_sch_tx_sel sel);
+int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
+int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
 int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_ids, bool enable);
 void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx);
 void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop);
 int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex);
 int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
                      const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
+int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev,
+                        const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
 int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt);
 u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band);
 void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val);
 u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev);
 bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev);
 int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl);
+int rtw89_mac_cfg_ctrl_path_v1(struct rtw89_dev *rtwdev, bool wl);
 bool rtw89_mac_get_txpwr_cr(struct rtw89_dev *rtwdev,
                            enum rtw89_phy_idx phy_idx,
                            u32 reg_base, u32 *cr);
 void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter);
+void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev);
 void rtw89_mac_bf_assoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta);
 void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
@@ -870,4 +888,44 @@ int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev,
 int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev,
                                 struct rtw89_sta *rtwsta, u8 *tx_retry);
 
+enum rtw89_mac_xtal_si_offset {
+       XTAL_SI_XTAL_SC_XI = 0x04,
+#define XTAL_SC_XI_MASK                GENMASK(7, 0)
+       XTAL_SI_XTAL_SC_XO = 0x05,
+#define XTAL_SC_XO_MASK                GENMASK(7, 0)
+       XTAL_SI_PWR_CUT = 0x10,
+#define XTAL_SI_SMALL_PWR_CUT  BIT(0)
+#define XTAL_SI_BIG_PWR_CUT    BIT(1)
+       XTAL_SI_XTAL_XMD_2 = 0x24,
+#define XTAL_SI_LDO_LPS                GENMASK(6, 4)
+       XTAL_SI_XTAL_XMD_4 = 0x26,
+#define XTAL_SI_LPS_CAP                GENMASK(3, 0)
+       XTAL_SI_CV = 0x41,
+       XTAL_SI_LOW_ADDR = 0x62,
+#define XTAL_SI_LOW_ADDR_MASK  GENMASK(7, 0)
+       XTAL_SI_CTRL = 0x63,
+#define XTAL_SI_MODE_SEL_MASK  GENMASK(7, 6)
+#define XTAL_SI_RDY            BIT(5)
+#define XTAL_SI_HIGH_ADDR_MASK GENMASK(2, 0)
+       XTAL_SI_READ_VAL = 0x7A,
+       XTAL_SI_WL_RFC_S0 = 0x80,
+#define XTAL_SI_RF00           BIT(0)
+       XTAL_SI_WL_RFC_S1 = 0x81,
+#define XTAL_SI_RF10           BIT(0)
+       XTAL_SI_ANAPAR_WL = 0x90,
+#define XTAL_SI_SRAM2RFC       BIT(7)
+#define XTAL_SI_GND_SHDN_WL    BIT(6)
+#define XTAL_SI_SHDN_WL                BIT(5)
+#define XTAL_SI_RFC2RF         BIT(4)
+#define XTAL_SI_OFF_EI         BIT(3)
+#define XTAL_SI_OFF_WEI                BIT(2)
+#define XTAL_SI_PON_EI         BIT(1)
+#define XTAL_SI_PON_WEI                BIT(0)
+       XTAL_SI_SRAM_CTRL = 0xA1,
+#define FULL_BIT_MASK          GENMASK(7, 0)
+};
+
+int rtw89_mac_write_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask);
+int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val);
+
 #endif
index 5df7ace..fca9f82 100644
@@ -66,6 +66,9 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
 {
        struct rtw89_dev *rtwdev = hw->priv;
 
+       /* let the previous IPS work finish to ensure we don't leave IPS twice */
+       cancel_work_sync(&rtwdev->ips_work);
+
        mutex_lock(&rtwdev->mutex);
        rtw89_leave_ps_mode(rtwdev);
 
@@ -347,6 +350,13 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
                        rtw89_phy_set_bss_color(rtwdev, vif);
                        rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, vif);
                        rtw89_mac_port_update(rtwdev, rtwvif);
+                       rtw89_store_op_chan(rtwdev);
+               } else {
+                       /* Abort the ongoing scan if cancel_scan wasn't
+                        * issued when the peer disconnected us
+                        */
+                       if (rtwdev->scanning)
+                               rtw89_hw_scan_abort(rtwdev, vif);
                }
        }
 
@@ -684,15 +694,9 @@ static void rtw89_ops_sw_scan_start(struct ieee80211_hw *hw,
 {
        struct rtw89_dev *rtwdev = hw->priv;
        struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
-       struct rtw89_hal *hal = &rtwdev->hal;
 
        mutex_lock(&rtwdev->mutex);
-       rtwdev->scanning = true;
-       rtw89_leave_lps(rtwdev);
-       rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, hal->current_band_type);
-       rtw89_chip_rfk_scan(rtwdev, true);
-       rtw89_hci_recalc_int_mit(rtwdev);
-       rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, mac_addr);
+       rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, false);
        mutex_unlock(&rtwdev->mutex);
 }
 
@@ -700,14 +704,9 @@ static void rtw89_ops_sw_scan_complete(struct ieee80211_hw *hw,
                                       struct ieee80211_vif *vif)
 {
        struct rtw89_dev *rtwdev = hw->priv;
-       struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
 
        mutex_lock(&rtwdev->mutex);
-       rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
-       rtw89_chip_rfk_scan(rtwdev, false);
-       rtw89_btc_ntfy_scan_finish(rtwdev, RTW89_PHY_0);
-       rtwdev->scanning = false;
-       rtwdev->dig.bypass_dig = true;
+       rtw89_core_scan_complete(rtwdev, vif, false);
        mutex_unlock(&rtwdev->mutex);
 }
 
@@ -720,6 +719,46 @@ static void rtw89_ops_reconfig_complete(struct ieee80211_hw *hw,
                rtw89_ser_recfg_done(rtwdev);
 }
 
+static int rtw89_ops_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                            struct ieee80211_scan_request *req)
+{
+       struct rtw89_dev *rtwdev = hw->priv;
+       int ret = 0;
+
+       if (!rtwdev->fw.scan_offload)
+               return 1;
+
+       if (rtwdev->scanning)
+               return -EBUSY;
+
+       mutex_lock(&rtwdev->mutex);
+       rtw89_hw_scan_start(rtwdev, vif, req);
+       ret = rtw89_hw_scan_offload(rtwdev, vif, true);
+       if (ret) {
+               rtw89_hw_scan_abort(rtwdev, vif);
+               rtw89_err(rtwdev, "HW scan failed with status: %d\n", ret);
+       }
+       mutex_unlock(&rtwdev->mutex);
+
+       return ret;
+}
+
+static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw,
+                                    struct ieee80211_vif *vif)
+{
+       struct rtw89_dev *rtwdev = hw->priv;
+
+       if (!rtwdev->fw.scan_offload)
+               return;
+
+       if (!rtwdev->scanning)
+               return;
+
+       mutex_lock(&rtwdev->mutex);
+       rtw89_hw_scan_abort(rtwdev, vif);
+       mutex_unlock(&rtwdev->mutex);
+}
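Note the contract behind the early `return 1`: mac80211 treats a return value of 1 from ->hw_scan as a request to fall back to software scanning, so firmware without scan offload degrades to the sw_scan_start()/sw_scan_complete() path above instead of failing the scan.

    /* annotated restatement of the guard above */
    if (!rtwdev->fw.scan_offload)
            return 1;       /* not an error: mac80211 falls back to sw scan */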
+
 const struct ieee80211_ops rtw89_ops = {
        .tx                     = rtw89_ops_tx,
        .wake_tx_queue          = rtw89_ops_wake_tx_queue,
@@ -746,6 +785,8 @@ const struct ieee80211_ops rtw89_ops = {
        .sw_scan_start          = rtw89_ops_sw_scan_start,
        .sw_scan_complete       = rtw89_ops_sw_scan_complete,
        .reconfig_complete      = rtw89_ops_reconfig_complete,
+       .hw_scan                = rtw89_ops_hw_scan,
+       .cancel_hw_scan         = rtw89_ops_cancel_hw_scan,
        .set_sar_specs          = rtw89_ops_set_sar_specs,
 };
 EXPORT_SYMBOL(rtw89_ops);
index 6481085..e79bfc3 100644
@@ -62,7 +62,7 @@ static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
                                 struct rtw89_pci_tx_ring *tx_ring)
 {
        struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
-       u32 addr_idx = bd_ring->addr_idx;
+       u32 addr_idx = bd_ring->addr.idx;
        u32 cnt, idx;
 
        idx = rtw89_read32(rtwdev, addr_idx);
@@ -121,7 +121,7 @@ static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
                                 struct rtw89_pci_rx_ring *rx_ring)
 {
        struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
-       u32 addr_idx = bd_ring->addr_idx;
+       u32 addr_idx = bd_ring->addr.idx;
        u32 cnt, idx;
 
        idx = rtw89_read32(rtwdev, addr_idx);
@@ -304,7 +304,7 @@ static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
                cnt -= rx_cnt;
        }
 
-       rtw89_write16(rtwdev, bd_ring->addr_idx, bd_ring->wp);
+       rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
 }
 
 static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
@@ -555,7 +555,7 @@ static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
                cnt -= release_cnt;
        }
 
-       rtw89_write16(rtwdev, bd_ring->addr_idx, bd_ring->wp);
+       rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
 }
 
 static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
@@ -598,7 +598,7 @@ static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
                rx_ring = &rtwpci->rx_rings[i];
                bd_ring = &rx_ring->bd_ring;
 
-               reg_idx = rtw89_read32(rtwdev, bd_ring->addr_idx);
+               reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
                hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
                host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
                hw_idx_next = (hw_idx + 1) % bd_ring->len;
@@ -697,71 +697,110 @@ exit:
        return irqret;
 }
 
-#define case_TXCHADDRS(txch) \
-       case RTW89_TXCH_##txch: \
-               *addr_num = R_AX_##txch##_TXBD_NUM; \
-               *addr_idx = R_AX_##txch##_TXBD_IDX; \
-               *addr_bdram = R_AX_##txch##_BDRAM_CTRL; \
-               *addr_desa_l = R_AX_##txch##_TXBD_DESA_L; \
-               *addr_desa_h = R_AX_##txch##_TXBD_DESA_H; \
-               break
-
-static int rtw89_pci_get_txch_addrs(enum rtw89_tx_channel txch,
-                                   u32 *addr_num,
-                                   u32 *addr_idx,
-                                   u32 *addr_bdram,
-                                   u32 *addr_desa_l,
-                                   u32 *addr_desa_h)
-{
-       switch (txch) {
-       case_TXCHADDRS(ACH0);
-       case_TXCHADDRS(ACH1);
-       case_TXCHADDRS(ACH2);
-       case_TXCHADDRS(ACH3);
-       case_TXCHADDRS(ACH4);
-       case_TXCHADDRS(ACH5);
-       case_TXCHADDRS(ACH6);
-       case_TXCHADDRS(ACH7);
-       case_TXCHADDRS(CH8);
-       case_TXCHADDRS(CH9);
-       case_TXCHADDRS(CH10);
-       case_TXCHADDRS(CH11);
-       case_TXCHADDRS(CH12);
-       default:
+#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
+       [RTW89_TXCH_##txch] = { \
+               .num = R_AX_##txch##_TXBD_NUM ##v, \
+               .idx = R_AX_##txch##_TXBD_IDX ##v, \
+               .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
+               .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
+               .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
+       }
+
+#define DEF_TXCHADDRS(info, txch, v...) \
+       [RTW89_TXCH_##txch] = { \
+               .num = R_AX_##txch##_TXBD_NUM, \
+               .idx = R_AX_##txch##_TXBD_IDX, \
+               .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
+               .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
+               .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
+       }
+
+#define DEF_RXCHADDRS(info, rxch, v...) \
+       [RTW89_RXCH_##rxch] = { \
+               .num = R_AX_##rxch##_RXBD_NUM ##v, \
+               .idx = R_AX_##rxch##_RXBD_IDX ##v, \
+               .desa_l = R_AX_##rxch##_RXBD_DESA_L ##v, \
+               .desa_h = R_AX_##rxch##_RXBD_DESA_H ##v, \
+       }
+
+const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
+       .tx = {
+               DEF_TXCHADDRS(info, ACH0),
+               DEF_TXCHADDRS(info, ACH1),
+               DEF_TXCHADDRS(info, ACH2),
+               DEF_TXCHADDRS(info, ACH3),
+               DEF_TXCHADDRS(info, ACH4),
+               DEF_TXCHADDRS(info, ACH5),
+               DEF_TXCHADDRS(info, ACH6),
+               DEF_TXCHADDRS(info, ACH7),
+               DEF_TXCHADDRS(info, CH8),
+               DEF_TXCHADDRS(info, CH9),
+               DEF_TXCHADDRS_TYPE1(info, CH10),
+               DEF_TXCHADDRS_TYPE1(info, CH11),
+               DEF_TXCHADDRS(info, CH12),
+       },
+       .rx = {
+               DEF_RXCHADDRS(info, RXQ),
+               DEF_RXCHADDRS(info, RPQ),
+       },
+};
+EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);
+
+const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
+       .tx = {
+               DEF_TXCHADDRS(info, ACH0, _V1),
+               DEF_TXCHADDRS(info, ACH1, _V1),
+               DEF_TXCHADDRS(info, ACH2, _V1),
+               DEF_TXCHADDRS(info, ACH3, _V1),
+               DEF_TXCHADDRS(info, ACH4, _V1),
+               DEF_TXCHADDRS(info, ACH5, _V1),
+               DEF_TXCHADDRS(info, ACH6, _V1),
+               DEF_TXCHADDRS(info, ACH7, _V1),
+               DEF_TXCHADDRS(info, CH8, _V1),
+               DEF_TXCHADDRS(info, CH9, _V1),
+               DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
+               DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
+               DEF_TXCHADDRS(info, CH12, _V1),
+       },
+       .rx = {
+               DEF_RXCHADDRS(info, RXQ, _V1),
+               DEF_RXCHADDRS(info, RPQ, _V1),
+       },
+};
+EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);
+
+#undef DEF_TXCHADDRS_TYPE1
+#undef DEF_TXCHADDRS
+#undef DEF_RXCHADDRS
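The variadic `v...` argument to these helpers exists purely for token pasting: DEF_TXCHADDRS_TYPE1() suffixes all five registers, while DEF_TXCHADDRS() leaves .num/.idx unsuffixed because most channels kept those registers on v1 chips (only CH10/CH11, built with the TYPE1 variant, moved them); the `info` parameter is currently unused. Expansion sketch:

    /* DEF_TXCHADDRS(info, ACH0, _V1) expands to roughly: */
    [RTW89_TXCH_ACH0] = {
            .num    = R_AX_ACH0_TXBD_NUM,          /* no suffix: unchanged on v1 */
            .idx    = R_AX_ACH0_TXBD_IDX,
            .bdram  = R_AX_ACH0_BDRAM_CTRL_V1,     /* ##v pastes the suffix */
            .desa_l = R_AX_ACH0_TXBD_DESA_L_V1,
            .desa_h = R_AX_ACH0_TXBD_DESA_H_V1,
    },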
+
+static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
+                                   enum rtw89_tx_channel txch,
+                                   const struct rtw89_pci_ch_dma_addr **addr)
+{
+       const struct rtw89_pci_info *info = rtwdev->pci_info;
+
+       if (txch >= RTW89_TXCH_NUM)
                return -EINVAL;
-       }
+
+       *addr = &info->dma_addr_set->tx[txch];
 
        return 0;
 }
 
-#undef case_TXCHADDRS
-
-#define case_RXCHADDRS(rxch) \
-       case RTW89_RXCH_##rxch: \
-               *addr_num = R_AX_##rxch##_RXBD_NUM; \
-               *addr_idx = R_AX_##rxch##_RXBD_IDX; \
-               *addr_desa_l = R_AX_##rxch##_RXBD_DESA_L; \
-               *addr_desa_h = R_AX_##rxch##_RXBD_DESA_H; \
-               break
-
-static int rtw89_pci_get_rxch_addrs(enum rtw89_rx_channel rxch,
-                                   u32 *addr_num,
-                                   u32 *addr_idx,
-                                   u32 *addr_desa_l,
-                                   u32 *addr_desa_h)
+static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
+                                   enum rtw89_rx_channel rxch,
+                                   const struct rtw89_pci_ch_dma_addr **addr)
 {
-       switch (rxch) {
-       case_RXCHADDRS(RXQ);
-       case_RXCHADDRS(RPQ);
-       default:
+       const struct rtw89_pci_info *info = rtwdev->pci_info;
+
+       if (rxch >= RTW89_RXCH_NUM)
                return -EINVAL;
-       }
+
+       *addr = &info->dma_addr_set->rx[rxch];
 
        return 0;
 }
 
-#undef case_RXCHADDRS
-
 static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
 {
        struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;
@@ -837,7 +876,7 @@ static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_t
        struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
        u32 host_idx, addr;
 
-       addr = bd_ring->addr_idx;
+       addr = bd_ring->addr.idx;
        host_idx = bd_ring->wp;
        rtw89_write16(rtwdev, addr, host_idx);
 }
@@ -879,7 +918,7 @@ static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
         * just use for loop with udelay here.
         */
        for (i = 0; i < 60; i++) {
-               cur_idx = rtw89_read32(rtwdev, bd_ring->addr_idx);
+               cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
                cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
                if (cur_rp == bd_ring->wp)
                        return;
@@ -1140,9 +1179,9 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
                tx_ring = &rtwpci->tx_rings[i];
                bd_ring = &tx_ring->bd_ring;
                bd_ram = &bd_ram_table[i];
-               addr_num = bd_ring->addr_num;
-               addr_bdram = bd_ring->addr_bdram;
-               addr_desa_l = bd_ring->addr_desa_l;
+               addr_num = bd_ring->addr.num;
+               addr_bdram = bd_ring->addr.bdram;
+               addr_desa_l = bd_ring->addr.desa_l;
                bd_ring->wp = 0;
                bd_ring->rp = 0;
 
@@ -1158,8 +1197,8 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
        for (i = 0; i < RTW89_RXCH_NUM; i++) {
                rx_ring = &rtwpci->rx_rings[i];
                bd_ring = &rx_ring->bd_ring;
-               addr_num = bd_ring->addr_num;
-               addr_desa_l = bd_ring->addr_desa_l;
+               addr_num = bd_ring->addr.num;
+               addr_desa_l = bd_ring->addr.desa_l;
                bd_ring->wp = 0;
                bd_ring->rp = 0;
                rx_ring->diliver_skb = NULL;
@@ -2188,14 +2227,10 @@ static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
                                   u32 desc_size, u32 len,
                                   enum rtw89_tx_channel txch)
 {
+       const struct rtw89_pci_ch_dma_addr *txch_addr;
        int ring_sz = desc_size * len;
        u8 *head;
        dma_addr_t dma;
-       u32 addr_num;
-       u32 addr_idx;
-       u32 addr_bdram;
-       u32 addr_desa_l;
-       u32 addr_desa_h;
        int ret;
 
        ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
@@ -2204,8 +2239,7 @@ static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
                goto err;
        }
 
-       ret = rtw89_pci_get_txch_addrs(txch, &addr_num, &addr_idx, &addr_bdram,
-                                      &addr_desa_l, &addr_desa_h);
+       ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr);
        if (ret) {
                rtw89_err(rtwdev, "failed to get address of txch %d", txch);
                goto err_free_wd_ring;
@@ -2222,11 +2256,7 @@ static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
        tx_ring->bd_ring.dma = dma;
        tx_ring->bd_ring.len = len;
        tx_ring->bd_ring.desc_size = desc_size;
-       tx_ring->bd_ring.addr_num = addr_num;
-       tx_ring->bd_ring.addr_idx = addr_idx;
-       tx_ring->bd_ring.addr_bdram = addr_bdram;
-       tx_ring->bd_ring.addr_desa_l = addr_desa_l;
-       tx_ring->bd_ring.addr_desa_h = addr_desa_h;
+       tx_ring->bd_ring.addr = *txch_addr;
        tx_ring->bd_ring.wp = 0;
        tx_ring->bd_ring.rp = 0;
        tx_ring->txch = txch;
@@ -2278,20 +2308,16 @@ static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
                                   struct rtw89_pci_rx_ring *rx_ring,
                                   u32 desc_size, u32 len, u32 rxch)
 {
+       const struct rtw89_pci_ch_dma_addr *rxch_addr;
        struct sk_buff *skb;
        u8 *head;
        dma_addr_t dma;
-       u32 addr_num;
-       u32 addr_idx;
-       u32 addr_desa_l;
-       u32 addr_desa_h;
        int ring_sz = desc_size * len;
        int buf_sz = RTW89_PCI_RX_BUF_SIZE;
        int i, allocated;
        int ret;
 
-       ret = rtw89_pci_get_rxch_addrs(rxch, &addr_num, &addr_idx,
-                                      &addr_desa_l, &addr_desa_h);
+       ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr);
        if (ret) {
                rtw89_err(rtwdev, "failed to get address of rxch %d", rxch);
                return ret;
@@ -2307,10 +2333,7 @@ static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
        rx_ring->bd_ring.dma = dma;
        rx_ring->bd_ring.len = len;
        rx_ring->bd_ring.desc_size = desc_size;
-       rx_ring->bd_ring.addr_num = addr_num;
-       rx_ring->bd_ring.addr_idx = addr_idx;
-       rx_ring->bd_ring.addr_desa_l = addr_desa_l;
-       rx_ring->bd_ring.addr_desa_h = addr_desa_h;
+       rx_ring->bd_ring.addr = *rxch_addr;
        rx_ring->bd_ring.wp = 0;
        rx_ring->bd_ring.rp = 0;
        rx_ring->buf_sz = buf_sz;
@@ -2937,6 +2960,7 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        info = (const struct rtw89_driver_info *)id->driver_data;
        rtwdev->chip = info->chip;
+       rtwdev->pci_info = info->bus.pci;
 
        ret = rtw89_core_init(rtwdev);
        if (ret) {
index 7f1ee15..b84acd0 100644
 #define R_AX_CH10_TXBD_IDX     0x137C /* Management Queue band 1 */
 #define R_AX_CH11_TXBD_IDX     0x1380 /* HI Queue band 1 */
 #define R_AX_CH12_TXBD_IDX     0x1080 /* FWCMD Queue */
+#define R_AX_CH10_TXBD_IDX_V1  0x11D0
+#define R_AX_CH11_TXBD_IDX_V1  0x11D4
+#define R_AX_RXQ_RXBD_IDX_V1   0x1218
+#define R_AX_RPQ_RXBD_IDX_V1   0x121C
 #define TXBD_HW_IDX_MASK       GENMASK(27, 16)
 #define TXBD_HOST_IDX_MASK     GENMASK(11, 0)
 
 #define R_AX_RXQ_RXBD_DESA_H   0x1104
 #define R_AX_RPQ_RXBD_DESA_L   0x1108
 #define R_AX_RPQ_RXBD_DESA_H   0x110C
+#define R_AX_RXQ_RXBD_DESA_L_V1 0x1220
+#define R_AX_RXQ_RXBD_DESA_H_V1 0x1224
+#define R_AX_RPQ_RXBD_DESA_L_V1 0x1228
+#define R_AX_RPQ_RXBD_DESA_H_V1 0x122C
+#define R_AX_ACH0_TXBD_DESA_L_V1 0x1230
+#define R_AX_ACH0_TXBD_DESA_H_V1 0x1234
+#define R_AX_ACH1_TXBD_DESA_L_V1 0x1238
+#define R_AX_ACH1_TXBD_DESA_H_V1 0x123C
+#define R_AX_ACH2_TXBD_DESA_L_V1 0x1240
+#define R_AX_ACH2_TXBD_DESA_H_V1 0x1244
+#define R_AX_ACH3_TXBD_DESA_L_V1 0x1248
+#define R_AX_ACH3_TXBD_DESA_H_V1 0x124C
+#define R_AX_ACH4_TXBD_DESA_L_V1 0x1250
+#define R_AX_ACH4_TXBD_DESA_H_V1 0x1254
+#define R_AX_ACH5_TXBD_DESA_L_V1 0x1258
+#define R_AX_ACH5_TXBD_DESA_H_V1 0x125C
+#define R_AX_ACH6_TXBD_DESA_L_V1 0x1260
+#define R_AX_ACH6_TXBD_DESA_H_V1 0x1264
+#define R_AX_ACH7_TXBD_DESA_L_V1 0x1268
+#define R_AX_ACH7_TXBD_DESA_H_V1 0x126C
+#define R_AX_CH8_TXBD_DESA_L_V1 0x1270
+#define R_AX_CH8_TXBD_DESA_H_V1 0x1274
+#define R_AX_CH9_TXBD_DESA_L_V1 0x1278
+#define R_AX_CH9_TXBD_DESA_H_V1 0x127C
+#define R_AX_CH12_TXBD_DESA_L_V1 0x1280
+#define R_AX_CH12_TXBD_DESA_H_V1 0x1284
+#define R_AX_CH10_TXBD_DESA_L_V1 0x1458
+#define R_AX_CH10_TXBD_DESA_H_V1 0x145C
+#define R_AX_CH11_TXBD_DESA_L_V1 0x1460
+#define R_AX_CH11_TXBD_DESA_H_V1 0x1464
 #define B_AX_DESC_NUM_MSK              GENMASK(11, 0)
 
 #define R_AX_RXQ_RXBD_NUM      0x1020
 #define R_AX_CH10_TXBD_NUM     0x1338
 #define R_AX_CH11_TXBD_NUM     0x133A
 #define R_AX_CH12_TXBD_NUM     0x1038
+#define R_AX_RXQ_RXBD_NUM_V1   0x1210
+#define R_AX_RPQ_RXBD_NUM_V1   0x1212
+#define R_AX_CH10_TXBD_NUM_V1  0x1438
+#define R_AX_CH11_TXBD_NUM_V1  0x143A
 
 #define R_AX_ACH0_BDRAM_CTRL   0x1200
 #define R_AX_ACH1_BDRAM_CTRL   0x1204
 #define R_AX_CH10_BDRAM_CTRL   0x1320
 #define R_AX_CH11_BDRAM_CTRL   0x1324
 #define R_AX_CH12_BDRAM_CTRL   0x1228
+#define R_AX_ACH0_BDRAM_CTRL_V1 0x1300
+#define R_AX_ACH1_BDRAM_CTRL_V1 0x1304
+#define R_AX_ACH2_BDRAM_CTRL_V1 0x1308
+#define R_AX_ACH3_BDRAM_CTRL_V1 0x130C
+#define R_AX_ACH4_BDRAM_CTRL_V1 0x1310
+#define R_AX_ACH5_BDRAM_CTRL_V1 0x1314
+#define R_AX_ACH6_BDRAM_CTRL_V1 0x1318
+#define R_AX_ACH7_BDRAM_CTRL_V1 0x131C
+#define R_AX_CH8_BDRAM_CTRL_V1 0x1320
+#define R_AX_CH9_BDRAM_CTRL_V1 0x1324
+#define R_AX_CH12_BDRAM_CTRL_V1 0x1328
+#define R_AX_CH10_BDRAM_CTRL_V1 0x1420
+#define R_AX_CH11_BDRAM_CTRL_V1 0x1424
 #define BDRAM_SIDX_MASK                GENMASK(7, 0)
 #define BDRAM_MAX_MASK         GENMASK(15, 8)
 #define BDRAM_MIN_MASK         GENMASK(23, 16)
@@ -382,6 +433,23 @@ enum rtw89_pcie_clkdly_hw {
        PCIE_CLKDLY_HW_200US = 0x5,
 };
 
+struct rtw89_pci_ch_dma_addr {
+       u32 num;
+       u32 idx;
+       u32 bdram;
+       u32 desa_l;
+       u32 desa_h;
+};
+
+struct rtw89_pci_ch_dma_addr_set {
+       struct rtw89_pci_ch_dma_addr tx[RTW89_TXCH_NUM];
+       struct rtw89_pci_ch_dma_addr rx[RTW89_RXCH_NUM];
+};
+
+struct rtw89_pci_info {
+       const struct rtw89_pci_ch_dma_addr_set *dma_addr_set;
+};
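rtw89_pci_probe() below stores this descriptor via `rtwdev->pci_info = info->bus.pci`, so selecting an address map becomes one line of per-chip glue. A sketch with a placeholder chip name (not part of this series):

    static const struct rtw89_pci_info rtw_dummy_chip_pci_info = {
            .dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1,
    };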
+
 struct rtw89_pci_bd_ram {
        u8 start_idx;
        u8 max_num;
@@ -469,11 +537,7 @@ struct rtw89_pci_dma_ring {
        u8 desc_size;
        dma_addr_t dma;
 
-       u32 addr_num;
-       u32 addr_idx;
-       u32 addr_bdram;
-       u32 addr_desa_l;
-       u32 addr_desa_h;
+       struct rtw89_pci_ch_dma_addr addr;
 
        u32 len;
        u32 wp; /* host idx */
@@ -626,6 +690,8 @@ static inline bool rtw89_pci_ltr_is_err_reg_val(u32 val)
 }
 
 extern const struct dev_pm_ops rtw89_pm_ops;
+extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set;
+extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1;
 
 struct pci_device_id;
 
index 130db2f..ac211d8 100644
@@ -4,6 +4,7 @@
 
 #include "debug.h"
 #include "fw.h"
+#include "mac.h"
 #include "phy.h"
 #include "ps.h"
 #include "reg.h"
@@ -161,6 +162,11 @@ static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtw
                cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_5GHZ].legacy,
                                           RA_MASK_OFDM_RATES);
                break;
+       case RTW89_BAND_6G:
+               band = NL80211_BAND_6GHZ;
+               cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_6GHZ].legacy,
+                                          RA_MASK_OFDM_RATES);
+               break;
        default:
                rtw89_warn(rtwdev, "unhandled band type %d\n", hal->current_band_type);
                return -1;
@@ -254,15 +260,25 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
                        ldpc_en = 1;
        }
 
-       if (rtwdev->hal.current_band_type == RTW89_BAND_2G) {
+       switch (rtwdev->hal.current_band_type) {
+       case RTW89_BAND_2G:
                ra_mask |= sta->supp_rates[NL80211_BAND_2GHZ];
                if (sta->supp_rates[NL80211_BAND_2GHZ] <= 0xf)
                        mode |= RTW89_RA_MODE_CCK;
                else
                        mode |= RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM;
-       } else {
+               break;
+       case RTW89_BAND_5G:
                ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4;
                mode |= RTW89_RA_MODE_OFDM;
+               break;
+       case RTW89_BAND_6G:
+               ra_mask |= (u64)sta->supp_rates[NL80211_BAND_6GHZ] << 4;
+               mode |= RTW89_RA_MODE_OFDM;
+               break;
+       default:
+               rtw89_err(rtwdev, "Unknown band type\n");
+               break;
        }
 
        ra_mask_bak = ra_mask;
@@ -287,6 +303,11 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
        ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta);
 
        switch (sta->bandwidth) {
+       case IEEE80211_STA_RX_BW_160:
+               bw_mode = RTW89_CHANNEL_WIDTH_160;
+               sgi = sta->vht_cap.vht_supported &&
+                     (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
+               break;
        case IEEE80211_STA_RX_BW_80:
                bw_mode = RTW89_CHANNEL_WIDTH_80;
                sgi = sta->vht_cap.vht_supported &&
@@ -583,6 +604,12 @@ u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
 }
 EXPORT_SYMBOL(rtw89_phy_get_txsc);
 
+static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev)
+{
+       return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) ||
+              !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_R_BUSY_V1);
+}
+
 u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
                      u32 addr, u32 mask)
 {
@@ -605,6 +632,56 @@ u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
 }
 EXPORT_SYMBOL(rtw89_phy_read_rf);
 
+static u32 rtw89_phy_read_rf_a(struct rtw89_dev *rtwdev,
+                              enum rtw89_rf_path rf_path, u32 addr, u32 mask)
+{
+       bool busy;
+       bool done;
+       u32 val;
+       int ret;
+
+       ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
+                                      1, 30, false, rtwdev);
+       if (ret) {
+               rtw89_err(rtwdev, "read rf busy swsi\n");
+               return INV_RF_DATA;
+       }
+
+       mask &= RFREG_MASK;
+
+       val = FIELD_PREP(B_SWSI_READ_ADDR_PATH_V1, rf_path) |
+             FIELD_PREP(B_SWSI_READ_ADDR_ADDR_V1, addr);
+       rtw89_phy_write32_mask(rtwdev, R_SWSI_READ_ADDR_V1, B_SWSI_READ_ADDR_V1, val);
+       udelay(2);
+
+       ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done, 1,
+                                      30, false, rtwdev, R_SWSI_V1,
+                                      B_SWSI_R_DATA_DONE_V1);
+       if (ret) {
+               rtw89_err(rtwdev, "read swsi busy\n");
+               return INV_RF_DATA;
+       }
+
+       return rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, mask);
+}
+
+u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+                        u32 addr, u32 mask)
+{
+       bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);
+
+       if (rf_path >= rtwdev->chip->rf_path_num) {
+               rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+               return INV_RF_DATA;
+       }
+
+       if (ad_sel)
+               return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
+       else
+               return rtw89_phy_read_rf_a(rtwdev, rf_path, addr, mask);
+}
+EXPORT_SYMBOL(rtw89_phy_read_rf_v1);
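The v1 read dispatches on an address flag: if BIT(16) (RTW89_RF_ADDR_ADSEL_MASK, defined in phy.h below) is set, the access takes the legacy direct path; otherwise it goes through the SWSI mailbox with its busy/done polling. Sketch, using 0x00 as an illustrative RF address:

    u32 via_swsi   = rtw89_phy_read_rf_v1(rtwdev, RF_PATH_A, 0x00, RFREG_MASK);
    u32 via_direct = rtw89_phy_read_rf_v1(rtwdev, RF_PATH_A,
                                          0x00 | RTW89_RF_ADDR_ADSEL_MASK,
                                          RFREG_MASK);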
+
 bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
                        u32 addr, u32 mask, u32 data)
 {
@@ -630,6 +707,60 @@ bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
 }
 EXPORT_SYMBOL(rtw89_phy_write_rf);
 
+static bool rtw89_phy_write_rf_a(struct rtw89_dev *rtwdev,
+                                enum rtw89_rf_path rf_path, u32 addr, u32 mask,
+                                u32 data)
+{
+       u8 bit_shift;
+       u32 val;
+       bool busy, b_msk_en = false;
+       int ret;
+
+       ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
+                                      1, 30, false, rtwdev);
+       if (ret) {
+               rtw89_err(rtwdev, "write rf busy swsi\n");
+               return false;
+       }
+
+       data &= RFREG_MASK;
+       mask &= RFREG_MASK;
+
+       if (mask != RFREG_MASK) {
+               b_msk_en = true;
+               rtw89_phy_write32_mask(rtwdev, R_SWSI_BIT_MASK_V1, RFREG_MASK,
+                                      mask);
+               bit_shift = __ffs(mask);
+               data = (data << bit_shift) & RFREG_MASK;
+       }
+
+       val = FIELD_PREP(B_SWSI_DATA_BIT_MASK_EN_V1, b_msk_en) |
+             FIELD_PREP(B_SWSI_DATA_PATH_V1, rf_path) |
+             FIELD_PREP(B_SWSI_DATA_ADDR_V1, addr) |
+             FIELD_PREP(B_SWSI_DATA_VAL_V1, data);
+
+       rtw89_phy_write32_mask(rtwdev, R_SWSI_DATA_V1, MASKDWORD, val);
+
+       return true;
+}
+
+bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+                          u32 addr, u32 mask, u32 data)
+{
+       bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);
+
+       if (rf_path >= rtwdev->chip->rf_path_num) {
+               rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+               return false;
+       }
+
+       if (ad_sel)
+               return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
+       else
+               return rtw89_phy_write_rf_a(rtwdev, rf_path, addr, mask, data);
+}
+EXPORT_SYMBOL(rtw89_phy_write_rf_v1);
+
 static void rtw89_phy_bb_reset(struct rtw89_dev *rtwdev,
                               enum rtw89_phy_idx phy_idx)
 {
@@ -731,6 +862,21 @@ static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev,
        }
 }
 
+void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
+                               const struct rtw89_reg2_def *reg,
+                               enum rtw89_rf_path rf_path,
+                               void *extra_data)
+{
+       rtw89_write_rf(rtwdev, rf_path, reg->addr, RFREG_MASK, reg->data);
+
+       if (reg->addr < 0x100)
+               return;
+
+       rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
+                                    (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
+}
+EXPORT_SYMBOL(rtw89_phy_config_rf_reg_v1);
+
 static int rtw89_phy_sel_headline(struct rtw89_dev *rtwdev,
                                  const struct rtw89_phy_table *table,
                                  u32 *headline_size, u32 *headline_idx,
@@ -902,6 +1048,8 @@ static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
 
 void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev)
 {
+       void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
+                      enum rtw89_rf_path rf_path, void *data);
        const struct rtw89_chip_info *chip = rtwdev->chip;
        const struct rtw89_phy_table *rf_table;
        struct rtw89_fw_h2c_rf_reg_info *rf_reg_info;
@@ -912,13 +1060,13 @@ void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev)
                return;
 
        for (path = RF_PATH_A; path < chip->rf_path_num; path++) {
-               rf_reg_info->rf_path = path;
                rf_table = chip->rf_table[path];
-               rtw89_phy_init_reg(rtwdev, rf_table, rtw89_phy_config_rf_reg,
-                                  (void *)rf_reg_info);
+               rf_reg_info->rf_path = rf_table->rf_path;
+               config = rf_table->config ? rf_table->config : rtw89_phy_config_rf_reg;
+               rtw89_phy_init_reg(rtwdev, rf_table, config, (void *)rf_reg_info);
                if (rtw89_phy_config_rf_reg_fw(rtwdev, rf_reg_info))
                        rtw89_warn(rtwdev, "rf path %d reg h2c config failed\n",
-                                  path);
+                                  rf_reg_info->rf_path);
        }
        kfree(rf_reg_info);
 }
@@ -1095,8 +1243,36 @@ s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev,
 }
 EXPORT_SYMBOL(rtw89_phy_read_txpwr_byrate);
 
-static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 channel)
+static u8 rtw89_channel_6g_to_idx(struct rtw89_dev *rtwdev, u8 channel_6g)
+{
+       switch (channel_6g) {
+       case 1 ... 29:
+               return (channel_6g - 1) / 2;
+       case 33 ... 61:
+               return (channel_6g - 3) / 2;
+       case 65 ... 93:
+               return (channel_6g - 5) / 2;
+       case 97 ... 125:
+               return (channel_6g - 7) / 2;
+       case 129 ... 157:
+               return (channel_6g - 9) / 2;
+       case 161 ... 189:
+               return (channel_6g - 11) / 2;
+       case 193 ... 221:
+               return (channel_6g - 13) / 2;
+       case 225 ... 253:
+               return (channel_6g - 15) / 2;
+       default:
+               rtw89_warn(rtwdev, "unknown 6g channel: %d\n", channel_6g);
+               return 0;
+       }
+}
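The piecewise arithmetic keeps the index contiguous across the channels the table skips (31, 63, 95, ...): each case shifts by two more than the previous one, so the slots line up end to end.

    /* Worked examples of the mapping above: */
    rtw89_channel_6g_to_idx(rtwdev, 1);    /* (1 - 1) / 2    = 0   */
    rtw89_channel_6g_to_idx(rtwdev, 29);   /* (29 - 1) / 2   = 14  */
    rtw89_channel_6g_to_idx(rtwdev, 33);   /* (33 - 3) / 2   = 15, contiguous */
    rtw89_channel_6g_to_idx(rtwdev, 253);  /* (253 - 15) / 2 = 119, last slot */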
+
+static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 band, u8 channel)
 {
+       if (band == RTW89_BAND_6G)
+               return rtw89_channel_6g_to_idx(rtwdev, channel);
+
        switch (channel) {
        case 1 ... 14:
                return channel - 1;
@@ -1116,8 +1292,8 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
                              u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch)
 {
        const struct rtw89_chip_info *chip = rtwdev->chip;
-       u8 ch_idx = rtw89_channel_to_idx(rtwdev, ch);
        u8 band = rtwdev->hal.current_band_type;
+       u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
        u8 regd = rtw89_regd_get(rtwdev, band);
        s8 lmt = 0, sar;
 
@@ -1134,6 +1310,12 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
                        lmt = (*chip->txpwr_lmt_5g)[bw][ntx][rs][bf]
                                                   [RTW89_WW][ch_idx];
                break;
+       case RTW89_BAND_6G:
+               lmt = (*chip->txpwr_lmt_6g)[bw][ntx][rs][bf][regd][ch_idx];
+               if (!lmt)
+                       lmt = (*chip->txpwr_lmt_6g)[bw][ntx][rs][bf]
+                                                  [RTW89_WW][ch_idx];
+               break;
        default:
                rtw89_warn(rtwdev, "unknown band type: %d\n", band);
                return 0;
@@ -1172,14 +1354,14 @@ static void rtw89_phy_fill_txpwr_limit_20m(struct rtw89_dev *rtwdev,
 
 static void rtw89_phy_fill_txpwr_limit_40m(struct rtw89_dev *rtwdev,
                                           struct rtw89_txpwr_limit *lmt,
-                                          u8 ntx, u8 ch)
+                                          u8 ntx, u8 ch, u8 pri_ch)
 {
        __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, RTW89_CHANNEL_WIDTH_20,
                                    ntx, RTW89_RS_CCK, ch - 2);
        __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, RTW89_CHANNEL_WIDTH_40,
                                    ntx, RTW89_RS_CCK, ch);
        __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
-                                   ntx, RTW89_RS_OFDM, ch - 2);
+                                   ntx, RTW89_RS_OFDM, pri_ch);
        __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
                                    ntx, RTW89_RS_MCS, ch - 2);
        __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20,
@@ -1190,14 +1372,14 @@ static void rtw89_phy_fill_txpwr_limit_40m(struct rtw89_dev *rtwdev,
 
 static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev,
                                           struct rtw89_txpwr_limit *lmt,
-                                          u8 ntx, u8 ch)
+                                          u8 ntx, u8 ch, u8 pri_ch)
 {
        s8 val_0p5_n[RTW89_BF_NUM];
        s8 val_0p5_p[RTW89_BF_NUM];
        u8 i;
 
        __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
-                                   ntx, RTW89_RS_OFDM, ch - 6);
+                                   ntx, RTW89_RS_OFDM, pri_ch);
        __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
                                    ntx, RTW89_RS_MCS, ch - 6);
        __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20,
@@ -1222,10 +1404,82 @@ static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev,
                lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);
 }
 
+static void rtw89_phy_fill_txpwr_limit_160m(struct rtw89_dev *rtwdev,
+                                           struct rtw89_txpwr_limit *lmt,
+                                           u8 ntx, u8 ch, u8 pri_ch)
+{
+       s8 val_0p5_n[RTW89_BF_NUM];
+       s8 val_0p5_p[RTW89_BF_NUM];
+       s8 val_2p5_n[RTW89_BF_NUM];
+       s8 val_2p5_p[RTW89_BF_NUM];
+       u8 i;
+
+       /* fill ofdm section */
+       __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
+                                   ntx, RTW89_RS_OFDM, pri_ch);
+
+       /* fill mcs 20m section */
+       __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
+                                   ntx, RTW89_RS_MCS, ch - 14);
+       __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20,
+                                   ntx, RTW89_RS_MCS, ch - 10);
+       __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], RTW89_CHANNEL_WIDTH_20,
+                                   ntx, RTW89_RS_MCS, ch - 6);
+       __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], RTW89_CHANNEL_WIDTH_20,
+                                   ntx, RTW89_RS_MCS, ch - 2);
+       __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[4], RTW89_CHANNEL_WIDTH_20,
+                                   ntx, RTW89_RS_MCS, ch + 2);
+       __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[5], RTW89_CHANNEL_WIDTH_20,
+                                   ntx, RTW89_RS_MCS, ch + 6);
+       __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[6], RTW89_CHANNEL_WIDTH_20,
+                                   ntx, RTW89_RS_MCS, ch + 10);
+       __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[7], RTW89_CHANNEL_WIDTH_20,
+                                   ntx, RTW89_RS_MCS, ch + 14);
+
+       /* fill mcs 40m section */
+       __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], RTW89_CHANNEL_WIDTH_40,
+                                   ntx, RTW89_RS_MCS, ch - 12);
+       __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], RTW89_CHANNEL_WIDTH_40,
+                                   ntx, RTW89_RS_MCS, ch - 4);
+       __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[2], RTW89_CHANNEL_WIDTH_40,
+                                   ntx, RTW89_RS_MCS, ch + 4);
+       __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[3], RTW89_CHANNEL_WIDTH_40,
+                                   ntx, RTW89_RS_MCS, ch + 12);
+
+       /* fill mcs 80m section */
+       __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], RTW89_CHANNEL_WIDTH_80,
+                                   ntx, RTW89_RS_MCS, ch - 8);
+       __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[1], RTW89_CHANNEL_WIDTH_80,
+                                   ntx, RTW89_RS_MCS, ch + 8);
+
+       /* fill mcs 160m section */
+       __fill_txpwr_limit_nonbf_bf(lmt->mcs_160m, RTW89_CHANNEL_WIDTH_160,
+                                   ntx, RTW89_RS_MCS, ch);
+
+       /* fill mcs 40m 0p5 section */
+       __fill_txpwr_limit_nonbf_bf(val_0p5_n, RTW89_CHANNEL_WIDTH_40,
+                                   ntx, RTW89_RS_MCS, ch - 4);
+       __fill_txpwr_limit_nonbf_bf(val_0p5_p, RTW89_CHANNEL_WIDTH_40,
+                                   ntx, RTW89_RS_MCS, ch + 4);
+
+       for (i = 0; i < RTW89_BF_NUM; i++)
+               lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);
+
+       /* fill mcs 40m 2p5 section */
+       __fill_txpwr_limit_nonbf_bf(val_2p5_n, RTW89_CHANNEL_WIDTH_40,
+                                   ntx, RTW89_RS_MCS, ch - 8);
+       __fill_txpwr_limit_nonbf_bf(val_2p5_p, RTW89_CHANNEL_WIDTH_40,
+                                   ntx, RTW89_RS_MCS, ch + 8);
+
+       for (i = 0; i < RTW89_BF_NUM; i++)
+               lmt->mcs_40m_2p5[i] = min_t(s8, val_2p5_n[i], val_2p5_p[i]);
+}
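The channel offsets above encode the 160 MHz subchannel layout relative to the carrier; a comment-form map of what each section reads:

    /*
     * 160 MHz subchannel centers relative to ch:
     *
     *  20 MHz: -14 -10  -6  -2  +2  +6 +10 +14
     *  40 MHz:   -12     -4      +4     +12
     *  80 MHz:       -8               +8
     * 160 MHz:               0
     *
     * mcs_40m_0p5 takes min() over the two 40 MHz centers nearest the
     * carrier (ch +/- 4); mcs_40m_2p5 over the next pair (ch +/- 8).
     */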
+
 void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
                                struct rtw89_txpwr_limit *lmt,
                                u8 ntx)
 {
+       u8 pri_ch = rtwdev->hal.current_primary_channel;
        u8 ch = rtwdev->hal.current_channel;
        u8 bw = rtwdev->hal.current_band_width;
 
@@ -1236,10 +1490,13 @@ void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
                rtw89_phy_fill_txpwr_limit_20m(rtwdev, lmt, ntx, ch);
                break;
        case RTW89_CHANNEL_WIDTH_40:
-               rtw89_phy_fill_txpwr_limit_40m(rtwdev, lmt, ntx, ch);
+               rtw89_phy_fill_txpwr_limit_40m(rtwdev, lmt, ntx, ch, pri_ch);
                break;
        case RTW89_CHANNEL_WIDTH_80:
-               rtw89_phy_fill_txpwr_limit_80m(rtwdev, lmt, ntx, ch);
+               rtw89_phy_fill_txpwr_limit_80m(rtwdev, lmt, ntx, ch, pri_ch);
+               break;
+       case RTW89_CHANNEL_WIDTH_160:
+               rtw89_phy_fill_txpwr_limit_160m(rtwdev, lmt, ntx, ch, pri_ch);
                break;
        }
 }
@@ -1249,8 +1506,8 @@ static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev,
                                        u8 ru, u8 ntx, u8 ch)
 {
        const struct rtw89_chip_info *chip = rtwdev->chip;
-       u8 ch_idx = rtw89_channel_to_idx(rtwdev, ch);
        u8 band = rtwdev->hal.current_band_type;
+       u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
        u8 regd = rtw89_regd_get(rtwdev, band);
        s8 lmt_ru = 0, sar;
 
@@ -1267,6 +1524,12 @@ static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev,
                        lmt_ru = (*chip->txpwr_lmt_ru_5g)[ru][ntx]
                                                         [RTW89_WW][ch_idx];
                break;
+       case RTW89_BAND_6G:
+               lmt_ru = (*chip->txpwr_lmt_ru_6g)[ru][ntx][regd][ch_idx];
+               if (!lmt_ru)
+                       lmt_ru = (*chip->txpwr_lmt_ru_6g)[ru][ntx]
+                                                        [RTW89_WW][ch_idx];
+               break;
        default:
                rtw89_warn(rtwdev, "unknown band type: %d\n", band);
                return 0;
@@ -1341,6 +1604,31 @@ rtw89_phy_fill_txpwr_limit_ru_80m(struct rtw89_dev *rtwdev,
                                                         ntx, ch + 6);
 }
 
+static void
+rtw89_phy_fill_txpwr_limit_ru_160m(struct rtw89_dev *rtwdev,
+                                  struct rtw89_txpwr_limit_ru *lmt_ru,
+                                  u8 ntx, u8 ch)
+{
+       static const int ofst[] = { -14, -10, -6, -2, 2, 6, 10, 14 };
+       int i;
+
+       static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM);
+       for (i = 0; i < RTW89_RU_SEC_NUM; i++) {
+               lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev,
+                                                               RTW89_RU26,
+                                                               ntx,
+                                                               ch + ofst[i]);
+               lmt_ru->ru52[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev,
+                                                               RTW89_RU52,
+                                                               ntx,
+                                                               ch + ofst[i]);
+               lmt_ru->ru106[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev,
+                                                                RTW89_RU106,
+                                                                ntx,
+                                                                ch + ofst[i]);
+       }
+}
+
 void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
                                   struct rtw89_txpwr_limit_ru *lmt_ru,
                                   u8 ntx)
@@ -1360,6 +1648,9 @@ void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
        case RTW89_CHANNEL_WIDTH_80:
                rtw89_phy_fill_txpwr_limit_ru_80m(rtwdev, lmt_ru, ntx, ch);
                break;
+       case RTW89_CHANNEL_WIDTH_160:
+               rtw89_phy_fill_txpwr_limit_ru_160m(rtwdev, lmt_ru, ntx, ch);
+               break;
        }
 }
 EXPORT_SYMBOL(rtw89_phy_fill_txpwr_limit_ru);
@@ -1424,13 +1715,7 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
                break;
        }
 
-       if (bw == RTW89_CHANNEL_WIDTH_80)
-               ra_report->txrate.bw = RATE_INFO_BW_80;
-       else if (bw == RTW89_CHANNEL_WIDTH_40)
-               ra_report->txrate.bw = RATE_INFO_BW_40;
-       else
-               ra_report->txrate.bw = RATE_INFO_BW_20;
-
+       ra_report->txrate.bw = rtw89_hw_to_rate_info_bw(bw);
        ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate);
        ra_report->hw_rate = FIELD_PREP(RTW89_HW_RATE_MASK_MOD, mode) |
                             FIELD_PREP(RTW89_HW_RATE_MASK_VAL, rate);
@@ -1510,15 +1795,25 @@ static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
                                          u8 crystal_cap, bool force)
 {
        struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
+       const struct rtw89_chip_info *chip = rtwdev->chip;
        u8 sc_xi_val, sc_xo_val;
 
        if (!force && cfo->crystal_cap == crystal_cap)
                return;
        crystal_cap = clamp_t(u8, crystal_cap, 0, 127);
-       rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
-       rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
-       sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true);
-       sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false);
+       if (chip->chip_id == RTL8852A) {
+               rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
+               rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
+               sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true);
+               sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false);
+       } else {
+               rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO,
+                                       crystal_cap, XTAL_SC_XO_MASK);
+               rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI,
+                                       crystal_cap, XTAL_SC_XI_MASK);
+               rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO, &sc_xo_val);
+               rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI, &sc_xi_val);
+       }
        cfo->crystal_cap = sc_xi_val;
        cfo->x_cap_ofst = (s8)((int)cfo->crystal_cap - cfo->def_x_cap);
 
@@ -1548,9 +1843,11 @@ static void rtw89_phy_cfo_reset(struct rtw89_dev *rtwdev)
 
 static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo)
 {
+       const struct rtw89_reg_def *dcfo_comp = rtwdev->chip->dcfo_comp;
        bool is_linked = rtwdev->total_sta_assoc > 0;
        s32 cfo_avg_312;
-       s32 dcfo_comp;
+       s32 dcfo_comp_val;
+       u8 dcfo_comp_sft = rtwdev->chip->dcfo_comp_sft;
        int sign;
 
        if (!is_linked) {
@@ -1561,13 +1858,13 @@ static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo)
        rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: curr_cfo=%d\n", curr_cfo);
        if (curr_cfo == 0)
                return;
-       dcfo_comp = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO);
+       dcfo_comp_val = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO);
        sign = curr_cfo > 0 ? 1 : -1;
-       cfo_avg_312 = (curr_cfo << 3) / 5 + sign * dcfo_comp;
+       cfo_avg_312 = (curr_cfo << dcfo_comp_sft) / 5 + sign * dcfo_comp_val;
        rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: avg_cfo=%d\n", cfo_avg_312);
        if (rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV)
                cfo_avg_312 = -cfo_avg_312;
-       rtw89_phy_set_phy_regs(rtwdev, R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK,
+       rtw89_phy_set_phy_regs(rtwdev, dcfo_comp->addr, dcfo_comp->mask,
                               cfo_avg_312);
 }
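Parameterizing the shift keeps the 8852A arithmetic intact: with dcfo_comp_sft == 3 the expression reproduces the old hardcoded `(curr_cfo << 3) / 5`. A worked example, values illustrative:

    /* curr_cfo = 100, dcfo_comp_val = 20, sign = +1, sft = 3:
     *   cfo_avg_312 = (100 << 3) / 5 + 1 * 20 = 800 / 5 + 20 = 180
     */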
 
@@ -1586,8 +1883,12 @@ static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev)
        cfo->crystal_cap_default = efuse->xtal_cap & B_AX_XTAL_SC_MASK;
        cfo->crystal_cap = cfo->crystal_cap_default;
        cfo->def_x_cap = cfo->crystal_cap;
+       cfo->x_cap_ub = min_t(int, cfo->def_x_cap + CFO_BOUND, 0x7f);
+       cfo->x_cap_lb = max_t(int, cfo->def_x_cap - CFO_BOUND, 0x1);
        cfo->is_adjust = false;
+       cfo->divergence_lock_en = false;
        cfo->x_cap_ofst = 0;
+       cfo->lock_cnt = 0;
        cfo->rtw89_multi_cfo_mode = RTW89_TP_BASED_AVG_MODE;
        cfo->apply_compensation = false;
        cfo->residual_cfo_acc = 0;
@@ -1805,6 +2106,23 @@ static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev)
                rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n");
                return;
        }
+       if (cfo->divergence_lock_en) {
+               cfo->lock_cnt++;
+               if (cfo->lock_cnt > CFO_PERIOD_CNT) {
+                       cfo->divergence_lock_en = false;
+                       cfo->lock_cnt = 0;
+               } else {
+                       rtw89_phy_cfo_reset(rtwdev);
+               }
+               return;
+       }
+       if (cfo->crystal_cap >= cfo->x_cap_ub ||
+           cfo->crystal_cap <= cfo->x_cap_lb) {
+               cfo->divergence_lock_en = true;
+               rtw89_phy_cfo_reset(rtwdev);
+               return;
+       }
+
        rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo);
        cfo->cfo_avg_pre = new_cfo;
        x_cap_update =  cfo->crystal_cap != pre_x_cap;
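The new divergence lock keeps the tracker from banging against the crystal-cap rails: once crystal_cap reaches x_cap_ub or x_cap_lb, adjustment is suspended and CFO state reset for CFO_PERIOD_CNT iterations. The bounds come from rtw89_phy_cfo_init() with CFO_BOUND = 32 (phy.h below):

    /* def_x_cap = 0x60: ub = min(0x60 + 32, 0x7f) = 0x7f
     *                   lb = max(0x60 - 32, 0x01) = 0x40
     * def_x_cap = 0x10: ub = 0x30, lb = 0x01 (clamped at the low rail)
     */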
@@ -3037,3 +3355,55 @@ void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif
        rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_STAID,
                              vif->bss_conf.aid, phy_idx);
 }
+
+static void
+_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+       rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
+}
+
+static void
+_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+       rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
+}
+
+static void
+_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+       rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
+}
+
+static void
+_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+       rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
+}
+
+static void
+_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+       udelay(def->data);
+}
+
+static void
+(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
+       [RTW89_RFK_F_WRF] = _rfk_write_rf,
+       [RTW89_RFK_F_WM] = _rfk_write32_mask,
+       [RTW89_RFK_F_WS] = _rfk_write32_set,
+       [RTW89_RFK_F_WC] = _rfk_write32_clr,
+       [RTW89_RFK_F_DELAY] = _rfk_delay,
+};
+
+static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);
+
+void
+rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
+{
+       const struct rtw89_reg5_def *p = tbl->defs;
+       const struct rtw89_reg5_def *end = tbl->defs + tbl->size;
+
+       for (; p < end; p++)
+               _rfk_handler[p->flag](rtwdev, p);
+}
+EXPORT_SYMBOL(rtw89_rfk_parser);
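rtw89_rfk_parser() is now shared code: it walks a table of rtw89_reg5_def entries and dispatches each one through a function-pointer array indexed by the entry's flag, with static_assert() keeping the array and the enum in lockstep. The same pattern in a self-contained form (two opcodes instead of five; the names and sample table are illustrative):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	enum rfk_flag { RFK_F_WM, RFK_F_DELAY, RFK_F_NUM };

	struct reg5_def {
		uint8_t flag;
		uint32_t addr, mask, data;
	};

	static void op_write_mask(const struct reg5_def *d)
	{
		printf("write 0x%04x mask 0x%08x data 0x%08x\n",
		       (unsigned)d->addr, (unsigned)d->mask, (unsigned)d->data);
	}

	static void op_delay(const struct reg5_def *d)
	{
		printf("udelay(%u)\n", (unsigned)d->data);
	}

	/* Opcode -> handler; the designated initializers keep this readable. */
	static void (*const handler[])(const struct reg5_def *) = {
		[RFK_F_WM] = op_write_mask,
		[RFK_F_DELAY] = op_delay,
	};

	static_assert(sizeof(handler) / sizeof(handler[0]) == RFK_F_NUM,
		      "handler table out of sync with enum");

	static void parse(const struct reg5_def *defs, size_t n)
	{
		for (const struct reg5_def *p = defs; p < defs + n; p++)
			handler[p->flag](p);
	}

	int main(void)
	{
		static const struct reg5_def tbl[] = {
			{ RFK_F_WM, 0x12a8, 0x1, 0x1 },
			{ RFK_F_DELAY, 0, 0, 10 },
		};

		parse(tbl, sizeof(tbl) / sizeof(tbl[0]));
		return 0;
	}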
index 2cb68f4..adcfcb4 100644
@@ -8,6 +8,7 @@
 #include "core.h"
 
 #define RTW89_PHY_ADDR_OFFSET  0x10000
+#define RTW89_RF_ADDR_ADSEL_MASK  BIT(16)
 
 #define get_phy_headline(addr)         FIELD_GET(GENMASK(31, 28), addr)
 #define PHY_HEADLINE_VALID     0xf
@@ -55,6 +56,7 @@
 #define CFO_TRK_STOP_TH (2 << 2)
 #define CFO_SW_COMP_FINE_TUNE (2 << 2)
 #define CFO_PERIOD_CNT 15
+#define CFO_BOUND 32
 #define CFO_TP_UPPER 100
 #define CFO_TP_LOWER 50
 #define CFO_COMP_PERIOD 250
@@ -328,6 +330,65 @@ static inline u32 rtw89_phy_read32_mask(struct rtw89_dev *rtwdev,
        return rtw89_read32_mask(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, mask);
 }
 
+enum rtw89_rfk_flag {
+       RTW89_RFK_F_WRF = 0,
+       RTW89_RFK_F_WM = 1,
+       RTW89_RFK_F_WS = 2,
+       RTW89_RFK_F_WC = 3,
+       RTW89_RFK_F_DELAY = 4,
+       RTW89_RFK_F_NUM,
+};
+
+struct rtw89_rfk_tbl {
+       const struct rtw89_reg5_def *defs;
+       u32 size;
+};
+
+#define RTW89_DECLARE_RFK_TBL(_name)           \
+const struct rtw89_rfk_tbl _name ## _tbl = {   \
+       .defs = _name,                          \
+       .size = ARRAY_SIZE(_name),              \
+}
+
+#define RTW89_DECL_RFK_WRF(_path, _addr, _mask, _data) \
+       {.flag = RTW89_RFK_F_WRF,                       \
+        .path = _path,                                 \
+        .addr = _addr,                                 \
+        .mask = _mask,                                 \
+        .data = _data,}
+
+#define RTW89_DECL_RFK_WM(_addr, _mask, _data) \
+       {.flag = RTW89_RFK_F_WM,                \
+        .addr = _addr,                         \
+        .mask = _mask,                         \
+        .data = _data,}
+
+#define RTW89_DECL_RFK_WS(_addr, _mask)        \
+       {.flag = RTW89_RFK_F_WS,        \
+        .addr = _addr,                 \
+        .mask = _mask,}
+
+#define RTW89_DECL_RFK_WC(_addr, _mask)        \
+       {.flag = RTW89_RFK_F_WC,        \
+        .addr = _addr,                 \
+        .mask = _mask,}
+
+#define RTW89_DECL_RFK_DELAY(_data)    \
+       {.flag = RTW89_RFK_F_DELAY,     \
+        .data = _data,}
+
+void
+rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl);
+
+#define rtw89_rfk_parser_by_cond(dev, cond, tbl_t, tbl_f)      \
+       do {                                                    \
+               typeof(dev) __dev = (dev);                      \
+               if (cond)                                       \
+                       rtw89_rfk_parser(__dev, (tbl_t));       \
+               else                                            \
+                       rtw89_rfk_parser(__dev, (tbl_f));       \
+       } while (0)
+
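One detail of rtw89_rfk_parser_by_cond() worth noting: binding the argument to a typeof() local is standard kernel macro hygiene, so the argument expression is evaluated exactly once whichever branch runs, and do { } while (0) lets the macro sit safely in an unbraced if/else. A toy version; typeof is the GNU C extension the kernel builds with, and run() plus the types here are illustrative:

	#include <stdio.h>

	struct dev { int id; };

	static void run(struct dev *d, const char *tbl)
	{
		printf("dev %d: parse %s\n", d->id, tbl);
	}

	#define RUN_ONE_OF(dev, cond, tbl_t, tbl_f)		\
		do {						\
			typeof(dev) __dev = (dev);		\
			if (cond)				\
				run(__dev, (tbl_t));		\
			else					\
				run(__dev, (tbl_f));		\
		} while (0)

	int main(void)
	{
		struct dev d = { .id = 0 };

		RUN_ONE_OF(&d, d.id == 0, "table_true", "table_false");
		return 0;
	}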
 void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
                              const struct rtw89_phy_reg3_tbl *tbl);
 u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
@@ -335,10 +396,18 @@ u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
                      enum rtw89_bandwidth dbw);
 u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
                      u32 addr, u32 mask);
+u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+                        u32 addr, u32 mask);
 bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
                        u32 addr, u32 mask, u32 data);
+bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+                          u32 addr, u32 mask, u32 data);
 void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev);
 void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev);
+void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
+                               const struct rtw89_reg2_def *reg,
+                               enum rtw89_rf_path rf_path,
+                               void *extra_data);
 void rtw89_phy_dm_init(struct rtw89_dev *rtwdev);
 void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
                           u32 data, enum rtw89_phy_idx phy_idx);
index b39e531..25b1067 100644 (file)
@@ -8,16 +8,36 @@
 #define R_AX_SYS_WL_EFUSE_CTRL 0x000A
 #define B_AX_AUTOLOAD_SUS BIT(5)
 
+#define R_AX_SYS_ISO_CTRL 0x0000
+#define B_AX_PWC_EV2EF_MASK GENMASK(15, 14)
+#define B_AX_PWC_EV2EF_B15 BIT(15)
+#define B_AX_PWC_EV2EF_B14 BIT(14)
+#define B_AX_ISO_EB2CORE BIT(8)
+
 #define R_AX_SYS_FUNC_EN 0x0002
 #define B_AX_FEN_BB_GLB_RSTN BIT(1)
 #define B_AX_FEN_BBRSTB BIT(0)
 
 #define R_AX_SYS_PW_CTRL 0x0004
+#define B_AX_XTAL_OFF_A_DIE BIT(22)
+#define B_AX_DIS_WLBT_PDNSUSEN_SOPC BIT(18)
+#define B_AX_RDY_SYSPWR BIT(17)
+#define B_AX_EN_WLON BIT(16)
+#define B_AX_APDM_HPDN BIT(15)
 #define B_AX_PSUS_OFF_CAPC_EN BIT(14)
+#define B_AX_AFSM_PCIE_SUS_EN BIT(12)
+#define B_AX_AFSM_WLSUS_EN BIT(11)
+#define B_AX_APFM_SWLPS BIT(10)
+#define B_AX_APFM_OFFMAC BIT(9)
+#define B_AX_APFN_ONMAC BIT(8)
 
 #define R_AX_SYS_CLK_CTRL 0x0008
 #define B_AX_CPU_CLK_EN BIT(14)
 
+#define R_AX_SYS_ADIE_PAD_PWR_CTRL 0x0018
+#define B_AX_SYM_PADPDN_WL_PTA_1P3 BIT(6)
+#define B_AX_SYM_PADPDN_WL_RFC_1P3 BIT(5)
+
 #define R_AX_RSV_CTRL 0x001C
 #define B_AX_R_DIS_PRST BIT(6)
 #define B_AX_WLOCK_1C_BIT6 BIT(5)
 #define B_AX_EF_ADDR_MASK GENMASK(26, 16)
 #define B_AX_EF_DATA_MASK GENMASK(15, 0)
 
+#define R_AX_EFUSE_CTRL_1_V1 0x0038
+#define B_AX_EF_ENT BIT(31)
+#define B_AX_EF_BURST BIT(19)
+#define B_AX_EF_TEST_SEL_MASK GENMASK(18, 16)
+#define B_AX_EF_TROW_EN BIT(15)
+#define B_AX_EF_ERR_FLAG BIT(14)
+#define B_AX_EF_DSB_EN BIT(11)
+#define B_AX_PCIE_CALIB_EN_V1 BIT(12)
+#define B_AX_WDT_WAKE_PCIE_EN BIT(10)
+#define B_AX_WDT_WAKE_USB_EN BIT(9)
+
 #define R_AX_GPIO_MUXCFG 0x0040
 #define B_AX_BOOT_MODE BIT(19)
 #define B_AX_WL_EECS_EXT_32K_SEL BIT(18)
 #define R_AX_SYS_SDIO_CTRL 0x0070
 #define B_AX_PCIE_DIS_L2_CTRL_LDO_HCI BIT(15)
 #define B_AX_PCIE_DIS_WLSUS_AFT_PDN BIT(14)
+#define B_AX_PCIE_CALIB_EN_V1 BIT(12)
 #define B_AX_PCIE_AUXCLK_GATE BIT(11)
 #define B_AX_LTE_MUX_CTRL_PATH BIT(26)
 
 #define R_AX_PLATFORM_ENABLE 0x0088
 #define B_AX_WCPU_EN BIT(1)
+#define B_AX_PLATFORM_EN BIT(0)
+
+#define R_AX_WLLPS_CTRL 0x0090
+#define B_AX_DIS_WLBT_LPSEN_LOPC BIT(1)
 
 #define R_AX_SCOREBOARD  0x00AC
 #define B_AX_TOGGLE BIT(31)
 #define R_AX_DBG_PORT_SEL 0x00C0
 #define B_AX_DEBUG_ST_MASK GENMASK(31, 0)
 
+#define R_AX_PMC_DBG_CTRL2 0x00CC
+#define B_AX_SYSON_DIS_PMCR_AX_WRMSK BIT(2)
+
 #define R_AX_SYS_CFG1 0x00F0
 #define B_AX_CHIP_VER_MASK GENMASK(15, 12)
 
 #define R_AX_SYS_STATUS1 0x00F4
 #define B_AX_SEL_0XC0_MASK GENMASK(17, 16)
+#define B_AX_PAD_HCI_SEL_V2_MASK GENMASK(5, 3)
+#define MAC_AX_HCI_SEL_SDIO_UART 0
+#define MAC_AX_HCI_SEL_MULTI_USB 1
+#define MAC_AX_HCI_SEL_PCIE_UART 2
+#define MAC_AX_HCI_SEL_PCIE_USB 3
+#define MAC_AX_HCI_SEL_MULTI_SDIO 4
 
 #define R_AX_HALT_H2C_CTRL 0x0160
 #define R_AX_HALT_H2C 0x0168
 #define PS_RPWM_TOGGLE BIT(15)
 #define PS_RPWM_ACK BIT(14)
 #define PS_RPWM_SEQ_NUM GENMASK(13, 12)
+#define PS_RPWM_NOTIFY_WAKE BIT(8)
 #define PS_RPWM_STATE 0x7
 #define RPWM_SEQ_NUM_MAX 3
 #define PS_CPWM_SEQ_NUM GENMASK(13, 12)
 #define R_AX_UDM2 0x01F8
 #define R_AX_UDM3 0x01FC
 
+#define R_AX_LDO_AON_CTRL0 0x0218
+#define B_AX_PD_REGU_L BIT(16)
+
+#define R_AX_WLAN_XTAL_SI_CTRL 0x0270
+#define B_AX_WL_XTAL_SI_CMD_POLL BIT(31)
+#define B_AX_BT_XTAL_SI_ERR_FLAG BIT(30)
+#define B_AX_WL_XTAL_GNT BIT(29)
+#define B_AX_BT_XTAL_GNT BIT(28)
+#define B_AX_WL_XTAL_SI_MODE_MASK GENMASK(25, 24)
+#define XTAL_SI_NORMAL_WRITE 0x00
+#define XTAL_SI_NORMAL_READ 0x01
+#define B_AX_WL_XTAL_SI_BITMASK_MASK GENMASK(23, 16)
+#define B_AX_WL_XTAL_SI_DATA_MASK GENMASK(15, 8)
+#define B_AX_WL_XTAL_SI_ADDR_MASK GENMASK(7, 0)
+
 #define R_AX_XTAL_ON_CTRL0 0x0280
 #define B_AX_XTAL_SC_LPS BIT(31)
 #define B_AX_XTAL_SC_XO_MASK GENMASK(23, 17)
 
 #define R_AX_GPIO0_7_FUNC_SEL 0x02D0
 
+#define R_AX_GPIO0_15_EECS_EESK_LED1_PULL_LOW_EN 0x02E4
+#define B_AX_LED1_PULL_LOW_EN BIT(18)
+#define B_AX_EESK_PULL_LOW_EN BIT(17)
+#define B_AX_EECS_PULL_LOW_EN BIT(16)
+
 #define R_AX_WLRF_CTRL 0x02F0
 #define B_AX_WLRF1_CTRL_7 BIT(15)
 #define B_AX_WLRF1_CTRL_1 BIT(9)
 #define B_AX_ASFF_FULL_NO_STK BIT(1)
 #define B_AX_EN_STUCK_DBG BIT(0)
 
+#define R_AX_HCI_FC_CTRL_V1 0x1700
+#define R_AX_CH_PAGE_CTRL_V1 0x1704
+
+#define R_AX_ACH0_PAGE_CTRL_V1 0x1710
+#define R_AX_ACH1_PAGE_CTRL_V1 0x1714
+#define R_AX_ACH2_PAGE_CTRL_V1 0x1718
+#define R_AX_ACH3_PAGE_CTRL_V1 0x171C
+#define R_AX_ACH4_PAGE_CTRL_V1 0x1720
+#define R_AX_ACH5_PAGE_CTRL_V1 0x1724
+#define R_AX_ACH6_PAGE_CTRL_V1 0x1728
+#define R_AX_ACH7_PAGE_CTRL_V1 0x172C
+#define R_AX_CH8_PAGE_CTRL_V1 0x1730
+#define R_AX_CH9_PAGE_CTRL_V1 0x1734
+#define R_AX_CH10_PAGE_CTRL_V1 0x1738
+#define R_AX_CH11_PAGE_CTRL_V1 0x173C
+
+#define R_AX_ACH0_PAGE_INFO_V1 0x1750
+#define R_AX_ACH1_PAGE_INFO_V1 0x1754
+#define R_AX_ACH2_PAGE_INFO_V1 0x1758
+#define R_AX_ACH3_PAGE_INFO_V1 0x175C
+#define R_AX_ACH4_PAGE_INFO_V1 0x1760
+#define R_AX_ACH5_PAGE_INFO_V1 0x1764
+#define R_AX_ACH6_PAGE_INFO_V1 0x1768
+#define R_AX_ACH7_PAGE_INFO_V1 0x176C
+#define R_AX_CH8_PAGE_INFO_V1 0x1770
+#define R_AX_CH9_PAGE_INFO_V1 0x1774
+#define R_AX_CH10_PAGE_INFO_V1 0x1778
+#define R_AX_CH11_PAGE_INFO_V1 0x177C
+#define R_AX_CH12_PAGE_INFO_V1 0x1780
+
+#define R_AX_PUB_PAGE_INFO3_V1 0x178C
+#define R_AX_PUB_PAGE_CTRL1_V1 0x1790
+#define R_AX_PUB_PAGE_CTRL2_V1 0x1794
+#define R_AX_PUB_PAGE_INFO1_V1 0x1798
+#define R_AX_PUB_PAGE_INFO2_V1 0x179C
+#define R_AX_WP_PAGE_CTRL1_V1 0x17A0
+#define R_AX_WP_PAGE_CTRL2_V1 0x17A4
+#define R_AX_WP_PAGE_INFO1_V1 0x17A8
+
+#define R_AX_H2CREG_DATA0_V1 0x7140
+#define R_AX_H2CREG_DATA1_V1 0x7144
+#define R_AX_H2CREG_DATA2_V1 0x7148
+#define R_AX_H2CREG_DATA3_V1 0x714C
+#define R_AX_C2HREG_DATA0_V1 0x7150
+#define R_AX_C2HREG_DATA1_V1 0x7154
+#define R_AX_C2HREG_DATA2_V1 0x7158
+#define R_AX_C2HREG_DATA3_V1 0x715C
+#define R_AX_H2CREG_CTRL_V1 0x7160
+#define R_AX_C2HREG_CTRL_V1 0x7164
+
+#define R_AX_HCI_FUNC_EN_V1 0x7880
+
 #define R_AX_PHYREG_SET 0x8040
 #define PHYREG_SET_ALL_CYCLE 0x8
 
 #define R_AX_BOOT_DBG 0x83F0
 
 #define R_AX_DMAC_FUNC_EN 0x8400
+#define B_AX_DMAC_CRPRT BIT(31)
 #define B_AX_MAC_FUNC_EN BIT(30)
 #define B_AX_DMAC_FUNC_EN BIT(29)
 #define B_AX_MPDU_PROC_EN BIT(28)
 #define B_AX_PKT_IN_EN BIT(20)
 #define B_AX_DLE_CPUIO_EN BIT(19)
 #define B_AX_DISPATCHER_EN BIT(18)
+#define B_AX_BBRPT_EN BIT(17)
 #define B_AX_MAC_SEC_EN BIT(16)
+#define B_AX_MAC_UN_EN BIT(15)
+#define B_AX_H_AXIDMA_EN BIT(14)
 
 #define R_AX_DMAC_CLK_EN 0x8404
 #define B_AX_WD_RLS_CLK_EN BIT(27)
 #define B_AX_PKT_IN_CLK_EN BIT(20)
 #define B_AX_DLE_CPUIO_CLK_EN BIT(19)
 #define B_AX_DISPATCHER_CLK_EN BIT(18)
+#define B_AX_BBRPT_CLK_EN BIT(17)
 #define B_AX_MAC_SEC_CLK_EN BIT(16)
 
 #define PCI_LTR_IDLE_TIMER_1US 0
 #define R_AX_PLE_QTA8_CFG 0x9060
 #define R_AX_PLE_QTA9_CFG 0x9064
 #define R_AX_PLE_QTA10_CFG 0x9068
+#define R_AX_PLE_QTA11_CFG 0x906C
 
 #define R_AX_PLE_INI_STATUS 0x9100
 #define B_AX_PLE_Q_MGN_INI_RDY BIT(1)
 #define R_AX_DBG_FUN_INTF_DATA 0x9F34
 #define B_AX_DFI_DATA_MASK GENMASK(31, 0)
 
+#define R_AX_TXPKTCTL_B0_PRELD_CFG0 0x9F48
+#define B_AX_B0_PRELD_FEN BIT(31)
+#define B_AX_B0_PRELD_USEMAXSZ_MASK GENMASK(25, 16)
+#define PRELD_B0_ENT_NUM 10
+#define PRELD_AMSDU_SIZE 52
+#define B_AX_B0_PRELD_CAM_G1ENTNUM_MASK GENMASK(12, 8)
+#define B_AX_B0_PRELD_CAM_G0ENTNUM_MASK GENMASK(4, 0)
+
+#define R_AX_TXPKTCTL_B0_PRELD_CFG1 0x9F4C
+#define B_AX_B0_PRELD_NXT_TXENDWIN_MASK GENMASK(11, 8)
+#define PRELD_NEXT_WND 1
+#define B_AX_B0_PRELD_NXT_RSVMINSZ_MASK GENMASK(7, 0)
+
+#define R_AX_TXPKTCTL_B1_PRELD_CFG0 0x9F88
+#define B_AX_B1_PRELD_FEN BIT(31)
+#define B_AX_B1_PRELD_USEMAXSZ_MASK GENMASK(25, 16)
+#define PRELD_B1_ENT_NUM 4
+#define B_AX_B1_PRELD_CAM_G1ENTNUM_MASK GENMASK(12, 8)
+#define B_AX_B1_PRELD_CAM_G0ENTNUM_MASK GENMASK(4, 0)
+
+#define R_AX_TXPKTCTL_B1_PRELD_CFG1 0x9F8C
+#define B_AX_B1_PRELD_NXT_TXENDWIN_MASK GENMASK(11, 8)
+#define B_AX_B1_PRELD_NXT_RSVMINSZ_MASK GENMASK(7, 0)
+
 #define R_AX_AFE_CTRL1 0x0024
 
 #define B_AX_R_SYM_WLCMAC1_P4_PC_EN BIT(4)
 #define B_AX_CTN_TXEN_VI_0 BIT(2)
 #define B_AX_CTN_TXEN_BK_0 BIT(1)
 #define B_AX_CTN_TXEN_BE_0 BIT(0)
+#define B_AX_CTN_TXEN_ALL_MASK GENMASK(15, 0)
 
 #define R_AX_MUEDCA_BE_PARAM_0 0xC350
 #define R_AX_MUEDCA_BE_PARAM_0_C1 0xE350
 #define B_AX_CTN_CHK_CCA_S20 BIT(1)
 #define B_AX_CTN_CHK_CCA_P20 BIT(0)
 
+#define R_AX_CTN_DRV_TXEN 0xC398
+#define R_AX_CTN_DRV_TXEN_C1 0xE398
+#define B_AX_CTN_TXEN_TWT_3 BIT(17)
+#define B_AX_CTN_TXEN_TWT_2 BIT(16)
+#define B_AX_CTN_TXEN_ALL_MASK_V1 GENMASK(17, 0)
+
 #define R_AX_SCHEDULE_ERR_IMR 0xC3E8
 #define R_AX_SCHEDULE_ERR_IMR_C1 0xE3E8
 #define B_AX_SORT_NON_IDLE_ERR_INT_EN BIT(1)
 #define B_AX_PWR_UL_TB_CTRL_EN BIT(31)
 #define R_AX_PWR_UL_TB_1T 0xD28C
 #define B_AX_PWR_UL_TB_1T_MASK GENMASK(4, 0)
+#define B_AX_PWR_UL_TB_1T_V1_MASK GENMASK(7, 0)
 #define R_AX_PWR_UL_TB_2T 0xD290
 #define B_AX_PWR_UL_TB_2T_MASK GENMASK(4, 0)
+#define B_AX_PWR_UL_TB_2T_V1_MASK GENMASK(7, 0)
 #define R_AX_PWR_BY_RATE_TABLE0 0xD2C0
 #define R_AX_PWR_BY_RATE_TABLE10 0xD2E8
 #define R_AX_PWR_BY_RATE R_AX_PWR_BY_RATE_TABLE0
 #define B_AX_STATIS_BT_LO_TX_1_MASK GENMASK(15, 0)
 #define B_AX_STATIS_BT_LO_RX_1_MASK GENMASK(31, 16)
 
+#define R_AX_GNT_SW_CTRL 0xDA48
+#define R_AX_GNT_SW_CTRL_C1 0xFA48
+#define B_AX_WL_ACT2_VAL BIT(21)
+#define B_AX_WL_ACT2_SWCTRL BIT(20)
+#define B_AX_WL_ACT_VAL BIT(19)
+#define B_AX_WL_ACT_SWCTRL BIT(18)
+#define B_AX_GNT_BT_RX_VAL BIT(17)
+#define B_AX_GNT_BT_RX_SWCTRL BIT(16)
+#define B_AX_GNT_BT_TX_VAL BIT(15)
+#define B_AX_GNT_BT_TX_SWCTRL BIT(14)
+#define B_AX_GNT_WL_RX_VAL BIT(13)
+#define B_AX_GNT_WL_RX_SWCTRL BIT(12)
+#define B_AX_GNT_WL_TX_VAL BIT(11)
+#define B_AX_GNT_WL_TX_SWCTRL BIT(10)
+#define B_AX_GNT_BT_RFC_S1_VAL BIT(9)
+#define B_AX_GNT_BT_RFC_S1_SWCTRL BIT(8)
+#define B_AX_GNT_WL_RFC_S1_VAL BIT(7)
+#define B_AX_GNT_WL_RFC_S1_SWCTRL BIT(6)
+#define B_AX_GNT_BT_RFC_S0_VAL BIT(5)
+#define B_AX_GNT_BT_RFC_S0_SWCTRL BIT(4)
+#define B_AX_GNT_WL_RFC_S0_VAL BIT(3)
+#define B_AX_GNT_WL_RFC_S0_SWCTRL BIT(2)
+#define B_AX_GNT_WL_BB_VAL BIT(1)
+#define B_AX_GNT_WL_BB_SWCTRL BIT(0)
+
 #define R_AX_TDMA_MODE 0xDA4C
 #define R_AX_TDMA_MODE_C1 0xFA4C
 #define B_AX_R_BT_CMD_RPT_MASK GENMASK(31, 16)
 #define B_ANAPAR_FLTRST BIT(22)
 #define B_ANAPAR_CRXBB GENMASK(18, 16)
 #define B_ANAPAR_14 GENMASK(15, 0)
+#define R_SWSI_DATA_V1 0x0370
+#define B_SWSI_DATA_VAL_V1 GENMASK(19, 0)
+#define B_SWSI_DATA_ADDR_V1 GENMASK(27, 20)
+#define B_SWSI_DATA_PATH_V1 GENMASK(30, 28)
+#define B_SWSI_DATA_BIT_MASK_EN_V1 BIT(31)
+#define R_SWSI_BIT_MASK_V1 0x0374
+#define B_SWSI_BIT_MASK_V1 GENMASK(19, 0)
+#define R_SWSI_READ_ADDR_V1 0x0378
+#define B_SWSI_READ_ADDR_ADDR_V1 GENMASK(7, 0)
+#define B_SWSI_READ_ADDR_PATH_V1 GENMASK(10, 8)
+#define B_SWSI_READ_ADDR_V1 GENMASK(10, 0)
 #define R_UPD_CLK_ADC 0x0700
 #define B_UPD_CLK_ADC_ON BIT(24)
 #define B_UPD_CLK_ADC_VAL GENMASK(26, 25)
 #define R_CFO_COMP_SEG0_H 0x1388
 #define R_CFO_COMP_SEG0_CTRL 0x138C
 #define R_DBG32_D 0x1730
+#define R_SWSI_V1 0x174C
+#define B_SWSI_W_BUSY_V1 BIT(24)
+#define B_SWSI_R_BUSY_V1 BIT(25)
+#define B_SWSI_R_DATA_DONE_V1 BIT(26)
 #define R_TX_COUNTER 0x1A40
 #define R_IFS_CLM_TX_CNT 0x1ACC
 #define B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK GENMASK(31, 16)
 #define R_CHBW_MOD 0x4978
 #define B_CHBW_MOD_PRICH GENMASK(11, 8)
 #define B_CHBW_MOD_SBW GENMASK(13, 12)
+#define R_DCFO_COMP_S0_V1 0x4A40
+#define B_DCFO_COMP_S0_V1_MSK GENMASK(13, 0)
 #define R_BMODE_PDTH_V1 0x4B64
 #define B_BMODE_PDTH_LOWER_BOUND_MSK_V1 GENMASK(31, 24)
 #define R_BMODE_PDTH_EN_V1 0x4B74
index c39635a..41fc8db 100644
@@ -3,6 +3,7 @@
  */
 
 #include "coex.h"
+#include "fw.h"
 #include "mac.h"
 #include "phy.h"
 #include "reg.h"
@@ -376,6 +377,35 @@ static const struct rtw89_pwr_cfg * const pwr_off_seq_8852a[] = {
        rtw8852a_pwroff, NULL
 };
 
+static const u32 rtw8852a_h2c_regs[RTW89_H2CREG_MAX] = {
+       R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1,  R_AX_H2CREG_DATA2,
+       R_AX_H2CREG_DATA3
+};
+
+static const u32 rtw8852a_c2h_regs[RTW89_C2HREG_MAX] = {
+       R_AX_C2HREG_DATA0, R_AX_C2HREG_DATA1, R_AX_C2HREG_DATA2,
+       R_AX_C2HREG_DATA3
+};
+
+static const struct rtw89_page_regs rtw8852a_page_regs = {
+       .hci_fc_ctrl    = R_AX_HCI_FC_CTRL,
+       .ch_page_ctrl   = R_AX_CH_PAGE_CTRL,
+       .ach_page_ctrl  = R_AX_ACH0_PAGE_CTRL,
+       .ach_page_info  = R_AX_ACH0_PAGE_INFO,
+       .pub_page_info3 = R_AX_PUB_PAGE_INFO3,
+       .pub_page_ctrl1 = R_AX_PUB_PAGE_CTRL1,
+       .pub_page_ctrl2 = R_AX_PUB_PAGE_CTRL2,
+       .pub_page_info1 = R_AX_PUB_PAGE_INFO1,
+       .pub_page_info2 = R_AX_PUB_PAGE_INFO2,
+       .wp_page_ctrl1  = R_AX_WP_PAGE_CTRL1,
+       .wp_page_ctrl2  = R_AX_WP_PAGE_CTRL2,
+       .wp_page_info1  = R_AX_WP_PAGE_INFO1,
+};
+
+static const struct rtw89_reg_def rtw8852a_dcfo_comp = {
+       R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK
+};
+
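These tables feed the new per-chip indirection: generic code now indexes an array supplied by the chip file instead of hard-coding R_AX_H2CREG_DATA0..3, which is what lets the V1 addresses (0x7140..0x714C in reg.h above) slot in without touching the common path. A sketch of the pattern; the non-V1 addresses and the MMIO stub below are placeholders:

	#include <stdint.h>
	#include <stdio.h>

	#define H2CREG_MAX 4

	struct chip_info {
		const uint32_t *h2c_regs;	/* one address per H2C data word */
	};

	static const uint32_t chip_a_h2c[H2CREG_MAX] = {
		0x0010, 0x0014, 0x0018, 0x001C	/* placeholders */
	};
	static const uint32_t chip_b_h2c[H2CREG_MAX] = {
		0x7140, 0x7144, 0x7148, 0x714C	/* the V1 layout from the diff */
	};

	static void write32(uint32_t addr, uint32_t val)
	{
		printf("write32(0x%04x, 0x%08x)\n", (unsigned)addr, (unsigned)val);
	}

	static void send_h2c(const struct chip_info *chip, const uint32_t *words)
	{
		for (int i = 0; i < H2CREG_MAX; i++)
			write32(chip->h2c_regs[i], words[i]);
	}

	int main(void)
	{
		struct chip_info old_gen = { .h2c_regs = chip_a_h2c };
		struct chip_info new_gen = { .h2c_regs = chip_b_h2c };
		uint32_t msg[H2CREG_MAX] = { 1, 2, 3, 4 };

		send_h2c(&old_gen, msg);	/* same code path, */
		send_h2c(&new_gen, msg);	/* different silicon */
		return 0;
	}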
 static void rtw8852ae_efuse_parsing(struct rtw89_efuse *efuse,
                                    struct rtw8852a_efuse *map)
 {
@@ -1137,7 +1167,7 @@ static void rtw8852a_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
        u8 phy_idx = RTW89_PHY_0;
 
        if (enter) {
-               rtw89_mac_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL);
+               rtw89_chip_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL);
                rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
                rtw8852a_dfs_en(rtwdev, false);
                rtw8852a_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0);
@@ -1150,7 +1180,7 @@ static void rtw8852a_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
                rtw8852a_dfs_en(rtwdev, true);
                rtw8852a_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0);
                rtw8852a_bb_reset_en(rtwdev, phy_idx, true);
-               rtw89_mac_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en);
+               rtw89_chip_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en);
        }
 }
 
@@ -1245,10 +1275,10 @@ static u32 rtw8852a_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
 
 static
 void rtw8852a_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
-                                    s16 pw_ofst, enum rtw89_mac_idx mac_idx)
+                                    s8 pw_ofst, enum rtw89_mac_idx mac_idx)
 {
-       s32 val_1t = 0;
-       s32 val_2t = 0;
+       s8 val_1t = 0;
+       s8 val_2t = 0;
        u32 reg;
 
        if (pw_ofst < -16 || pw_ofst > 15) {
@@ -1258,7 +1288,7 @@ void rtw8852a_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
        }
        reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_CTRL, mac_idx);
        rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN);
-       val_1t = (s32)pw_ofst;
+       val_1t = pw_ofst;
        reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_1T, mac_idx);
        rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, val_1t);
        val_2t = max(val_1t - 3, -16);
@@ -1987,6 +2017,12 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
        .query_ppdu             = rtw8852a_query_ppdu,
        .bb_ctrl_btc_preagc     = rtw8852a_bb_ctrl_btc_preagc,
        .set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset,
+       .pwr_on_func            = NULL,
+       .pwr_off_func           = NULL,
+       .cfg_ctrl_path          = rtw89_mac_cfg_ctrl_path,
+       .mac_cfg_gnt            = rtw89_mac_cfg_gnt,
+       .stop_sch_tx            = rtw89_mac_stop_sch_tx,
+       .resume_sch_tx          = rtw89_mac_resume_sch_tx,
 
        .btc_set_rfe            = rtw8852a_btc_set_rfe,
        .btc_init_cfg           = rtw8852a_btc_init_cfg,
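The switch from rtw89_mac_stop_sch_tx() to rtw89_chip_stop_sch_tx() throughout this series is the same move: callers go through the chip ops just added above, so a later chip can override the MAC routine. A reduced model of that indirection; the signatures are simplified, not the driver's exact prototypes:

	#include <stdint.h>
	#include <stdio.h>

	struct dev;

	struct chip_ops {
		int (*stop_sch_tx)(struct dev *d, uint8_t mac_idx, uint32_t *tx_en);
		void (*resume_sch_tx)(struct dev *d, uint8_t mac_idx, uint32_t tx_en);
	};

	struct dev {
		const struct chip_ops *ops;
	};

	static int mac_stop_sch_tx(struct dev *d, uint8_t mac_idx, uint32_t *tx_en)
	{
		(void)d;
		*tx_en = 0x3ffff;	/* save current TXEN bits (18 on V1 parts) */
		printf("stop sched tx on mac %u\n", mac_idx);
		return 0;
	}

	static void mac_resume_sch_tx(struct dev *d, uint8_t mac_idx, uint32_t tx_en)
	{
		(void)d;
		printf("resume sched tx on mac %u, tx_en=0x%x\n",
		       mac_idx, (unsigned)tx_en);
	}

	/* The rtw89_chip_*() wrappers boil down to this indirection. */
	static int chip_stop_sch_tx(struct dev *d, uint8_t mac_idx, uint32_t *tx_en)
	{
		return d->ops->stop_sch_tx(d, mac_idx, tx_en);
	}

	int main(void)
	{
		static const struct chip_ops ops_demo = {
			.stop_sch_tx = mac_stop_sch_tx,
			.resume_sch_tx = mac_resume_sch_tx,
		};
		struct dev d = { .ops = &ops_demo };
		uint32_t tx_en;

		chip_stop_sch_tx(&d, 0, &tx_en);
		d.ops->resume_sch_tx(&d, 0, tx_en);
		return 0;
	}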
@@ -2024,6 +2060,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
        .dig_table              = &rtw89_8852a_phy_dig_table,
        .support_bands          = BIT(NL80211_BAND_2GHZ) |
                                  BIT(NL80211_BAND_5GHZ),
+       .support_bw160          = false,
        .rf_path_num            = 2,
        .tx_nss                 = 2,
        .rx_nss                 = 2,
@@ -2034,6 +2071,8 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
        .physical_efuse_size    = 1216,
        .logical_efuse_size     = 1536,
        .limit_efuse_size       = 1152,
+       .dav_phy_efuse_size     = 0,
+       .dav_log_efuse_size     = 0,
        .phycap_addr            = 0x580,
        .phycap_size            = 128,
        .para_ver               = 0x05050864,
@@ -2054,6 +2093,14 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
        .ps_mode_supported      = BIT(RTW89_PS_MODE_RFOFF) |
                                  BIT(RTW89_PS_MODE_CLK_GATED) |
                                  BIT(RTW89_PS_MODE_PWR_GATED),
+       .hci_func_en_addr       = R_AX_HCI_FUNC_EN,
+       .h2c_ctrl_reg           = R_AX_H2CREG_CTRL,
+       .h2c_regs               = rtw8852a_h2c_regs,
+       .c2h_ctrl_reg           = R_AX_C2HREG_CTRL,
+       .c2h_regs               = rtw8852a_c2h_regs,
+       .page_regs              = &rtw8852a_page_regs,
+       .dcfo_comp              = &rtw8852a_dcfo_comp,
+       .dcfo_comp_sft          = 3,
 };
 EXPORT_SYMBOL(rtw8852a_chip_info);
 
index c021e93..ad27285 100644
 #include "rtw8852a_rfk_table.h"
 #include "rtw8852a_table.h"
 
-static void
-_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
-       rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
-}
-
-static void
-_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
-       rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
-}
-
-static void
-_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
-       rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
-}
-
-static void
-_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
-       rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
-}
-
-static void
-_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
-       udelay(def->data);
-}
-
-static void
-(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
-       [RTW89_RFK_F_WRF] = _rfk_write_rf,
-       [RTW89_RFK_F_WM] = _rfk_write32_mask,
-       [RTW89_RFK_F_WS] = _rfk_write32_set,
-       [RTW89_RFK_F_WC] = _rfk_write32_clr,
-       [RTW89_RFK_F_DELAY] = _rfk_delay,
-};
-
-static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);
-
-static void
-rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
-{
-       const struct rtw89_reg5_def *p = tbl->defs;
-       const struct rtw89_reg5_def *end = tbl->defs + tbl->size;
-
-       for (; p < end; p++)
-               _rfk_handler[p->flag](rtwdev, p);
-}
-
-#define rtw89_rfk_parser_by_cond(rtwdev, cond, tbl_t, tbl_f)   \
-       do {                                                    \
-               typeof(rtwdev) _dev = (rtwdev);                 \
-               if (cond)                                       \
-                       rtw89_rfk_parser(_dev, (tbl_t));        \
-               else                                            \
-                       rtw89_rfk_parser(_dev, (tbl_f));        \
-       } while (0)
-
 static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
 {
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,  PHY%d\n",
@@ -2977,6 +2917,7 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
        u8 i, j;
 
        switch (subband) {
+       default:
        case RTW89_CH_2G:
                thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_p;
                thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_n;
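Placing default: ahead of the first case, as both TSSI hunks here do, makes unrecognized subbands take the 2 GHz path and guarantees the lookups below the switch are always initialized. In miniature (enum and table names are illustrative):

	#include <stdio.h>

	enum subband { CH_2G, CH_5G_1, CH_UNKNOWN = 42 };

	static const char *pick_tbl(enum subband sb)
	{
		const char *tbl;

		switch (sb) {
		default:		/* unknown subbands fall through to 2G */
		case CH_2G:
			tbl = "tbl_2g";
			break;
		case CH_5G_1:
			tbl = "tbl_5g_1";
			break;
		}
		return tbl;
	}

	int main(void)
	{
		printf("%s\n", pick_tbl(CH_UNKNOWN));	/* -> tbl_2g */
		return 0;
	}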
@@ -3161,6 +3102,7 @@ static void _tssi_pak(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
        u8 subband = rtwdev->hal.current_subband;
 
        switch (subband) {
+       default:
        case RTW89_CH_2G:
                rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
                                         &rtw8852a_tssi_pak_defs_a_2g_tbl,
@@ -3584,7 +3526,7 @@ static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
        const struct rtw89_chip_info *mac_reg = rtwdev->chip;
        u8 ch = rtwdev->hal.current_channel, ch_tmp;
        u8 bw = rtwdev->hal.current_band_width;
-       u16 tx_en;
+       u32 tx_en;
        u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0);
        s8 power;
        s16 xdbm;
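The u16 to u32 widening of tx_en in this and the following hunks tracks the register file: V1 parts carry 18 TXEN bits (B_AX_CTN_TXEN_ALL_MASK_V1 is GENMASK(17, 0) in reg.h above), which no longer fits in 16. A compile-time illustration, reusing the portable 32-bit GENMASK stand-in from earlier:

	#include <stdint.h>

	#define GENMASK(h, l)	(((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))

	_Static_assert(GENMASK(15, 0) <= UINT16_MAX, "legacy mask fits in u16");
	_Static_assert(GENMASK(17, 0) > UINT16_MAX, "V1 mask needs a wider type");

	int main(void) { return 0; }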
@@ -3612,7 +3554,7 @@ static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
                    __func__, phy, power, xdbm);
 
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
-       rtw89_mac_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
+       rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
        _wait_rx_mode(rtwdev, _kpath(rtwdev, phy));
        tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
 
@@ -3658,7 +3600,7 @@ static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
 
        rtw8852a_bb_tx_mode_switch(rtwdev, phy, 0);
 
-       rtw89_mac_resume_sch_tx(rtwdev, phy, tx_en);
+       rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
 }
 
@@ -3681,11 +3623,11 @@ void rtw8852a_dack(struct rtw89_dev *rtwdev)
 
 void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
 {
-       u16 tx_en;
+       u32 tx_en;
        u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
 
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
-       rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+       rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
        _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
 
        _iqk_init(rtwdev);
@@ -3694,7 +3636,7 @@ void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
        else
                _iqk(rtwdev, phy_idx, false);
 
-       rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en);
+       rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
 }
 
@@ -3706,33 +3648,33 @@ void rtw8852a_iqk_track(struct rtw89_dev *rtwdev)
 void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
                     bool is_afe)
 {
-       u16 tx_en;
+       u32 tx_en;
        u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
 
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
-       rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+       rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
        _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
 
        _rx_dck(rtwdev, phy_idx, is_afe);
 
-       rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en);
+       rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
 }
 
 void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
 {
-       u16 tx_en;
+       u32 tx_en;
        u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
 
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
-       rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+       rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
        _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
 
        rtwdev->dpk.is_dpk_enable = true;
        rtwdev->dpk.is_dpk_reload_en = false;
        _dpk(rtwdev, phy_idx, false);
 
-       rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en);
+       rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
 }
 
index 5105700..dd2a978 100644
 #include "rtw8852a_rfk_table.h"
 
 static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs[] = {
-       DECL_RFK_WM(0x12a8, 0x00000001, 0x00000001),
-       DECL_RFK_WM(0x12a8, 0x0000000e, 0x00000002),
-       DECL_RFK_WM(0x32a8, 0x00000001, 0x00000001),
-       DECL_RFK_WM(0x32a8, 0x0000000e, 0x00000002),
-       DECL_RFK_WM(0x12bc, 0x000000f0, 0x00000005),
-       DECL_RFK_WM(0x12bc, 0x00000f00, 0x00000005),
-       DECL_RFK_WM(0x12bc, 0x000f0000, 0x00000005),
-       DECL_RFK_WM(0x12bc, 0x0000f000, 0x00000005),
-       DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
-       DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
-       DECL_RFK_WM(0x32bc, 0x000000f0, 0x00000005),
-       DECL_RFK_WM(0x32bc, 0x00000f00, 0x00000005),
-       DECL_RFK_WM(0x32bc, 0x000f0000, 0x00000005),
-       DECL_RFK_WM(0x32bc, 0x0000f000, 0x00000005),
-       DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
-       DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
-       DECL_RFK_WM(0x0300, 0xff000000, 0x00000019),
-       DECL_RFK_WM(0x0304, 0x000000ff, 0x00000019),
-       DECL_RFK_WM(0x0304, 0x0000ff00, 0x0000001d),
-       DECL_RFK_WM(0x0314, 0xffff0000, 0x00002044),
-       DECL_RFK_WM(0x0318, 0x0000ffff, 0x00002042),
-       DECL_RFK_WM(0x0318, 0xffff0000, 0x00002002),
-       DECL_RFK_WM(0x0020, 0x00006000, 0x00000003),
-       DECL_RFK_WM(0x0024, 0x00006000, 0x00000003),
-       DECL_RFK_WM(0x0704, 0xffff0000, 0x0000601e),
-       DECL_RFK_WM(0x2704, 0xffff0000, 0x0000601e),
-       DECL_RFK_WM(0x0700, 0xf0000000, 0x00000004),
-       DECL_RFK_WM(0x2700, 0xf0000000, 0x00000004),
-       DECL_RFK_WM(0x0650, 0x3c000000, 0x00000000),
-       DECL_RFK_WM(0x2650, 0x3c000000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs);
+       RTW89_DECL_RFK_WM(0x12a8, 0x00000001, 0x00000001),
+       RTW89_DECL_RFK_WM(0x12a8, 0x0000000e, 0x00000002),
+       RTW89_DECL_RFK_WM(0x32a8, 0x00000001, 0x00000001),
+       RTW89_DECL_RFK_WM(0x32a8, 0x0000000e, 0x00000002),
+       RTW89_DECL_RFK_WM(0x12bc, 0x000000f0, 0x00000005),
+       RTW89_DECL_RFK_WM(0x12bc, 0x00000f00, 0x00000005),
+       RTW89_DECL_RFK_WM(0x12bc, 0x000f0000, 0x00000005),
+       RTW89_DECL_RFK_WM(0x12bc, 0x0000f000, 0x00000005),
+       RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
+       RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
+       RTW89_DECL_RFK_WM(0x32bc, 0x000000f0, 0x00000005),
+       RTW89_DECL_RFK_WM(0x32bc, 0x00000f00, 0x00000005),
+       RTW89_DECL_RFK_WM(0x32bc, 0x000f0000, 0x00000005),
+       RTW89_DECL_RFK_WM(0x32bc, 0x0000f000, 0x00000005),
+       RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
+       RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
+       RTW89_DECL_RFK_WM(0x0300, 0xff000000, 0x00000019),
+       RTW89_DECL_RFK_WM(0x0304, 0x000000ff, 0x00000019),
+       RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x0000001d),
+       RTW89_DECL_RFK_WM(0x0314, 0xffff0000, 0x00002044),
+       RTW89_DECL_RFK_WM(0x0318, 0x0000ffff, 0x00002042),
+       RTW89_DECL_RFK_WM(0x0318, 0xffff0000, 0x00002002),
+       RTW89_DECL_RFK_WM(0x0020, 0x00006000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x0024, 0x00006000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x0704, 0xffff0000, 0x0000601e),
+       RTW89_DECL_RFK_WM(0x2704, 0xffff0000, 0x0000601e),
+       RTW89_DECL_RFK_WM(0x0700, 0xf0000000, 0x00000004),
+       RTW89_DECL_RFK_WM(0x2700, 0xf0000000, 0x00000004),
+       RTW89_DECL_RFK_WM(0x0650, 0x3c000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x2650, 0x3c000000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs_2g[] = {
-       DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
-       DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
-       DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
-       DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
+       RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
+       RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
+       RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
+       RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
 };
 
-DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_2g);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_2g);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs_5g[] = {
-       DECL_RFK_WM(0x120c, 0x000000ff, 0x00000044),
-       DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000044),
-       DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000044),
-       DECL_RFK_WM(0x320c, 0x000000ff, 0x00000044),
+       RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x00000044),
+       RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000044),
+       RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000044),
+       RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x00000044),
 };
 
-DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_5g);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_5g);
 
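The DECL_RFK_* to RTW89_DECL_RFK_* rename across this file is the flip side of moving the macros into phy.h: once they live in a shared header they need the driver prefix. The declaration pattern itself, reduced to a standalone example (the flag value and demo table are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	struct reg5_def { uint8_t flag; uint32_t addr, mask, data; };

	struct rfk_tbl {
		const struct reg5_def *defs;
		uint32_t size;
	};

	#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

	/* Pairs a flat array with its length so consumers can never pass a
	 * mismatched size; usage supplies the trailing semicolon. */
	#define RTW89_DECLARE_RFK_TBL(_name)		\
	const struct rfk_tbl _name ## _tbl = {		\
		.defs = _name,				\
		.size = ARRAY_SIZE(_name),		\
	}

	#define RTW89_DECL_RFK_WM(_addr, _mask, _data)	\
		{ .flag = 1, .addr = _addr, .mask = _mask, .data = _data }

	static const struct reg5_def demo_defs[] = {
		RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
		RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
	};

	RTW89_DECLARE_RFK_TBL(demo_defs);	/* emits demo_defs_tbl */

	int main(void)
	{
		printf("%u entries\n", (unsigned)demo_defs_tbl.size);
		return 0;
	}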
 static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_a[] = {
-       DECL_RFK_WM(0x5800, 0x000000ff, 0x0000007f),
-       DECL_RFK_WM(0x5800, 0x0000ff00, 0x00000080),
-       DECL_RFK_WM(0x5800, 0x003f0000, 0x0000003f),
-       DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
-       DECL_RFK_WM(0x5800, 0x20000000, 0x00000000),
-       DECL_RFK_WM(0x5800, 0xc0000000, 0x00000000),
-       DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
-       DECL_RFK_WM(0x580c, 0x0000007f, 0x00000040),
-       DECL_RFK_WM(0x580c, 0x00007f00, 0x00000040),
-       DECL_RFK_WM(0x580c, 0x00008000, 0x00000000),
-       DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
-       DECL_RFK_WM(0x5810, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x5810, 0x00000200, 0x00000000),
-       DECL_RFK_WM(0x5810, 0x0000fc00, 0x00000000),
-       DECL_RFK_WM(0x5810, 0x00010000, 0x00000001),
-       DECL_RFK_WM(0x5810, 0x00fe0000, 0x00000000),
-       DECL_RFK_WM(0x5810, 0x01000000, 0x00000001),
-       DECL_RFK_WM(0x5810, 0x06000000, 0x00000000),
-       DECL_RFK_WM(0x5810, 0x38000000, 0x00000003),
-       DECL_RFK_WM(0x5810, 0x40000000, 0x00000001),
-       DECL_RFK_WM(0x5810, 0x80000000, 0x00000000),
-       DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x5814, 0x00000c00, 0x00000000),
-       DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
-       DECL_RFK_WM(0x5814, 0x00002000, 0x00000000),
-       DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
-       DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
-       DECL_RFK_WM(0x5814, 0x003c0000, 0x00000000),
-       DECL_RFK_WM(0x5814, 0x01c00000, 0x00000000),
-       DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
-       DECL_RFK_WM(0x5814, 0xe0000000, 0x00000000),
-       DECL_RFK_WM(0x5818, 0x000000ff, 0x00000000),
-       DECL_RFK_WM(0x5818, 0x0001ff00, 0x00000018),
-       DECL_RFK_WM(0x5818, 0x03fe0000, 0x00000016),
-       DECL_RFK_WM(0x5818, 0xfc000000, 0x00000000),
-       DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
-       DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
-       DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
-       DECL_RFK_WM(0x581c, 0x01e00000, 0x00000008),
-       DECL_RFK_WM(0x581c, 0x01e00000, 0x0000000e),
-       DECL_RFK_WM(0x581c, 0x1e000000, 0x00000008),
-       DECL_RFK_WM(0x581c, 0x1e000000, 0x0000000e),
-       DECL_RFK_WM(0x581c, 0x20000000, 0x00000000),
-       DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
-       DECL_RFK_WM(0x5820, 0x0000f000, 0x0000000f),
-       DECL_RFK_WM(0x5820, 0x001f0000, 0x00000000),
-       DECL_RFK_WM(0x5820, 0xffe00000, 0x00000000),
-       DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000000),
-       DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000000),
-       DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000000),
-       DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000000),
-       DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000000),
-       DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000000),
-       DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000000),
-       DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000000),
-       DECL_RFK_WM(0x5860, 0x80000000, 0x00000000),
-       DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
-       DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
-       DECL_RFK_WM(0x5864, 0x03f00000, 0x00000000),
-       DECL_RFK_WM(0x5864, 0x04000000, 0x00000000),
-       DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x589c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x58a0, 0x000000ff, 0x000000fd),
-       DECL_RFK_WM(0x58a0, 0x0000ff00, 0x000000e5),
-       DECL_RFK_WM(0x58a0, 0x00ff0000, 0x000000cd),
-       DECL_RFK_WM(0x58a0, 0xff000000, 0x000000b5),
-       DECL_RFK_WM(0x58a4, 0x000000ff, 0x00000016),
-       DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
-       DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
-       DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
-       DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
-       DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
-       DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
-       DECL_RFK_WM(0x58b0, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x58b4, 0x0000001f, 0x00000000),
-       DECL_RFK_WM(0x58b4, 0x00000020, 0x00000000),
-       DECL_RFK_WM(0x58b4, 0x000001c0, 0x00000000),
-       DECL_RFK_WM(0x58b4, 0x00000200, 0x00000000),
-       DECL_RFK_WM(0x58b4, 0x0000f000, 0x00000002),
-       DECL_RFK_WM(0x58b4, 0x00ff0000, 0x00000000),
-       DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
-       DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
-       DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
-       DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
-       DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
-       DECL_RFK_WM(0x58bc, 0x000000ff, 0x0000007f),
-       DECL_RFK_WM(0x58bc, 0x0000ff00, 0x00000080),
-       DECL_RFK_WM(0x58bc, 0x00030000, 0x00000003),
-       DECL_RFK_WM(0x58bc, 0x000c0000, 0x00000001),
-       DECL_RFK_WM(0x58bc, 0x00300000, 0x00000002),
-       DECL_RFK_WM(0x58bc, 0x00c00000, 0x00000002),
-       DECL_RFK_WM(0x58bc, 0x07000000, 0x00000007),
-       DECL_RFK_WM(0x58c0, 0x00fe0000, 0x0000003f),
-       DECL_RFK_WM(0x58c0, 0xff000000, 0x00000000),
-       DECL_RFK_WM(0x58c4, 0x0003ffff, 0x0003ffff),
-       DECL_RFK_WM(0x58c4, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x58c4, 0xc0000000, 0x00000000),
-       DECL_RFK_WM(0x58c8, 0x00ffffff, 0x00000000),
-       DECL_RFK_WM(0x58c8, 0xf0000000, 0x00000000),
-       DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x58d0, 0x00001fff, 0x00000101),
-       DECL_RFK_WM(0x58d0, 0x0001e000, 0x00000004),
-       DECL_RFK_WM(0x58d0, 0x03fe0000, 0x00000100),
-       DECL_RFK_WM(0x58d0, 0x04000000, 0x00000000),
-       DECL_RFK_WM(0x58d4, 0x000000ff, 0x00000000),
-       DECL_RFK_WM(0x58d4, 0x0003fe00, 0x000000ff),
-       DECL_RFK_WM(0x58d4, 0x07fc0000, 0x00000100),
-       DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
-       DECL_RFK_WM(0x58d8, 0x0003fe00, 0x0000005c),
-       DECL_RFK_WM(0x58d8, 0x000c0000, 0x00000002),
-       DECL_RFK_WM(0x58d8, 0xfff00000, 0x00000800),
-       DECL_RFK_WM(0x58dc, 0x000000ff, 0x0000007f),
-       DECL_RFK_WM(0x58dc, 0x0000ff00, 0x00000080),
-       DECL_RFK_WM(0x58dc, 0x00010000, 0x00000000),
-       DECL_RFK_WM(0x58dc, 0x3ff00000, 0x00000000),
-       DECL_RFK_WM(0x58dc, 0x80000000, 0x00000001),
-       DECL_RFK_WM(0x58f0, 0x000001ff, 0x000001ff),
-       DECL_RFK_WM(0x58f0, 0x0003fe00, 0x00000000),
-       DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_a);
+       RTW89_DECL_RFK_WM(0x5800, 0x000000ff, 0x0000007f),
+       RTW89_DECL_RFK_WM(0x5800, 0x0000ff00, 0x00000080),
+       RTW89_DECL_RFK_WM(0x5800, 0x003f0000, 0x0000003f),
+       RTW89_DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5800, 0x20000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5800, 0xc0000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x580c, 0x0000007f, 0x00000040),
+       RTW89_DECL_RFK_WM(0x580c, 0x00007f00, 0x00000040),
+       RTW89_DECL_RFK_WM(0x580c, 0x00008000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5810, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5810, 0x00000200, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5810, 0x0000fc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5810, 0x00010000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x5810, 0x00fe0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5810, 0x01000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x5810, 0x06000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5810, 0x38000000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x5810, 0x40000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x5810, 0x80000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5814, 0x00000c00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x5814, 0x00002000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
+       RTW89_DECL_RFK_WM(0x5814, 0x003c0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5814, 0x01c00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5814, 0xe0000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5818, 0x000000ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5818, 0x0001ff00, 0x00000018),
+       RTW89_DECL_RFK_WM(0x5818, 0x03fe0000, 0x00000016),
+       RTW89_DECL_RFK_WM(0x5818, 0xfc000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
+       RTW89_DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
+       RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x581c, 0x01e00000, 0x00000008),
+       RTW89_DECL_RFK_WM(0x581c, 0x01e00000, 0x0000000e),
+       RTW89_DECL_RFK_WM(0x581c, 0x1e000000, 0x00000008),
+       RTW89_DECL_RFK_WM(0x581c, 0x1e000000, 0x0000000e),
+       RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
+       RTW89_DECL_RFK_WM(0x5820, 0x0000f000, 0x0000000f),
+       RTW89_DECL_RFK_WM(0x5820, 0x001f0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5820, 0xffe00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5860, 0x80000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
+       RTW89_DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
+       RTW89_DECL_RFK_WM(0x5864, 0x03f00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5864, 0x04000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x589c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a0, 0x000000ff, 0x000000fd),
+       RTW89_DECL_RFK_WM(0x58a0, 0x0000ff00, 0x000000e5),
+       RTW89_DECL_RFK_WM(0x58a0, 0x00ff0000, 0x000000cd),
+       RTW89_DECL_RFK_WM(0x58a0, 0xff000000, 0x000000b5),
+       RTW89_DECL_RFK_WM(0x58a4, 0x000000ff, 0x00000016),
+       RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58b0, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58b4, 0x0000001f, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58b4, 0x00000020, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58b4, 0x000001c0, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58b4, 0x00000200, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58b4, 0x0000f000, 0x00000002),
+       RTW89_DECL_RFK_WM(0x58b4, 0x00ff0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
+       RTW89_DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
+       RTW89_DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
+       RTW89_DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58bc, 0x000000ff, 0x0000007f),
+       RTW89_DECL_RFK_WM(0x58bc, 0x0000ff00, 0x00000080),
+       RTW89_DECL_RFK_WM(0x58bc, 0x00030000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x58bc, 0x000c0000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x58bc, 0x00300000, 0x00000002),
+       RTW89_DECL_RFK_WM(0x58bc, 0x00c00000, 0x00000002),
+       RTW89_DECL_RFK_WM(0x58bc, 0x07000000, 0x00000007),
+       RTW89_DECL_RFK_WM(0x58c0, 0x00fe0000, 0x0000003f),
+       RTW89_DECL_RFK_WM(0x58c0, 0xff000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58c4, 0x0003ffff, 0x0003ffff),
+       RTW89_DECL_RFK_WM(0x58c4, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58c4, 0xc0000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58c8, 0x00ffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58c8, 0xf0000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58d0, 0x00001fff, 0x00000101),
+       RTW89_DECL_RFK_WM(0x58d0, 0x0001e000, 0x00000004),
+       RTW89_DECL_RFK_WM(0x58d0, 0x03fe0000, 0x00000100),
+       RTW89_DECL_RFK_WM(0x58d0, 0x04000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58d4, 0x000000ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58d4, 0x0003fe00, 0x000000ff),
+       RTW89_DECL_RFK_WM(0x58d4, 0x07fc0000, 0x00000100),
+       RTW89_DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
+       RTW89_DECL_RFK_WM(0x58d8, 0x0003fe00, 0x0000005c),
+       RTW89_DECL_RFK_WM(0x58d8, 0x000c0000, 0x00000002),
+       RTW89_DECL_RFK_WM(0x58d8, 0xfff00000, 0x00000800),
+       RTW89_DECL_RFK_WM(0x58dc, 0x000000ff, 0x0000007f),
+       RTW89_DECL_RFK_WM(0x58dc, 0x0000ff00, 0x00000080),
+       RTW89_DECL_RFK_WM(0x58dc, 0x00010000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58dc, 0x3ff00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58dc, 0x80000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x58f0, 0x000001ff, 0x000001ff),
+       RTW89_DECL_RFK_WM(0x58f0, 0x0003fe00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_a);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_b[] = {
-       DECL_RFK_WM(0x7800, 0x000000ff, 0x0000007f),
-       DECL_RFK_WM(0x7800, 0x0000ff00, 0x00000080),
-       DECL_RFK_WM(0x7800, 0x003f0000, 0x0000003f),
-       DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
-       DECL_RFK_WM(0x7800, 0x20000000, 0x00000000),
-       DECL_RFK_WM(0x7800, 0xc0000000, 0x00000000),
-       DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
-       DECL_RFK_WM(0x780c, 0x0000007f, 0x00000040),
-       DECL_RFK_WM(0x780c, 0x00007f00, 0x00000040),
-       DECL_RFK_WM(0x780c, 0x00008000, 0x00000000),
-       DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
-       DECL_RFK_WM(0x7810, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x7810, 0x00000200, 0x00000000),
-       DECL_RFK_WM(0x7810, 0x0000fc00, 0x00000000),
-       DECL_RFK_WM(0x7810, 0x00010000, 0x00000001),
-       DECL_RFK_WM(0x7810, 0x00fe0000, 0x00000000),
-       DECL_RFK_WM(0x7810, 0x01000000, 0x00000001),
-       DECL_RFK_WM(0x7810, 0x06000000, 0x00000000),
-       DECL_RFK_WM(0x7810, 0x38000000, 0x00000003),
-       DECL_RFK_WM(0x7810, 0x40000000, 0x00000001),
-       DECL_RFK_WM(0x7810, 0x80000000, 0x00000000),
-       DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x7814, 0x00000c00, 0x00000000),
-       DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
-       DECL_RFK_WM(0x7814, 0x00002000, 0x00000000),
-       DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
-       DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
-       DECL_RFK_WM(0x7814, 0x003c0000, 0x00000000),
-       DECL_RFK_WM(0x7814, 0x01c00000, 0x00000000),
-       DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
-       DECL_RFK_WM(0x7814, 0xe0000000, 0x00000000),
-       DECL_RFK_WM(0x7818, 0x000000ff, 0x00000000),
-       DECL_RFK_WM(0x7818, 0x0001ff00, 0x00000018),
-       DECL_RFK_WM(0x7818, 0x03fe0000, 0x00000016),
-       DECL_RFK_WM(0x7818, 0xfc000000, 0x00000000),
-       DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
-       DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
-       DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
-       DECL_RFK_WM(0x781c, 0x01e00000, 0x00000008),
-       DECL_RFK_WM(0x781c, 0x01e00000, 0x0000000e),
-       DECL_RFK_WM(0x781c, 0x1e000000, 0x00000008),
-       DECL_RFK_WM(0x781c, 0x1e000000, 0x0000000e),
-       DECL_RFK_WM(0x781c, 0x20000000, 0x00000000),
-       DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
-       DECL_RFK_WM(0x7820, 0x0000f000, 0x00000000),
-       DECL_RFK_WM(0x7820, 0x001f0000, 0x00000000),
-       DECL_RFK_WM(0x7820, 0xffe00000, 0x00000000),
-       DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000000),
-       DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000000),
-       DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000000),
-       DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000000),
-       DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000000),
-       DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000000),
-       DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x7858, 0x7fc00000, 0x00000000),
-       DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000000),
-       DECL_RFK_WM(0x7860, 0x80000000, 0x00000000),
-       DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
-       DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
-       DECL_RFK_WM(0x7864, 0x03f00000, 0x00000000),
-       DECL_RFK_WM(0x7864, 0x04000000, 0x00000000),
-       DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x78a0, 0x000000ff, 0x000000fd),
-       DECL_RFK_WM(0x78a0, 0x0000ff00, 0x000000e5),
-       DECL_RFK_WM(0x78a0, 0x00ff0000, 0x000000cd),
-       DECL_RFK_WM(0x78a0, 0xff000000, 0x000000b5),
-       DECL_RFK_WM(0x78a4, 0x000000ff, 0x00000016),
-       DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
-       DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
-       DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
-       DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
-       DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
-       DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
-       DECL_RFK_WM(0x78b0, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x78b4, 0x0000001f, 0x00000000),
-       DECL_RFK_WM(0x78b4, 0x00000020, 0x00000000),
-       DECL_RFK_WM(0x78b4, 0x000001c0, 0x00000000),
-       DECL_RFK_WM(0x78b4, 0x00000200, 0x00000000),
-       DECL_RFK_WM(0x78b4, 0x0000f000, 0x00000002),
-       DECL_RFK_WM(0x78b4, 0x00ff0000, 0x00000000),
-       DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
-       DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
-       DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
-       DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
-       DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
-       DECL_RFK_WM(0x78bc, 0x000000ff, 0x0000007f),
-       DECL_RFK_WM(0x78bc, 0x0000ff00, 0x00000080),
-       DECL_RFK_WM(0x78bc, 0x00030000, 0x00000003),
-       DECL_RFK_WM(0x78bc, 0x000c0000, 0x00000001),
-       DECL_RFK_WM(0x78bc, 0x00300000, 0x00000002),
-       DECL_RFK_WM(0x78bc, 0x00c00000, 0x00000002),
-       DECL_RFK_WM(0x78bc, 0x07000000, 0x00000007),
-       DECL_RFK_WM(0x78c0, 0x00fe0000, 0x0000003f),
-       DECL_RFK_WM(0x78c0, 0xff000000, 0x00000000),
-       DECL_RFK_WM(0x78c4, 0x0003ffff, 0x0003ffff),
-       DECL_RFK_WM(0x78c4, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x78c4, 0xc0000000, 0x00000000),
-       DECL_RFK_WM(0x78c8, 0x00ffffff, 0x00000000),
-       DECL_RFK_WM(0x78c8, 0xf0000000, 0x00000000),
-       DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x78d0, 0x00001fff, 0x00000101),
-       DECL_RFK_WM(0x78d0, 0x0001e000, 0x00000004),
-       DECL_RFK_WM(0x78d0, 0x03fe0000, 0x00000100),
-       DECL_RFK_WM(0x78d0, 0x04000000, 0x00000000),
-       DECL_RFK_WM(0x78d4, 0x000000ff, 0x00000000),
-       DECL_RFK_WM(0x78d4, 0x0003fe00, 0x000000ff),
-       DECL_RFK_WM(0x78d4, 0x07fc0000, 0x00000100),
-       DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
-       DECL_RFK_WM(0x78d8, 0x0003fe00, 0x0000005c),
-       DECL_RFK_WM(0x78d8, 0x000c0000, 0x00000002),
-       DECL_RFK_WM(0x78d8, 0xfff00000, 0x00000800),
-       DECL_RFK_WM(0x78dc, 0x000000ff, 0x0000007f),
-       DECL_RFK_WM(0x78dc, 0x0000ff00, 0x00000080),
-       DECL_RFK_WM(0x78dc, 0x00010000, 0x00000000),
-       DECL_RFK_WM(0x78dc, 0x3ff00000, 0x00000000),
-       DECL_RFK_WM(0x78dc, 0x80000000, 0x00000001),
-       DECL_RFK_WM(0x78f0, 0x000001ff, 0x000001ff),
-       DECL_RFK_WM(0x78f0, 0x0003fe00, 0x00000000),
-       DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_b);
+       RTW89_DECL_RFK_WM(0x7800, 0x000000ff, 0x0000007f),
+       RTW89_DECL_RFK_WM(0x7800, 0x0000ff00, 0x00000080),
+       RTW89_DECL_RFK_WM(0x7800, 0x003f0000, 0x0000003f),
+       RTW89_DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7800, 0x20000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7800, 0xc0000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x780c, 0x0000007f, 0x00000040),
+       RTW89_DECL_RFK_WM(0x780c, 0x00007f00, 0x00000040),
+       RTW89_DECL_RFK_WM(0x780c, 0x00008000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7810, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7810, 0x00000200, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7810, 0x0000fc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7810, 0x00010000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x7810, 0x00fe0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7810, 0x01000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x7810, 0x06000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7810, 0x38000000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x7810, 0x40000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x7810, 0x80000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7814, 0x00000c00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x7814, 0x00002000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
+       RTW89_DECL_RFK_WM(0x7814, 0x003c0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7814, 0x01c00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7814, 0xe0000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7818, 0x000000ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7818, 0x0001ff00, 0x00000018),
+       RTW89_DECL_RFK_WM(0x7818, 0x03fe0000, 0x00000016),
+       RTW89_DECL_RFK_WM(0x7818, 0xfc000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
+       RTW89_DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
+       RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x781c, 0x01e00000, 0x00000008),
+       RTW89_DECL_RFK_WM(0x781c, 0x01e00000, 0x0000000e),
+       RTW89_DECL_RFK_WM(0x781c, 0x1e000000, 0x00000008),
+       RTW89_DECL_RFK_WM(0x781c, 0x1e000000, 0x0000000e),
+       RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
+       RTW89_DECL_RFK_WM(0x7820, 0x0000f000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7820, 0x001f0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7820, 0xffe00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7858, 0x7fc00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7860, 0x80000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
+       RTW89_DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
+       RTW89_DECL_RFK_WM(0x7864, 0x03f00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7864, 0x04000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a0, 0x000000ff, 0x000000fd),
+       RTW89_DECL_RFK_WM(0x78a0, 0x0000ff00, 0x000000e5),
+       RTW89_DECL_RFK_WM(0x78a0, 0x00ff0000, 0x000000cd),
+       RTW89_DECL_RFK_WM(0x78a0, 0xff000000, 0x000000b5),
+       RTW89_DECL_RFK_WM(0x78a4, 0x000000ff, 0x00000016),
+       RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78b0, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78b4, 0x0000001f, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78b4, 0x00000020, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78b4, 0x000001c0, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78b4, 0x00000200, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78b4, 0x0000f000, 0x00000002),
+       RTW89_DECL_RFK_WM(0x78b4, 0x00ff0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
+       RTW89_DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
+       RTW89_DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
+       RTW89_DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78bc, 0x000000ff, 0x0000007f),
+       RTW89_DECL_RFK_WM(0x78bc, 0x0000ff00, 0x00000080),
+       RTW89_DECL_RFK_WM(0x78bc, 0x00030000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x78bc, 0x000c0000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x78bc, 0x00300000, 0x00000002),
+       RTW89_DECL_RFK_WM(0x78bc, 0x00c00000, 0x00000002),
+       RTW89_DECL_RFK_WM(0x78bc, 0x07000000, 0x00000007),
+       RTW89_DECL_RFK_WM(0x78c0, 0x00fe0000, 0x0000003f),
+       RTW89_DECL_RFK_WM(0x78c0, 0xff000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78c4, 0x0003ffff, 0x0003ffff),
+       RTW89_DECL_RFK_WM(0x78c4, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78c4, 0xc0000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78c8, 0x00ffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78c8, 0xf0000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78d0, 0x00001fff, 0x00000101),
+       RTW89_DECL_RFK_WM(0x78d0, 0x0001e000, 0x00000004),
+       RTW89_DECL_RFK_WM(0x78d0, 0x03fe0000, 0x00000100),
+       RTW89_DECL_RFK_WM(0x78d0, 0x04000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78d4, 0x000000ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78d4, 0x0003fe00, 0x000000ff),
+       RTW89_DECL_RFK_WM(0x78d4, 0x07fc0000, 0x00000100),
+       RTW89_DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
+       RTW89_DECL_RFK_WM(0x78d8, 0x0003fe00, 0x0000005c),
+       RTW89_DECL_RFK_WM(0x78d8, 0x000c0000, 0x00000002),
+       RTW89_DECL_RFK_WM(0x78d8, 0xfff00000, 0x00000800),
+       RTW89_DECL_RFK_WM(0x78dc, 0x000000ff, 0x0000007f),
+       RTW89_DECL_RFK_WM(0x78dc, 0x0000ff00, 0x00000080),
+       RTW89_DECL_RFK_WM(0x78dc, 0x00010000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78dc, 0x3ff00000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78dc, 0x80000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x78f0, 0x000001ff, 0x000001ff),
+       RTW89_DECL_RFK_WM(0x78f0, 0x0003fe00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_b);
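
For orientation on this mechanical rename: every DECL_RFK_WM entry becomes RTW89_DECL_RFK_WM and every table declaration gains the RTW89_ prefix, namespacing the helpers for use beyond this one file. A minimal sketch of what the two helpers are assumed to expand to, inferred from the (addr, mask, data) triples above; the exact field and flag names are assumptions, not a quote of the driver's header:

	/* One masked baseband-register write: address, bitfield mask, field value. */
	#define RTW89_DECL_RFK_WM(_addr, _mask, _data)	\
		{.flag = RTW89_RFK_F_WM,		\
		 .addr = _addr,				\
		 .mask = _mask,				\
		 .data = _data,}

	/* Wrap a defs[] array into a table object carrying its entry count. */
	#define RTW89_DECLARE_RFK_TBL(_name)			\
		const struct rtw89_rfk_tbl _name ## _tbl = {	\
			.defs = _name,				\
			.size = ARRAY_SIZE(_name),		\
		}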
 
 static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_2g[] = {
-       DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000013c),
-       DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000013c),
+       RTW89_DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000013c),
+       RTW89_DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000013c),
 };
 
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_2g);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_2g);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_5g[] = {
-       DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
-       DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
+       RTW89_DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
+       RTW89_DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
 };
 
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_5g);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_5g);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a[] = {
-       DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fc),
-       DECL_RFK_WM(0x58e4, 0x0000007f, 0x00000020),
+       RTW89_DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fc),
+       RTW89_DECL_RFK_WM(0x58e4, 0x0000007f, 0x00000020),
 };
 
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b[] = {
-       DECL_RFK_WM(0x78a0, 0xffffffff, 0x000000fc),
-       DECL_RFK_WM(0x78e4, 0x0000007f, 0x00000020),
+       RTW89_DECL_RFK_WM(0x78a0, 0xffffffff, 0x000000fc),
+       RTW89_DECL_RFK_WM(0x78e4, 0x0000007f, 0x00000020),
 };
 
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_dck_defs_a[] = {
-       DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
-       DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
-       DECL_RFK_WM(0x5814, 0x00002000, 0x00000001),
-       DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
-       DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
-       DECL_RFK_WM(0x5814, 0x003c0000, 0x00000003),
-       DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x5814, 0x00002000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
+       RTW89_DECL_RFK_WM(0x5814, 0x003c0000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
 };
 
-DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_a);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_dck_defs_b[] = {
-       DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
-       DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
-       DECL_RFK_WM(0x7814, 0x00002000, 0x00000001),
-       DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
-       DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
-       DECL_RFK_WM(0x7814, 0x003c0000, 0x00000003),
-       DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x7814, 0x00002000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
+       RTW89_DECL_RFK_WM(0x7814, 0x003c0000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
 };
 
-DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_b);
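
The _a/_b table pairs mirror each other across the two RF paths: path A entries target the 0x58xx register block, path B the 0x78xx block. A hedged sketch of how a caller would pick one, assuming the rtw89 driver's usual RF_PATH_A/RF_PATH_B enum and the _tbl suffix produced by the declare macro above; the dispatch itself is illustrative, not quoted from the driver:

	/* Illustrative only: select the per-path DCK table before applying it. */
	const struct rtw89_rfk_tbl *tbl = (path == RF_PATH_A) ?
					  &rtw8852a_tssi_dck_defs_a_tbl :
					  &rtw8852a_tssi_dck_defs_b_tbl;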
 
 static const struct rtw89_reg5_def rtw8852a_tssi_dac_gain_tbl_defs_a[] = {
-       DECL_RFK_WM(0x58b0, 0x00000fff, 0x00000000),
-       DECL_RFK_WM(0x58b0, 0x00000800, 0x00000001),
-       DECL_RFK_WM(0x5a00, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a04, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a08, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a0c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a10, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a14, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a18, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a1c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a20, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a24, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a28, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a2c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a30, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a34, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a38, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a3c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a40, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a44, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a48, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a4c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a50, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a54, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a58, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a5c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a60, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a64, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a68, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a6c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a70, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a74, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a78, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a7c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a80, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a84, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a88, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a8c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a90, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a94, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a98, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5a9c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5aa0, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5aa4, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5aa8, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5aac, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5ab0, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5ab4, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5ab8, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5abc, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x5ac0, 0xffffffff, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_a);
+       RTW89_DECL_RFK_WM(0x58b0, 0x00000fff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58b0, 0x00000800, 0x00000001),
+       RTW89_DECL_RFK_WM(0x5a00, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a04, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a08, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a0c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a10, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a14, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a18, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a1c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a20, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a24, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a28, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a2c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a30, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a34, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a38, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a3c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a40, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a44, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a48, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a4c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a50, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a54, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a58, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a5c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a60, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a64, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a68, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a6c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a70, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a74, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a78, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a7c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a80, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a84, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a88, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a8c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a90, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a94, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a98, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5a9c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5aa0, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5aa4, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5aa8, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5aac, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5ab0, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5ab4, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5ab8, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5abc, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5ac0, 0xffffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_a);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_dac_gain_tbl_defs_b[] = {
-       DECL_RFK_WM(0x78b0, 0x00000fff, 0x00000000),
-       DECL_RFK_WM(0x78b0, 0x00000800, 0x00000001),
-       DECL_RFK_WM(0x7a00, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a04, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a08, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a0c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a10, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a14, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a18, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a1c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a20, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a24, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a28, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a2c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a30, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a34, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a38, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a3c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a40, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a44, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a48, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a4c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a50, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a54, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a58, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a5c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a60, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a64, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a68, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a6c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a70, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a74, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a78, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a7c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a80, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a84, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a88, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a8c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a90, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a94, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a98, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7a9c, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7aa0, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7aa4, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7aa8, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7aac, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7ab0, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7ab4, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7ab8, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7abc, 0xffffffff, 0x00000000),
-       DECL_RFK_WM(0x7ac0, 0xffffffff, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_b);
+       RTW89_DECL_RFK_WM(0x78b0, 0x00000fff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78b0, 0x00000800, 0x00000001),
+       RTW89_DECL_RFK_WM(0x7a00, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a04, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a08, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a0c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a10, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a14, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a18, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a1c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a20, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a24, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a28, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a2c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a30, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a34, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a38, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a3c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a40, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a44, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a48, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a4c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a50, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a54, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a58, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a5c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a60, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a64, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a68, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a6c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a70, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a74, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a78, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a7c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a80, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a84, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a88, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a8c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a90, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a94, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a98, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7a9c, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7aa0, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7aa4, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7aa8, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7aac, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7ab0, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7ab4, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7ab8, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7abc, 0xffffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7ac0, 0xffffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_b);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_slope_cal_org_defs_a[] = {
-       DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
-       DECL_RFK_WM(0x58cc, 0x00001000, 0x00000001),
-       DECL_RFK_WM(0x58cc, 0x00000007, 0x00000000),
-       DECL_RFK_WM(0x58cc, 0x00000038, 0x00000001),
-       DECL_RFK_WM(0x58cc, 0x000001c0, 0x00000002),
-       DECL_RFK_WM(0x58cc, 0x00000e00, 0x00000003),
-       DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000040),
-       DECL_RFK_WM(0x5898, 0x000000ff, 0x00000040),
-       DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000040),
-       DECL_RFK_WM(0x5898, 0x0000ff00, 0x00000040),
-       DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000040),
-       DECL_RFK_WM(0x5898, 0x00ff0000, 0x00000040),
-       DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000040),
-       DECL_RFK_WM(0x5898, 0xff000000, 0x00000040),
-       DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000040),
-       DECL_RFK_WM(0x589c, 0x000000ff, 0x00000040),
-       DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000040),
-       DECL_RFK_WM(0x589c, 0x0000ff00, 0x00000040),
-       DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000040),
-       DECL_RFK_WM(0x589c, 0x00ff0000, 0x00000040),
-       DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000040),
-       DECL_RFK_WM(0x589c, 0xff000000, 0x00000040),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_a);
+       RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58cc, 0x00001000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x58cc, 0x00000007, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58cc, 0x00000038, 0x00000001),
+       RTW89_DECL_RFK_WM(0x58cc, 0x000001c0, 0x00000002),
+       RTW89_DECL_RFK_WM(0x58cc, 0x00000e00, 0x00000003),
+       RTW89_DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x5898, 0x000000ff, 0x00000040),
+       RTW89_DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x5898, 0x0000ff00, 0x00000040),
+       RTW89_DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x5898, 0x00ff0000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x5898, 0xff000000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x589c, 0x000000ff, 0x00000040),
+       RTW89_DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x589c, 0x0000ff00, 0x00000040),
+       RTW89_DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x589c, 0x00ff0000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x589c, 0xff000000, 0x00000040),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_a);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_slope_cal_org_defs_b[] = {
-       DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
-       DECL_RFK_WM(0x78cc, 0x00001000, 0x00000001),
-       DECL_RFK_WM(0x78cc, 0x00000007, 0x00000000),
-       DECL_RFK_WM(0x78cc, 0x00000038, 0x00000001),
-       DECL_RFK_WM(0x78cc, 0x000001c0, 0x00000002),
-       DECL_RFK_WM(0x78cc, 0x00000e00, 0x00000003),
-       DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000040),
-       DECL_RFK_WM(0x7898, 0x000000ff, 0x00000040),
-       DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000040),
-       DECL_RFK_WM(0x7898, 0x0000ff00, 0x00000040),
-       DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000040),
-       DECL_RFK_WM(0x7898, 0x00ff0000, 0x00000040),
-       DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000040),
-       DECL_RFK_WM(0x7898, 0xff000000, 0x00000040),
-       DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000040),
-       DECL_RFK_WM(0x789c, 0x000000ff, 0x00000040),
-       DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000040),
-       DECL_RFK_WM(0x789c, 0x0000ff00, 0x00000040),
-       DECL_RFK_WM(0x7878, 0x7fc00000, 0x00000040),
-       DECL_RFK_WM(0x789c, 0x00ff0000, 0x00000040),
-       DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000040),
-       DECL_RFK_WM(0x789c, 0xff000000, 0x00000040),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_b);
+       RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78cc, 0x00001000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x78cc, 0x00000007, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78cc, 0x00000038, 0x00000001),
+       RTW89_DECL_RFK_WM(0x78cc, 0x000001c0, 0x00000002),
+       RTW89_DECL_RFK_WM(0x78cc, 0x00000e00, 0x00000003),
+       RTW89_DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x7898, 0x000000ff, 0x00000040),
+       RTW89_DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x7898, 0x0000ff00, 0x00000040),
+       RTW89_DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x7898, 0x00ff0000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x7898, 0xff000000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x789c, 0x000000ff, 0x00000040),
+       RTW89_DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x789c, 0x0000ff00, 0x00000040),
+       RTW89_DECL_RFK_WM(0x7878, 0x7fc00000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x789c, 0x00ff0000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000040),
+       RTW89_DECL_RFK_WM(0x789c, 0xff000000, 0x00000040),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_b);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_rf_gap_tbl_defs_a[] = {
-       DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
-       DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
-       DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
-       DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
-       DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
-       DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_a);
+       RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_a);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_rf_gap_tbl_defs_b[] = {
-       DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
-       DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
-       DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
-       DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
-       DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
-       DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_b);
+       RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_b);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_slope_defs_a[] = {
-       DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
-       DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
-       DECL_RFK_WM(0x5814, 0x00000800, 0x00000001),
-       DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
-       DECL_RFK_WM(0x5820, 0x0000f000, 0x00000001),
-       DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
-       DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
-       DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
-       DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
-       DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
-       DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
-       DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
-       DECL_RFK_WM(0x5810, 0x20000000, 0x00000000),
-       DECL_RFK_WM(0x5814, 0x20000000, 0x00000001),
-       DECL_RFK_WM(0x580c, 0x10000000, 0x00000001),
-       DECL_RFK_WM(0x580c, 0x40000000, 0x00000001),
-       DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_a);
+       RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x00000001),
+       RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x5820, 0x0000f000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
+       RTW89_DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
+       RTW89_DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
+       RTW89_DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
+       RTW89_DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
+       RTW89_DECL_RFK_WM(0x5810, 0x20000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5814, 0x20000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x580c, 0x10000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x580c, 0x40000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_a);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_slope_defs_b[] = {
-       DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
-       DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
-       DECL_RFK_WM(0x7814, 0x00000800, 0x00000001),
-       DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
-       DECL_RFK_WM(0x7820, 0x0000f000, 0x00000001),
-       DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
-       DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
-       DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
-       DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
-       DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
-       DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
-       DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
-       DECL_RFK_WM(0x7810, 0x20000000, 0x00000000),
-       DECL_RFK_WM(0x7814, 0x20000000, 0x00000001),
-       DECL_RFK_WM(0x780c, 0x10000000, 0x00000001),
-       DECL_RFK_WM(0x780c, 0x40000000, 0x00000001),
-       DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
-       DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
-       DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
-       DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
-       DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_b);
+       RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x00000001),
+       RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x7820, 0x0000f000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
+       RTW89_DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
+       RTW89_DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
+       RTW89_DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
+       RTW89_DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
+       RTW89_DECL_RFK_WM(0x7810, 0x20000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7814, 0x20000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x780c, 0x10000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x780c, 0x40000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
+       RTW89_DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
+       RTW89_DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_b);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_track_defs_a[] = {
-       DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
-       DECL_RFK_WM(0x5818, 0x18000000, 0x00000000),
-       DECL_RFK_WM(0x5814, 0x00000800, 0x00000000),
-       DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
-       DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
-       DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
-       DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
-       DECL_RFK_WM(0x5814, 0x01000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x00000000),
+       RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
+       RTW89_DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
+       RTW89_DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
+       RTW89_DECL_RFK_WM(0x5814, 0x01000000, 0x00000000),
 };
 
-DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_a);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_track_defs_b[] = {
-       DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
-       DECL_RFK_WM(0x7818, 0x18000000, 0x00000000),
-       DECL_RFK_WM(0x7814, 0x00000800, 0x00000000),
-       DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
-       DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
-       DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
-       DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
-       DECL_RFK_WM(0x7814, 0x01000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x00000000),
+       RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
+       RTW89_DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
+       RTW89_DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
+       RTW89_DECL_RFK_WM(0x7814, 0x01000000, 0x00000000),
 };
 
-DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_b);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_txagc_ofst_mv_avg_defs_a[] = {
-       DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
-       DECL_RFK_WM(0x58e4, 0x00004000, 0x00000001),
-       DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
-       DECL_RFK_WM(0x58e4, 0x00008000, 0x00000000),
-       DECL_RFK_WM(0x58e4, 0x000f0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58e4, 0x00008000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58e4, 0x000f0000, 0x00000000),
 };
 
-DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_a);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_txagc_ofst_mv_avg_defs_b[] = {
-       DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
-       DECL_RFK_WM(0x78e4, 0x00004000, 0x00000001),
-       DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
-       DECL_RFK_WM(0x78e4, 0x00008000, 0x00000000),
-       DECL_RFK_WM(0x78e4, 0x000f0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78e4, 0x00008000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78e4, 0x000f0000, 0x00000000),
 };
 
-DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_b);
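
The rename leaves the consumption model untouched: a parser walks defs[] and issues one masked PHY write per entry. A sketch of that consumer using the driver's rtw89_phy_write32_mask() accessor; the helper name rfk_tbl_apply and the exact struct layout are illustrative assumptions, not the driver's actual parser:

	static void rfk_tbl_apply(struct rtw89_dev *rtwdev,
				  const struct rtw89_rfk_tbl *tbl)
	{
		u32 i;

		/* Apply each (addr, mask, data) triple as a read-modify-write
		 * on the corresponding baseband register.
		 */
		for (i = 0; i < tbl->size; i++)
			rtw89_phy_write32_mask(rtwdev, tbl->defs[i].addr,
					       tbl->defs[i].mask,
					       tbl->defs[i].data);
	}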
 
 static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_2g[] = {
-       DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
-       DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d0),
-       DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001e8),
-       DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
-       DECL_RFK_WM(0x58ac, 0x000001ff, 0x0000000b),
-       DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
-       DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000088),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_2g);
+       RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d0),
+       RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001e8),
+       RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x0000000b),
+       RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000088),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_2g);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_1[] = {
-       DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
-       DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d7),
-       DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fb),
-       DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
-       DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000005),
-       DECL_RFK_WM(0x58ac, 0x07fc0000, 0x0000007c),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_1);
+       RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d7),
+       RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fb),
+       RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000005),
+       RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x0000007c),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_1);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_3[] = {
-       DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
-       DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d8),
-       DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fc),
-       DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
-       DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000006),
-       DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000078),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_3);
+       RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d8),
+       RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fc),
+       RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000006),
+       RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000078),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_3);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_4[] = {
-       DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
-       DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001e5),
-       DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x58a8, 0x0003fe00, 0x0000000a),
-       DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
-       DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000011),
-       DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000075),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_4);
+       RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001e5),
+       RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x0000000a),
+       RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000011),
+       RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000075),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_4);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_2g[] = {
-       DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
-       DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001cc),
-       DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001e2),
-       DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
-       DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000005),
-       DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
-       DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000089),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_2g);
+       RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001cc),
+       RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001e2),
+       RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000005),
+       RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000089),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_2g);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_1[] = {
-       DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
-       DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001d5),
-       DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001fc),
-       DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
-       DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000005),
-       DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000079),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_1);
+       RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001d5),
+       RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001fc),
+       RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000005),
+       RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000079),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_1);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_3[] = {
-       DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
-       DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001dc),
-       DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000002),
-       DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
-       DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000000b),
-       DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000076),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_3);
+       RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001dc),
+       RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000002),
+       RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000000b),
+       RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000076),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_3);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_4[] = {
-       DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
-       DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
-       DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
-       DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001f0),
-       DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000016),
-       DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
-       DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
-       DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000001f),
-       DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000072),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_4);
+       RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001f0),
+       RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000016),
+       RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000001f),
+       RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000072),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_4);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_a[] = {
-       DECL_RFK_WRF(0x0, 0x55, 0x00080, 0x00001),
-       DECL_RFK_WM(0x5818, 0x000000ff, 0x000000c0),
-       DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
-       DECL_RFK_WM(0x5818, 0x10000000, 0x00000001),
-       DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
-       DECL_RFK_WM(0x5820, 0x80000000, 0x00000001),
-       DECL_RFK_WM(0x5818, 0x18000000, 0x00000003),
+       RTW89_DECL_RFK_WRF(0x0, 0x55, 0x00080, 0x00001),
+       RTW89_DECL_RFK_WM(0x5818, 0x000000ff, 0x000000c0),
+       RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x00000003),
 };
 
-DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_a);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_b[] = {
-       DECL_RFK_WRF(0x1, 0x55, 0x00080, 0x00001),
-       DECL_RFK_WM(0x7818, 0x000000ff, 0x000000c0),
-       DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
-       DECL_RFK_WM(0x7818, 0x10000000, 0x00000001),
-       DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
-       DECL_RFK_WM(0x7820, 0x80000000, 0x00000001),
-       DECL_RFK_WM(0x7818, 0x18000000, 0x00000003),
+       RTW89_DECL_RFK_WRF(0x1, 0x55, 0x00080, 0x00001),
+       RTW89_DECL_RFK_WM(0x7818, 0x000000ff, 0x000000c0),
+       RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x00000003),
 };
 
-DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_b);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_disable_defs[] = {
-       DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
-       DECL_RFK_WM(0x5818, 0x18000000, 0x00000001),
-       DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
-       DECL_RFK_WM(0x7818, 0x18000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x00000001),
 };
 
-DECLARE_RFK_TBL(rtw8852a_tssi_disable_defs);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_disable_defs);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_ab[] = {
-       DECL_RFK_WM(0x5820, 0x80000000, 0x0),
-       DECL_RFK_WM(0x5820, 0x80000000, 0x1),
-       DECL_RFK_WM(0x5818, 0x18000000, 0x3),
-       DECL_RFK_WM(0x7820, 0x80000000, 0x0),
-       DECL_RFK_WM(0x7820, 0x80000000, 0x1),
-       DECL_RFK_WM(0x7818, 0x18000000, 0x3),
+       RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0),
+       RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x1),
+       RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x3),
+       RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0),
+       RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x1),
+       RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x3),
 };
 
-DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_ab);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_ab);
 
 static const struct rtw89_reg5_def rtw8852a_tssi_tracking_defs[] = {
-       DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
-       DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
-       DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
-       DECL_RFK_WM(0x58f0, 0xfff00000, 0x00000400),
-       DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
-       DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
-       DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
-       DECL_RFK_WM(0x78f0, 0xfff00000, 0x00000400),
+       RTW89_DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58f0, 0xfff00000, 0x00000400),
+       RTW89_DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f0, 0xfff00000, 0x00000400),
 };
 
-DECLARE_RFK_TBL(rtw8852a_tssi_tracking_defs);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_tracking_defs);
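
At calibration time these tables are replayed entry by entry, dispatching on .flag. A simplified sketch of such a consumer is below; the driver's in-tree walker is rtw89_rfk_parser(), and while the phy accessor names here follow the rtw89 style, the exact helpers and the assumption that a DELAY entry's .data is in milliseconds should be read as approximations:

	static void rfk_tbl_replay(struct rtw89_dev *rtwdev,
				   const struct rtw89_rfk_tbl *tbl)
	{
		const struct rtw89_reg5_def *p = tbl->defs;
		const struct rtw89_reg5_def *end = tbl->defs + tbl->size;

		for (; p < end; p++) {
			switch (p->flag) {
			case RTW89_RFK_F_WRF:	/* write an RF register field */
				rtw89_write_rf(rtwdev, p->path, p->addr,
					       p->mask, p->data);
				break;
			case RTW89_RFK_F_WM:	/* read-modify-write a BB field */
				rtw89_phy_write32_mask(rtwdev, p->addr,
						       p->mask, p->data);
				break;
			case RTW89_RFK_F_WS:	/* set the masked bits */
				rtw89_phy_write32_set(rtwdev, p->addr, p->mask);
				break;
			case RTW89_RFK_F_WC:	/* clear the masked bits */
				rtw89_phy_write32_clr(rtwdev, p->addr, p->mask);
				break;
			case RTW89_RFK_F_DELAY:	/* pause between steps */
				udelay(p->data * 1000);	/* assumed: .data in ms */
				break;
			}
		}
	}

This explains the shape of the tables above: a WS/WC pair pulses a bit, a DELAY entry lets the hardware settle, and a trailing WM programs the final mode.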
 
 static const struct rtw89_reg5_def rtw8852a_rfk_afe_init_defs[] = {
-       DECL_RFK_WC(0x12ec, 0x00008000),
-       DECL_RFK_WS(0x12ec, 0x00008000),
-       DECL_RFK_WC(0x5e00, 0x00000001),
-       DECL_RFK_WS(0x5e00, 0x00000001),
-       DECL_RFK_WC(0x32ec, 0x00008000),
-       DECL_RFK_WS(0x32ec, 0x00008000),
-       DECL_RFK_WC(0x7e00, 0x00000001),
-       DECL_RFK_WS(0x7e00, 0x00000001),
+       RTW89_DECL_RFK_WC(0x12ec, 0x00008000),
+       RTW89_DECL_RFK_WS(0x12ec, 0x00008000),
+       RTW89_DECL_RFK_WC(0x5e00, 0x00000001),
+       RTW89_DECL_RFK_WS(0x5e00, 0x00000001),
+       RTW89_DECL_RFK_WC(0x32ec, 0x00008000),
+       RTW89_DECL_RFK_WS(0x32ec, 0x00008000),
+       RTW89_DECL_RFK_WC(0x7e00, 0x00000001),
+       RTW89_DECL_RFK_WS(0x7e00, 0x00000001),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_afe_init_defs);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_afe_init_defs);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_dack_reload_defs_a[] = {
-       DECL_RFK_WS(0x5e00, 0x00000008),
-       DECL_RFK_WS(0x5e50, 0x00000008),
-       DECL_RFK_WS(0x5e10, 0x80000000),
-       DECL_RFK_WS(0x5e60, 0x80000000),
-       DECL_RFK_WC(0x5e00, 0x00000008),
-       DECL_RFK_WC(0x5e50, 0x00000008),
+       RTW89_DECL_RFK_WS(0x5e00, 0x00000008),
+       RTW89_DECL_RFK_WS(0x5e50, 0x00000008),
+       RTW89_DECL_RFK_WS(0x5e10, 0x80000000),
+       RTW89_DECL_RFK_WS(0x5e60, 0x80000000),
+       RTW89_DECL_RFK_WC(0x5e00, 0x00000008),
+       RTW89_DECL_RFK_WC(0x5e50, 0x00000008),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_a);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_dack_reload_defs_b[] = {
-       DECL_RFK_WS(0x7e00, 0x00000008),
-       DECL_RFK_WS(0x7e50, 0x00000008),
-       DECL_RFK_WS(0x7e10, 0x80000000),
-       DECL_RFK_WS(0x7e60, 0x80000000),
-       DECL_RFK_WC(0x7e00, 0x00000008),
-       DECL_RFK_WC(0x7e50, 0x00000008),
+       RTW89_DECL_RFK_WS(0x7e00, 0x00000008),
+       RTW89_DECL_RFK_WS(0x7e50, 0x00000008),
+       RTW89_DECL_RFK_WS(0x7e10, 0x80000000),
+       RTW89_DECL_RFK_WS(0x7e60, 0x80000000),
+       RTW89_DECL_RFK_WC(0x7e00, 0x00000008),
+       RTW89_DECL_RFK_WC(0x7e50, 0x00000008),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_b);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_check_addc_defs_a[] = {
-       DECL_RFK_WC(0x20f4, 0x01000000),
-       DECL_RFK_WS(0x20f8, 0x80000000),
-       DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
-       DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
-       DECL_RFK_WC(0x20f0, 0x0000000f),
-       DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000002),
+       RTW89_DECL_RFK_WC(0x20f4, 0x01000000),
+       RTW89_DECL_RFK_WS(0x20f8, 0x80000000),
+       RTW89_DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
+       RTW89_DECL_RFK_WC(0x20f0, 0x0000000f),
+       RTW89_DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000002),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_a);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_check_addc_defs_b[] = {
-       DECL_RFK_WC(0x20f4, 0x01000000),
-       DECL_RFK_WS(0x20f8, 0x80000000),
-       DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
-       DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
-       DECL_RFK_WC(0x20f0, 0x0000000f),
-       DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000003),
+       RTW89_DECL_RFK_WC(0x20f4, 0x01000000),
+       RTW89_DECL_RFK_WS(0x20f8, 0x80000000),
+       RTW89_DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
+       RTW89_DECL_RFK_WC(0x20f0, 0x0000000f),
+       RTW89_DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000003),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_b);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_addck_reset_defs_a[] = {
-       DECL_RFK_WC(0x12d8, 0x00000030),
-       DECL_RFK_WC(0x32d8, 0x00000030),
-       DECL_RFK_WS(0x12b8, 0x40000000),
-       DECL_RFK_WC(0x032c, 0x40000000),
-       DECL_RFK_WC(0x032c, 0x00400000),
-       DECL_RFK_WS(0x032c, 0x00400000),
-       DECL_RFK_WS(0x030c, 0x0f000000),
-       DECL_RFK_WC(0x032c, 0x00010000),
-       DECL_RFK_WS(0x12dc, 0x00000002),
-       DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+       RTW89_DECL_RFK_WC(0x12d8, 0x00000030),
+       RTW89_DECL_RFK_WC(0x32d8, 0x00000030),
+       RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
+       RTW89_DECL_RFK_WC(0x032c, 0x40000000),
+       RTW89_DECL_RFK_WC(0x032c, 0x00400000),
+       RTW89_DECL_RFK_WS(0x032c, 0x00400000),
+       RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
+       RTW89_DECL_RFK_WC(0x032c, 0x00010000),
+       RTW89_DECL_RFK_WS(0x12dc, 0x00000002),
+       RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_a);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_addck_trigger_defs_a[] = {
-       DECL_RFK_WS(0x12d8, 0x000000c0),
-       DECL_RFK_WS(0x12d8, 0x00000800),
-       DECL_RFK_WC(0x12d8, 0x00000800),
-       DECL_RFK_DELAY(1),
-       DECL_RFK_WM(0x12d8, 0x00000300, 0x00000001),
+       RTW89_DECL_RFK_WS(0x12d8, 0x000000c0),
+       RTW89_DECL_RFK_WS(0x12d8, 0x00000800),
+       RTW89_DECL_RFK_WC(0x12d8, 0x00000800),
+       RTW89_DECL_RFK_DELAY(1),
+       RTW89_DECL_RFK_WM(0x12d8, 0x00000300, 0x00000001),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_a);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_addck_restore_defs_a[] = {
-       DECL_RFK_WC(0x12dc, 0x00000002),
-       DECL_RFK_WS(0x032c, 0x00010000),
-       DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
-       DECL_RFK_WS(0x032c, 0x40000000),
-       DECL_RFK_WC(0x12b8, 0x40000000),
+       RTW89_DECL_RFK_WC(0x12dc, 0x00000002),
+       RTW89_DECL_RFK_WS(0x032c, 0x00010000),
+       RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
+       RTW89_DECL_RFK_WS(0x032c, 0x40000000),
+       RTW89_DECL_RFK_WC(0x12b8, 0x40000000),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_a);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_addck_reset_defs_b[] = {
-       DECL_RFK_WS(0x32b8, 0x40000000),
-       DECL_RFK_WC(0x032c, 0x40000000),
-       DECL_RFK_WC(0x032c, 0x00400000),
-       DECL_RFK_WS(0x032c, 0x00400000),
-       DECL_RFK_WS(0x030c, 0x0f000000),
-       DECL_RFK_WC(0x032c, 0x00010000),
-       DECL_RFK_WS(0x32dc, 0x00000002),
-       DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+       RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
+       RTW89_DECL_RFK_WC(0x032c, 0x40000000),
+       RTW89_DECL_RFK_WC(0x032c, 0x00400000),
+       RTW89_DECL_RFK_WS(0x032c, 0x00400000),
+       RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
+       RTW89_DECL_RFK_WC(0x032c, 0x00010000),
+       RTW89_DECL_RFK_WS(0x32dc, 0x00000002),
+       RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_b);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_addck_trigger_defs_b[] = {
-       DECL_RFK_WS(0x32d8, 0x000000c0),
-       DECL_RFK_WS(0x32d8, 0x00000800),
-       DECL_RFK_WC(0x32d8, 0x00000800),
-       DECL_RFK_DELAY(1),
-       DECL_RFK_WM(0x32d8, 0x00000300, 0x00000001),
+       RTW89_DECL_RFK_WS(0x32d8, 0x000000c0),
+       RTW89_DECL_RFK_WS(0x32d8, 0x00000800),
+       RTW89_DECL_RFK_WC(0x32d8, 0x00000800),
+       RTW89_DECL_RFK_DELAY(1),
+       RTW89_DECL_RFK_WM(0x32d8, 0x00000300, 0x00000001),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_b);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_addck_restore_defs_b[] = {
-       DECL_RFK_WC(0x32dc, 0x00000002),
-       DECL_RFK_WS(0x032c, 0x00010000),
-       DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
-       DECL_RFK_WS(0x032c, 0x40000000),
-       DECL_RFK_WC(0x32b8, 0x40000000),
+       RTW89_DECL_RFK_WC(0x32dc, 0x00000002),
+       RTW89_DECL_RFK_WS(0x032c, 0x00010000),
+       RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
+       RTW89_DECL_RFK_WS(0x032c, 0x40000000),
+       RTW89_DECL_RFK_WC(0x32b8, 0x40000000),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_b);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_f_a[] = {
-       DECL_RFK_WC(0x032c, 0x40000000),
-       DECL_RFK_WS(0x030c, 0x0f000000),
-       DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
-       DECL_RFK_WC(0x032c, 0x00010000),
-       DECL_RFK_WS(0x12dc, 0x00000001),
-       DECL_RFK_WS(0x12e8, 0x00000004),
-       DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00001),
+       RTW89_DECL_RFK_WC(0x032c, 0x40000000),
+       RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
+       RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+       RTW89_DECL_RFK_WC(0x032c, 0x00010000),
+       RTW89_DECL_RFK_WS(0x12dc, 0x00000001),
+       RTW89_DECL_RFK_WS(0x12e8, 0x00000004),
+       RTW89_DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00001),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_a);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_f_b[] = {
-       DECL_RFK_WC(0x032c, 0x40000000),
-       DECL_RFK_WS(0x030c, 0x0f000000),
-       DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
-       DECL_RFK_WC(0x032c, 0x00010000),
-       DECL_RFK_WS(0x32dc, 0x00000001),
-       DECL_RFK_WS(0x32e8, 0x00000004),
-       DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00001),
+       RTW89_DECL_RFK_WC(0x032c, 0x40000000),
+       RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
+       RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+       RTW89_DECL_RFK_WC(0x032c, 0x00010000),
+       RTW89_DECL_RFK_WS(0x32dc, 0x00000001),
+       RTW89_DECL_RFK_WS(0x32e8, 0x00000004),
+       RTW89_DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00001),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_b);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_r_a[] = {
-       DECL_RFK_WC(0x12dc, 0x00000001),
-       DECL_RFK_WC(0x12e8, 0x00000004),
-       DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00000),
-       DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
+       RTW89_DECL_RFK_WC(0x12dc, 0x00000001),
+       RTW89_DECL_RFK_WC(0x12e8, 0x00000004),
+       RTW89_DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00000),
+       RTW89_DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_a);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_r_b[] = {
-       DECL_RFK_WC(0x32dc, 0x00000001),
-       DECL_RFK_WC(0x32e8, 0x00000004),
-       DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00000),
-       DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
+       RTW89_DECL_RFK_WC(0x32dc, 0x00000001),
+       RTW89_DECL_RFK_WC(0x32e8, 0x00000004),
+       RTW89_DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00000),
+       RTW89_DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_b);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_f_a[] = {
-       DECL_RFK_WS(0x5e00, 0x00000008),
-       DECL_RFK_WC(0x5e10, 0x80000000),
-       DECL_RFK_WS(0x5e50, 0x00000008),
-       DECL_RFK_WC(0x5e60, 0x80000000),
-       DECL_RFK_WS(0x12a0, 0x00008000),
-       DECL_RFK_WM(0x12a0, 0x00007000, 0x00000003),
-       DECL_RFK_WS(0x12b8, 0x40000000),
-       DECL_RFK_WS(0x030c, 0x10000000),
-       DECL_RFK_WC(0x032c, 0x80000000),
-       DECL_RFK_WS(0x12e0, 0x00010000),
-       DECL_RFK_WS(0x12e4, 0x0c000000),
-       DECL_RFK_WM(0x5e00, 0x03ff0000, 0x00000030),
-       DECL_RFK_WM(0x5e50, 0x03ff0000, 0x00000030),
-       DECL_RFK_WC(0x5e00, 0x0c000000),
-       DECL_RFK_WC(0x5e50, 0x0c000000),
-       DECL_RFK_WC(0x5e0c, 0x00000008),
-       DECL_RFK_WC(0x5e5c, 0x00000008),
-       DECL_RFK_WS(0x5e0c, 0x00000001),
-       DECL_RFK_WS(0x5e5c, 0x00000001),
-       DECL_RFK_DELAY(1),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_a);
+       RTW89_DECL_RFK_WS(0x5e00, 0x00000008),
+       RTW89_DECL_RFK_WC(0x5e10, 0x80000000),
+       RTW89_DECL_RFK_WS(0x5e50, 0x00000008),
+       RTW89_DECL_RFK_WC(0x5e60, 0x80000000),
+       RTW89_DECL_RFK_WS(0x12a0, 0x00008000),
+       RTW89_DECL_RFK_WM(0x12a0, 0x00007000, 0x00000003),
+       RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
+       RTW89_DECL_RFK_WS(0x030c, 0x10000000),
+       RTW89_DECL_RFK_WC(0x032c, 0x80000000),
+       RTW89_DECL_RFK_WS(0x12e0, 0x00010000),
+       RTW89_DECL_RFK_WS(0x12e4, 0x0c000000),
+       RTW89_DECL_RFK_WM(0x5e00, 0x03ff0000, 0x00000030),
+       RTW89_DECL_RFK_WM(0x5e50, 0x03ff0000, 0x00000030),
+       RTW89_DECL_RFK_WC(0x5e00, 0x0c000000),
+       RTW89_DECL_RFK_WC(0x5e50, 0x0c000000),
+       RTW89_DECL_RFK_WC(0x5e0c, 0x00000008),
+       RTW89_DECL_RFK_WC(0x5e5c, 0x00000008),
+       RTW89_DECL_RFK_WS(0x5e0c, 0x00000001),
+       RTW89_DECL_RFK_WS(0x5e5c, 0x00000001),
+       RTW89_DECL_RFK_DELAY(1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_a);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_m_a[] = {
-       DECL_RFK_WC(0x12e4, 0x0c000000),
-       DECL_RFK_WS(0x5e0c, 0x00000008),
-       DECL_RFK_WS(0x5e5c, 0x00000008),
-       DECL_RFK_DELAY(1),
+       RTW89_DECL_RFK_WC(0x12e4, 0x0c000000),
+       RTW89_DECL_RFK_WS(0x5e0c, 0x00000008),
+       RTW89_DECL_RFK_WS(0x5e5c, 0x00000008),
+       RTW89_DECL_RFK_DELAY(1),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_a);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_r_a[] = {
-       DECL_RFK_WC(0x5e0c, 0x00000001),
-       DECL_RFK_WC(0x5e5c, 0x00000001),
-       DECL_RFK_WC(0x12e0, 0x00010000),
-       DECL_RFK_WC(0x12a0, 0x00008000),
-       DECL_RFK_WS(0x12a0, 0x00007000),
+       RTW89_DECL_RFK_WC(0x5e0c, 0x00000001),
+       RTW89_DECL_RFK_WC(0x5e5c, 0x00000001),
+       RTW89_DECL_RFK_WC(0x12e0, 0x00010000),
+       RTW89_DECL_RFK_WC(0x12a0, 0x00008000),
+       RTW89_DECL_RFK_WS(0x12a0, 0x00007000),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_a);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_f_b[] = {
-       DECL_RFK_WS(0x7e00, 0x00000008),
-       DECL_RFK_WC(0x7e10, 0x80000000),
-       DECL_RFK_WS(0x7e50, 0x00000008),
-       DECL_RFK_WC(0x7e60, 0x80000000),
-       DECL_RFK_WS(0x32a0, 0x00008000),
-       DECL_RFK_WM(0x32a0, 0x00007000, 0x00000003),
-       DECL_RFK_WS(0x32b8, 0x40000000),
-       DECL_RFK_WS(0x030c, 0x10000000),
-       DECL_RFK_WC(0x032c, 0x80000000),
-       DECL_RFK_WS(0x32e0, 0x00010000),
-       DECL_RFK_WS(0x32e4, 0x0c000000),
-       DECL_RFK_WM(0x7e00, 0x03ff0000, 0x00000030),
-       DECL_RFK_WM(0x7e50, 0x03ff0000, 0x00000030),
-       DECL_RFK_WC(0x7e00, 0x0c000000),
-       DECL_RFK_WC(0x7e50, 0x0c000000),
-       DECL_RFK_WC(0x7e0c, 0x00000008),
-       DECL_RFK_WC(0x7e5c, 0x00000008),
-       DECL_RFK_WS(0x7e0c, 0x00000001),
-       DECL_RFK_WS(0x7e5c, 0x00000001),
-       DECL_RFK_DELAY(1),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_b);
+       RTW89_DECL_RFK_WS(0x7e00, 0x00000008),
+       RTW89_DECL_RFK_WC(0x7e10, 0x80000000),
+       RTW89_DECL_RFK_WS(0x7e50, 0x00000008),
+       RTW89_DECL_RFK_WC(0x7e60, 0x80000000),
+       RTW89_DECL_RFK_WS(0x32a0, 0x00008000),
+       RTW89_DECL_RFK_WM(0x32a0, 0x00007000, 0x00000003),
+       RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
+       RTW89_DECL_RFK_WS(0x030c, 0x10000000),
+       RTW89_DECL_RFK_WC(0x032c, 0x80000000),
+       RTW89_DECL_RFK_WS(0x32e0, 0x00010000),
+       RTW89_DECL_RFK_WS(0x32e4, 0x0c000000),
+       RTW89_DECL_RFK_WM(0x7e00, 0x03ff0000, 0x00000030),
+       RTW89_DECL_RFK_WM(0x7e50, 0x03ff0000, 0x00000030),
+       RTW89_DECL_RFK_WC(0x7e00, 0x0c000000),
+       RTW89_DECL_RFK_WC(0x7e50, 0x0c000000),
+       RTW89_DECL_RFK_WC(0x7e0c, 0x00000008),
+       RTW89_DECL_RFK_WC(0x7e5c, 0x00000008),
+       RTW89_DECL_RFK_WS(0x7e0c, 0x00000001),
+       RTW89_DECL_RFK_WS(0x7e5c, 0x00000001),
+       RTW89_DECL_RFK_DELAY(1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_b);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_m_b[] = {
-       DECL_RFK_WC(0x32e4, 0x0c000000),
-       DECL_RFK_WM(0x7e0c, 0x00000008, 0x00000001),
-       DECL_RFK_WM(0x7e5c, 0x00000008, 0x00000001),
-       DECL_RFK_DELAY(1),
+       RTW89_DECL_RFK_WC(0x32e4, 0x0c000000),
+       RTW89_DECL_RFK_WM(0x7e0c, 0x00000008, 0x00000001),
+       RTW89_DECL_RFK_WM(0x7e5c, 0x00000008, 0x00000001),
+       RTW89_DECL_RFK_DELAY(1),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_b);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_r_b[] = {
-       DECL_RFK_WC(0x7e0c, 0x00000001),
-       DECL_RFK_WC(0x7e5c, 0x00000001),
-       DECL_RFK_WC(0x32e0, 0x00010000),
-       DECL_RFK_WC(0x32a0, 0x00008000),
-       DECL_RFK_WS(0x32a0, 0x00007000),
+       RTW89_DECL_RFK_WC(0x7e0c, 0x00000001),
+       RTW89_DECL_RFK_WC(0x7e5c, 0x00000001),
+       RTW89_DECL_RFK_WC(0x32e0, 0x00010000),
+       RTW89_DECL_RFK_WC(0x32a0, 0x00008000),
+       RTW89_DECL_RFK_WS(0x32a0, 0x00007000),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_b);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sf_defs_a[] = {
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
-       DECL_RFK_WS(0x12b8, 0x40000000),
-       DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
-       DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
-       DECL_RFK_WS(0x12b8, 0x10000000),
-       DECL_RFK_WS(0x58c8, 0x01000000),
-       DECL_RFK_WS(0x5864, 0xc0000000),
-       DECL_RFK_WS(0x2008, 0x01ffffff),
-       DECL_RFK_WS(0x0c1c, 0x00000004),
-       DECL_RFK_WS(0x0700, 0x08000000),
-       DECL_RFK_WS(0x0c70, 0x000003ff),
-       DECL_RFK_WS(0x0c60, 0x00000003),
-       DECL_RFK_WS(0x0c6c, 0x00000001),
-       DECL_RFK_WS(0x58ac, 0x08000000),
-       DECL_RFK_WS(0x0c3c, 0x00000200),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_a);
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+       RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
+       RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+       RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+       RTW89_DECL_RFK_WS(0x12b8, 0x10000000),
+       RTW89_DECL_RFK_WS(0x58c8, 0x01000000),
+       RTW89_DECL_RFK_WS(0x5864, 0xc0000000),
+       RTW89_DECL_RFK_WS(0x2008, 0x01ffffff),
+       RTW89_DECL_RFK_WS(0x0c1c, 0x00000004),
+       RTW89_DECL_RFK_WS(0x0700, 0x08000000),
+       RTW89_DECL_RFK_WS(0x0c70, 0x000003ff),
+       RTW89_DECL_RFK_WS(0x0c60, 0x00000003),
+       RTW89_DECL_RFK_WS(0x0c6c, 0x00000001),
+       RTW89_DECL_RFK_WS(0x58ac, 0x08000000),
+       RTW89_DECL_RFK_WS(0x0c3c, 0x00000200),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_a);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sr_defs_a[] = {
-       DECL_RFK_WS(0x4490, 0x80000000),
-       DECL_RFK_WS(0x12a0, 0x00007000),
-       DECL_RFK_WS(0x12a0, 0x00008000),
-       DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
-       DECL_RFK_WS(0x12a0, 0x00080000),
-       DECL_RFK_WS(0x0700, 0x01000000),
-       DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
-       DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+       RTW89_DECL_RFK_WS(0x4490, 0x80000000),
+       RTW89_DECL_RFK_WS(0x12a0, 0x00007000),
+       RTW89_DECL_RFK_WS(0x12a0, 0x00008000),
+       RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+       RTW89_DECL_RFK_WS(0x12a0, 0x00080000),
+       RTW89_DECL_RFK_WS(0x0700, 0x01000000),
+       RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
+       RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_a);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sf_defs_b[] = {
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
-       DECL_RFK_WS(0x32b8, 0x40000000),
-       DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
-       DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
-       DECL_RFK_WS(0x32b8, 0x10000000),
-       DECL_RFK_WS(0x78c8, 0x01000000),
-       DECL_RFK_WS(0x7864, 0xc0000000),
-       DECL_RFK_WS(0x2008, 0x01ffffff),
-       DECL_RFK_WS(0x2c1c, 0x00000004),
-       DECL_RFK_WS(0x2700, 0x08000000),
-       DECL_RFK_WS(0x0c70, 0x000003ff),
-       DECL_RFK_WS(0x0c60, 0x00000003),
-       DECL_RFK_WS(0x0c6c, 0x00000001),
-       DECL_RFK_WS(0x78ac, 0x08000000),
-       DECL_RFK_WS(0x2c3c, 0x00000200),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_b);
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+       RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
+       RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+       RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+       RTW89_DECL_RFK_WS(0x32b8, 0x10000000),
+       RTW89_DECL_RFK_WS(0x78c8, 0x01000000),
+       RTW89_DECL_RFK_WS(0x7864, 0xc0000000),
+       RTW89_DECL_RFK_WS(0x2008, 0x01ffffff),
+       RTW89_DECL_RFK_WS(0x2c1c, 0x00000004),
+       RTW89_DECL_RFK_WS(0x2700, 0x08000000),
+       RTW89_DECL_RFK_WS(0x0c70, 0x000003ff),
+       RTW89_DECL_RFK_WS(0x0c60, 0x00000003),
+       RTW89_DECL_RFK_WS(0x0c6c, 0x00000001),
+       RTW89_DECL_RFK_WS(0x78ac, 0x08000000),
+       RTW89_DECL_RFK_WS(0x2c3c, 0x00000200),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_b);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sr_defs_b[] = {
-       DECL_RFK_WS(0x6490, 0x80000000),
-       DECL_RFK_WS(0x32a0, 0x00007000),
-       DECL_RFK_WS(0x32a0, 0x00008000),
-       DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
-       DECL_RFK_WS(0x32a0, 0x00080000),
-       DECL_RFK_WS(0x2700, 0x01000000),
-       DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
-       DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+       RTW89_DECL_RFK_WS(0x6490, 0x80000000),
+       RTW89_DECL_RFK_WS(0x32a0, 0x00007000),
+       RTW89_DECL_RFK_WS(0x32a0, 0x00008000),
+       RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+       RTW89_DECL_RFK_WS(0x32a0, 0x00080000),
+       RTW89_DECL_RFK_WS(0x2700, 0x01000000),
+       RTW89_DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
+       RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_b);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_s_defs_ab[] = {
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
-       DECL_RFK_WS(0x12b8, 0x40000000),
-       DECL_RFK_WS(0x32b8, 0x40000000),
-       DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
-       DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
-       DECL_RFK_WS(0x12b8, 0x10000000),
-       DECL_RFK_WS(0x58c8, 0x01000000),
-       DECL_RFK_WS(0x78c8, 0x01000000),
-       DECL_RFK_WS(0x5864, 0xc0000000),
-       DECL_RFK_WS(0x7864, 0xc0000000),
-       DECL_RFK_WS(0x2008, 0x01ffffff),
-       DECL_RFK_WS(0x0c1c, 0x00000004),
-       DECL_RFK_WS(0x0700, 0x08000000),
-       DECL_RFK_WS(0x0c70, 0x000003ff),
-       DECL_RFK_WS(0x0c60, 0x00000003),
-       DECL_RFK_WS(0x0c6c, 0x00000001),
-       DECL_RFK_WS(0x58ac, 0x08000000),
-       DECL_RFK_WS(0x78ac, 0x08000000),
-       DECL_RFK_WS(0x0c3c, 0x00000200),
-       DECL_RFK_WS(0x2344, 0x80000000),
-       DECL_RFK_WS(0x4490, 0x80000000),
-       DECL_RFK_WS(0x12a0, 0x00007000),
-       DECL_RFK_WS(0x12a0, 0x00008000),
-       DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
-       DECL_RFK_WS(0x12a0, 0x00080000),
-       DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
-       DECL_RFK_WS(0x32a0, 0x00080000),
-       DECL_RFK_WS(0x0700, 0x01000000),
-       DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
-       DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
-       DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_s_defs_ab);
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+       RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
+       RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
+       RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+       RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+       RTW89_DECL_RFK_WS(0x12b8, 0x10000000),
+       RTW89_DECL_RFK_WS(0x58c8, 0x01000000),
+       RTW89_DECL_RFK_WS(0x78c8, 0x01000000),
+       RTW89_DECL_RFK_WS(0x5864, 0xc0000000),
+       RTW89_DECL_RFK_WS(0x7864, 0xc0000000),
+       RTW89_DECL_RFK_WS(0x2008, 0x01ffffff),
+       RTW89_DECL_RFK_WS(0x0c1c, 0x00000004),
+       RTW89_DECL_RFK_WS(0x0700, 0x08000000),
+       RTW89_DECL_RFK_WS(0x0c70, 0x000003ff),
+       RTW89_DECL_RFK_WS(0x0c60, 0x00000003),
+       RTW89_DECL_RFK_WS(0x0c6c, 0x00000001),
+       RTW89_DECL_RFK_WS(0x58ac, 0x08000000),
+       RTW89_DECL_RFK_WS(0x78ac, 0x08000000),
+       RTW89_DECL_RFK_WS(0x0c3c, 0x00000200),
+       RTW89_DECL_RFK_WS(0x2344, 0x80000000),
+       RTW89_DECL_RFK_WS(0x4490, 0x80000000),
+       RTW89_DECL_RFK_WS(0x12a0, 0x00007000),
+       RTW89_DECL_RFK_WS(0x12a0, 0x00008000),
+       RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+       RTW89_DECL_RFK_WS(0x12a0, 0x00080000),
+       RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+       RTW89_DECL_RFK_WS(0x32a0, 0x00080000),
+       RTW89_DECL_RFK_WS(0x0700, 0x01000000),
+       RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
+       RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_s_defs_ab);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_a[] = {
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
-       DECL_RFK_WC(0x12b8, 0x40000000),
-       DECL_RFK_WC(0x5864, 0xc0000000),
-       DECL_RFK_WC(0x2008, 0x01ffffff),
-       DECL_RFK_WC(0x0c1c, 0x00000004),
-       DECL_RFK_WC(0x0700, 0x08000000),
-       DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
-       DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
-       DECL_RFK_WC(0x12a0, 0x000ff000),
-       DECL_RFK_WC(0x0700, 0x07000000),
-       DECL_RFK_WC(0x5864, 0x20000000),
-       DECL_RFK_WC(0x0c3c, 0x00000200),
-       DECL_RFK_WC(0x20fc, 0xffff0000),
-       DECL_RFK_WC(0x58c8, 0x01000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_a);
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+       RTW89_DECL_RFK_WC(0x12b8, 0x40000000),
+       RTW89_DECL_RFK_WC(0x5864, 0xc0000000),
+       RTW89_DECL_RFK_WC(0x2008, 0x01ffffff),
+       RTW89_DECL_RFK_WC(0x0c1c, 0x00000004),
+       RTW89_DECL_RFK_WC(0x0700, 0x08000000),
+       RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+       RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+       RTW89_DECL_RFK_WC(0x12a0, 0x000ff000),
+       RTW89_DECL_RFK_WC(0x0700, 0x07000000),
+       RTW89_DECL_RFK_WC(0x5864, 0x20000000),
+       RTW89_DECL_RFK_WC(0x0c3c, 0x00000200),
+       RTW89_DECL_RFK_WC(0x20fc, 0xffff0000),
+       RTW89_DECL_RFK_WC(0x58c8, 0x01000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_a);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_b[] = {
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
-       DECL_RFK_WC(0x32b8, 0x40000000),
-       DECL_RFK_WC(0x7864, 0xc0000000),
-       DECL_RFK_WC(0x2008, 0x01ffffff),
-       DECL_RFK_WC(0x2c1c, 0x00000004),
-       DECL_RFK_WC(0x2700, 0x08000000),
-       DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
-       DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
-       DECL_RFK_WC(0x32a0, 0x000ff000),
-       DECL_RFK_WC(0x2700, 0x07000000),
-       DECL_RFK_WC(0x7864, 0x20000000),
-       DECL_RFK_WC(0x2c3c, 0x00000200),
-       DECL_RFK_WC(0x20fc, 0xffff0000),
-       DECL_RFK_WC(0x78c8, 0x01000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_b);
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+       RTW89_DECL_RFK_WC(0x32b8, 0x40000000),
+       RTW89_DECL_RFK_WC(0x7864, 0xc0000000),
+       RTW89_DECL_RFK_WC(0x2008, 0x01ffffff),
+       RTW89_DECL_RFK_WC(0x2c1c, 0x00000004),
+       RTW89_DECL_RFK_WC(0x2700, 0x08000000),
+       RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+       RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+       RTW89_DECL_RFK_WC(0x32a0, 0x000ff000),
+       RTW89_DECL_RFK_WC(0x2700, 0x07000000),
+       RTW89_DECL_RFK_WC(0x7864, 0x20000000),
+       RTW89_DECL_RFK_WC(0x2c3c, 0x00000200),
+       RTW89_DECL_RFK_WC(0x20fc, 0xffff0000),
+       RTW89_DECL_RFK_WC(0x78c8, 0x01000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_b);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_ab[] = {
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
-       DECL_RFK_WC(0x12b8, 0x40000000),
-       DECL_RFK_WC(0x32b8, 0x40000000),
-       DECL_RFK_WC(0x5864, 0xc0000000),
-       DECL_RFK_WC(0x7864, 0xc0000000),
-       DECL_RFK_WC(0x2008, 0x01ffffff),
-       DECL_RFK_WC(0x0c1c, 0x00000004),
-       DECL_RFK_WC(0x0700, 0x08000000),
-       DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
-       DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
-       DECL_RFK_WC(0x12a0, 0x000ff000),
-       DECL_RFK_WC(0x32a0, 0x000ff000),
-       DECL_RFK_WC(0x0700, 0x07000000),
-       DECL_RFK_WC(0x5864, 0x20000000),
-       DECL_RFK_WC(0x7864, 0x20000000),
-       DECL_RFK_WC(0x0c3c, 0x00000200),
-       DECL_RFK_WC(0x20fc, 0xffff0000),
-       DECL_RFK_WC(0x58c8, 0x01000000),
-       DECL_RFK_WC(0x78c8, 0x01000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_ab);
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+       RTW89_DECL_RFK_WC(0x12b8, 0x40000000),
+       RTW89_DECL_RFK_WC(0x32b8, 0x40000000),
+       RTW89_DECL_RFK_WC(0x5864, 0xc0000000),
+       RTW89_DECL_RFK_WC(0x7864, 0xc0000000),
+       RTW89_DECL_RFK_WC(0x2008, 0x01ffffff),
+       RTW89_DECL_RFK_WC(0x0c1c, 0x00000004),
+       RTW89_DECL_RFK_WC(0x0700, 0x08000000),
+       RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+       RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+       RTW89_DECL_RFK_WC(0x12a0, 0x000ff000),
+       RTW89_DECL_RFK_WC(0x32a0, 0x000ff000),
+       RTW89_DECL_RFK_WC(0x0700, 0x07000000),
+       RTW89_DECL_RFK_WC(0x5864, 0x20000000),
+       RTW89_DECL_RFK_WC(0x7864, 0x20000000),
+       RTW89_DECL_RFK_WC(0x0c3c, 0x00000200),
+       RTW89_DECL_RFK_WC(0x20fc, 0xffff0000),
+       RTW89_DECL_RFK_WC(0x58c8, 0x01000000),
+       RTW89_DECL_RFK_WC(0x78c8, 0x01000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_ab);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_dpk_lbk_rxiqk_defs_f[] = {
-       DECL_RFK_WM(0x030c, 0xff000000, 0x0000000f),
-       DECL_RFK_DELAY(1),
-       DECL_RFK_WM(0x030c, 0xff000000, 0x00000003),
-       DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a001),
-       DECL_RFK_DELAY(1),
-       DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a041),
-       DECL_RFK_WS(0x8074, 0x80000000),
+       RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x0000000f),
+       RTW89_DECL_RFK_DELAY(1),
+       RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a001),
+       RTW89_DECL_RFK_DELAY(1),
+       RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a041),
+       RTW89_DECL_RFK_WS(0x8074, 0x80000000),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_f);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_f);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_dpk_lbk_rxiqk_defs_r[] = {
-       DECL_RFK_WC(0x8074, 0x80000000),
-       DECL_RFK_WM(0x030c, 0xff000000, 0x0000001f),
-       DECL_RFK_DELAY(1),
-       DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
-       DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
-       DECL_RFK_DELAY(1),
-       DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
+       RTW89_DECL_RFK_WC(0x8074, 0x80000000),
+       RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x0000001f),
+       RTW89_DECL_RFK_DELAY(1),
+       RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+       RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+       RTW89_DECL_RFK_DELAY(1),
+       RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_r);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_r);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_dpk_pas_read_defs[] = {
-       DECL_RFK_WM(0x80d4, 0x00ff0000, 0x00000006),
-       DECL_RFK_WC(0x80bc, 0x00004000),
-       DECL_RFK_WM(0x80c0, 0x00ff0000, 0x00000008),
+       RTW89_DECL_RFK_WM(0x80d4, 0x00ff0000, 0x00000006),
+       RTW89_DECL_RFK_WC(0x80bc, 0x00004000),
+       RTW89_DECL_RFK_WM(0x80c0, 0x00ff0000, 0x00000008),
 };
 
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_pas_read_defs);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_pas_read_defs);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_nondbcc_path01[] = {
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
-       DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
-       DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
-       DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
-       DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
-       DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
-       DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
-       DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
-       DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
-       DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
-       DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
-       DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
-       DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
-       DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
-       DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
-       DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
-       DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
-       DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
-       DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
-       DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
-       DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
-       DECL_RFK_WM(0x2344, 0x80000000, 0x00000001),
-       DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
-       DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
-       DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
-       DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
-       DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
-       DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
-       DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
-       DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
-       DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
-       DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
-       DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_nondbcc_path01);
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+       RTW89_DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+       RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
+       RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
+       RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
+       RTW89_DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
+       RTW89_DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
+       RTW89_DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
+       RTW89_DECL_RFK_WM(0x2344, 0x80000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
+       RTW89_DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
+       RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_nondbcc_path01);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_dbcc_path0[] = {
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
-       DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
-       DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
-       DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
-       DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
-       DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
-       DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
-       DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
-       DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
-       DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
-       DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
-       DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
-       DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
-       DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
-       DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
-       DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
-       DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
-       DECL_RFK_WM(0x2320, 0x00000001, 0x00000001),
-       DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
-       DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
-       DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
-       DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
-       DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
-       DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
-       DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
-       DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path0);
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+       RTW89_DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+       RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
+       RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
+       RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
+       RTW89_DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
+       RTW89_DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
+       RTW89_DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
+       RTW89_DECL_RFK_WM(0x2320, 0x00000001, 0x00000001),
+       RTW89_DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
+       RTW89_DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
+       RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path0);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_dbcc_path1[] = {
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
-       DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
-       DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
-       DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
-       DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
-       DECL_RFK_WM(0x32b8, 0x10000000, 0x00000001),
-       DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
-       DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
-       DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
-       DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000001),
-       DECL_RFK_WM(0x2700, 0x08000000, 0x00000001),
-       DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
-       DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
-       DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
-       DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
-       DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000001),
-       DECL_RFK_WM(0x6490, 0x80000000, 0x00000001),
-       DECL_RFK_WM(0x32a0, 0x00007000, 0x00000007),
-       DECL_RFK_WM(0x32a0, 0x00008000, 0x00000001),
-       DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
-       DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
-       DECL_RFK_WM(0x2700, 0x01000000, 0x00000001),
-       DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
-       DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path1);
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+       RTW89_DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+       RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x32b8, 0x10000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
+       RTW89_DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000001),
+       RTW89_DECL_RFK_WM(0x2700, 0x08000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
+       RTW89_DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
+       RTW89_DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
+       RTW89_DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000001),
+       RTW89_DECL_RFK_WM(0x6490, 0x80000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x32a0, 0x00007000, 0x00000007),
+       RTW89_DECL_RFK_WM(0x32a0, 0x00008000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+       RTW89_DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x2700, 0x01000000, 0x00000001),
+       RTW89_DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
+       RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path1);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_nondbcc_path01[] = {
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
-       DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
-       DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
-       DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
-       DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
-       DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
-       DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
-       DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
-       DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
-       DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
-       DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
-       DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
-       DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
-       DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
-       DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
-       DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
-       DECL_RFK_WM(0x2320, 0x00000001, 0x00000000),
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
-       DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
-       DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_nondbcc_path01);
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+       RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
+       RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+       RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+       RTW89_DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
+       RTW89_DECL_RFK_WM(0x2320, 0x00000001, 0x00000000),
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_nondbcc_path01);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_dbcc_path0[] = {
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
-       DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
-       DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
-       DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
-       DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
-       DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
-       DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
-       DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
-       DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
-       DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
-       DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
-       DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
-       DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path0);
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+       RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
+       RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+       RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+       RTW89_DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path0);
 
 static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_dbcc_path1[] = {
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
-       DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
-       DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
-       DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
-       DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000000),
-       DECL_RFK_WM(0x2700, 0x08000000, 0x00000000),
-       DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
-       DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
-       DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
-       DECL_RFK_WM(0x2700, 0x07000000, 0x00000000),
-       DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
-       DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000000),
-       DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
-       DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path1);
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+       RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
+       RTW89_DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000000),
+       RTW89_DECL_RFK_WM(0x2700, 0x08000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+       RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+       RTW89_DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x2700, 0x07000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000000),
+       RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
+       RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path1);
index 4a4a45d..33e6c40 100644 (file)
@@ -5,54 +5,7 @@
 #ifndef __RTW89_8852A_RFK_TABLE_H__
 #define __RTW89_8852A_RFK_TABLE_H__
 
-#include "core.h"
-
-enum rtw89_rfk_flag {
-       RTW89_RFK_F_WRF = 0,
-       RTW89_RFK_F_WM = 1,
-       RTW89_RFK_F_WS = 2,
-       RTW89_RFK_F_WC = 3,
-       RTW89_RFK_F_DELAY = 4,
-       RTW89_RFK_F_NUM,
-};
-
-struct rtw89_rfk_tbl {
-       const struct rtw89_reg5_def *defs;
-       u32 size;
-};
-
-#define DECLARE_RFK_TBL(_name)                 \
-const struct rtw89_rfk_tbl _name ## _tbl = {   \
-       .defs = _name,                          \
-       .size = ARRAY_SIZE(_name),              \
-}
-
-#define DECL_RFK_WRF(_path, _addr, _mask, _data)       \
-       {.flag = RTW89_RFK_F_WRF,                       \
-        .path = _path,                                 \
-        .addr = _addr,                                 \
-        .mask = _mask,                                 \
-        .data = _data,}
-
-#define DECL_RFK_WM(_addr, _mask, _data)       \
-       {.flag = RTW89_RFK_F_WM,                \
-        .addr = _addr,                         \
-        .mask = _mask,                         \
-        .data = _data,}
-
-#define DECL_RFK_WS(_addr, _mask)      \
-       {.flag = RTW89_RFK_F_WS,        \
-        .addr = _addr,                 \
-        .mask = _mask,}
-
-#define DECL_RFK_WC(_addr, _mask)      \
-       {.flag = RTW89_RFK_F_WC,        \
-        .addr = _addr,                 \
-        .mask = _mask,}
-
-#define DECL_RFK_DELAY(_data)          \
-       {.flag = RTW89_RFK_F_DELAY,     \
-        .data = _data,}
+#include "phy.h"
 
 extern const struct rtw89_rfk_tbl rtw8852a_tssi_sys_defs_tbl;
 extern const struct rtw89_rfk_tbl rtw8852a_tssi_sys_defs_2g_tbl;
index de93280..48459ab 100644 (file)
@@ -8,8 +8,15 @@
 #include "pci.h"
 #include "rtw8852a.h"
 
+static const struct rtw89_pci_info rtw8852a_pci_info = {
+       .dma_addr_set           = &rtw89_pci_ch_dma_addr_set,
+};
+
 static const struct rtw89_driver_info rtw89_8852ae_info = {
        .chip = &rtw8852a_chip_info,
+       .bus = {
+               .pci = &rtw8852a_pci_info,
+       },
 };
 
 static const struct pci_device_id rtw89_8852ae_id_table[] = {
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
new file mode 100644 (file)
index 0000000..58920e9
--- /dev/null
@@ -0,0 +1,529 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2022  Realtek Corporation
+ */
+
+#include "debug.h"
+#include "fw.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852c.h"
+
+static const struct rtw89_dle_mem rtw8852c_dle_mem_pcie[] = {
+       [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_wde_size19, &rtw89_ple_size19,
+                          &rtw89_wde_qt18, &rtw89_wde_qt18, &rtw89_ple_qt46,
+                          &rtw89_ple_qt47},
+       [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_wde_size18,
+                           &rtw89_ple_size18, &rtw89_wde_qt17, &rtw89_wde_qt17,
+                           &rtw89_ple_qt44, &rtw89_ple_qt45},
+       [RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
+                              NULL},
+};
+
+static const u32 rtw8852c_h2c_regs[RTW89_H2CREG_MAX] = {
+       R_AX_H2CREG_DATA0_V1, R_AX_H2CREG_DATA1_V1, R_AX_H2CREG_DATA2_V1,
+       R_AX_H2CREG_DATA3_V1
+};
+
+static const u32 rtw8852c_c2h_regs[RTW89_H2CREG_MAX] = {
+       R_AX_C2HREG_DATA0_V1, R_AX_C2HREG_DATA1_V1, R_AX_C2HREG_DATA2_V1,
+       R_AX_C2HREG_DATA3_V1
+};
+
+static const struct rtw89_page_regs rtw8852c_page_regs = {
+       .hci_fc_ctrl    = R_AX_HCI_FC_CTRL_V1,
+       .ch_page_ctrl   = R_AX_CH_PAGE_CTRL_V1,
+       .ach_page_ctrl  = R_AX_ACH0_PAGE_CTRL_V1,
+       .ach_page_info  = R_AX_ACH0_PAGE_INFO_V1,
+       .pub_page_info3 = R_AX_PUB_PAGE_INFO3_V1,
+       .pub_page_ctrl1 = R_AX_PUB_PAGE_CTRL1_V1,
+       .pub_page_ctrl2 = R_AX_PUB_PAGE_CTRL2_V1,
+       .pub_page_info1 = R_AX_PUB_PAGE_INFO1_V1,
+       .pub_page_info2 = R_AX_PUB_PAGE_INFO2_V1,
+       .wp_page_ctrl1  = R_AX_WP_PAGE_CTRL1_V1,
+       .wp_page_ctrl2  = R_AX_WP_PAGE_CTRL2_V1,
+       .wp_page_info1  = R_AX_WP_PAGE_INFO1_V1,
+};
+
+static const struct rtw89_reg_def rtw8852c_dcfo_comp = {
+       R_DCFO_COMP_S0_V1, B_DCFO_COMP_S0_V1_MSK
+};
+
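+/* Power-on sequence: enable WLAN power, wait for system power to become
+ * ready, bring up the A-die/XTAL interface and finally enable the DMAC and
+ * CMAC function blocks.
+ */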
+static int rtw8852c_pwr_on_func(struct rtw89_dev *rtwdev)
+{
+       u32 val32;
+       int ret;
+
+       val32 = rtw89_read32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_PAD_HCI_SEL_V2_MASK);
+       if (val32 == MAC_AX_HCI_SEL_PCIE_USB)
+               rtw89_write32_set(rtwdev, R_AX_LDO_AON_CTRL0, B_AX_PD_REGU_L);
+
+       rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_AFSM_WLSUS_EN |
+                                                   B_AX_AFSM_PCIE_SUS_EN);
+       rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_DIS_WLBT_PDNSUSEN_SOPC);
+       rtw89_write32_set(rtwdev, R_AX_WLLPS_CTRL, B_AX_DIS_WLBT_LPSEN_LOPC);
+       rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APDM_HPDN);
+       rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
+
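+       /* Wait until system power is ready */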
+       ret = read_poll_timeout(rtw89_read32, val32, val32 & B_AX_RDY_SYSPWR,
+                               1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+       if (ret)
+               return ret;
+
+       rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON);
+       rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFN_ONMAC);
+
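+       /* Wait for the ON-MAC power sequence to complete */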
+       ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_APFN_ONMAC),
+                               1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+       if (ret)
+               return ret;
+
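+       /* Toggle the platform enable bit a few times before enabling it for good */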
+       rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+       rtw89_write8_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+       rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+       rtw89_write8_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+
+       rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+       rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_CALIB_EN_V1);
+
+       rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, B_AX_CMAC1_FEN);
+       rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, B_AX_R_SYM_ISO_CMAC12PP);
+       rtw89_write32_clr(rtwdev, R_AX_AFE_CTRL1, B_AX_R_SYM_WLCMAC1_P4_PC_EN |
+                                                 B_AX_R_SYM_WLCMAC1_P3_PC_EN |
+                                                 B_AX_R_SYM_WLCMAC1_P2_PC_EN |
+                                                 B_AX_R_SYM_WLCMAC1_P1_PC_EN |
+                                                 B_AX_R_SYM_WLCMAC1_PC_EN);
+       rtw89_write32_set(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_PTA_1P3);
+
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL,
+                                     XTAL_SI_GND_SHDN_WL, XTAL_SI_GND_SHDN_WL);
+       if (ret)
+               return ret;
+
+       rtw89_write32_set(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_RFC_1P3);
+
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL,
+                                     XTAL_SI_SHDN_WL, XTAL_SI_SHDN_WL);
+       if (ret)
+               return ret;
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_OFF_WEI,
+                                     XTAL_SI_OFF_WEI);
+       if (ret)
+               return ret;
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_OFF_EI,
+                                     XTAL_SI_OFF_EI);
+       if (ret)
+               return ret;
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_RFC2RF);
+       if (ret)
+               return ret;
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_PON_WEI,
+                                     XTAL_SI_PON_WEI);
+       if (ret)
+               return ret;
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_PON_EI,
+                                     XTAL_SI_PON_EI);
+       if (ret)
+               return ret;
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_SRAM2RFC);
+       if (ret)
+               return ret;
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_2, 0, XTAL_SI_LDO_LPS);
+       if (ret)
+               return ret;
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_4, 0, XTAL_SI_LPS_CAP);
+       if (ret)
+               return ret;
+
+       rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+       rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_ISO_EB2CORE);
+       rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B15);
+
+       fsleep(1000);
+
+       rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B14);
+       rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+       rtw89_write32_set(rtwdev, R_AX_GPIO0_15_EECS_EESK_LED1_PULL_LOW_EN,
+                         B_AX_EECS_PULL_LOW_EN | B_AX_EESK_PULL_LOW_EN |
+                         B_AX_LED1_PULL_LOW_EN);
+
+       rtw89_write32_set(rtwdev, R_AX_DMAC_FUNC_EN,
+                         B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MPDU_PROC_EN |
+                         B_AX_WD_RLS_EN | B_AX_DLE_WDE_EN | B_AX_TXPKT_CTRL_EN |
+                         B_AX_STA_SCH_EN | B_AX_DLE_PLE_EN | B_AX_PKT_BUF_EN |
+                         B_AX_DMAC_TBL_EN | B_AX_PKT_IN_EN | B_AX_DLE_CPUIO_EN |
+                         B_AX_DISPATCHER_EN | B_AX_BBRPT_EN | B_AX_MAC_SEC_EN |
+                         B_AX_MAC_UN_EN | B_AX_H_AXIDMA_EN);
+
+       rtw89_write32_set(rtwdev, R_AX_CMAC_FUNC_EN,
+                         B_AX_CMAC_EN | B_AX_CMAC_TXEN | B_AX_CMAC_RXEN |
+                         B_AX_FORCE_CMACREG_GCKEN | B_AX_PHYINTF_EN |
+                         B_AX_CMAC_DMA_EN | B_AX_PTCLTOP_EN | B_AX_SCHEDULER_EN |
+                         B_AX_TMAC_EN | B_AX_RMAC_EN);
+
+       return 0;
+}
+
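+/* Power-off sequence: shut down the RF/XTAL interface, disable the BB and
+ * A-die pads, request MAC power-off and drop into the low power state.
+ */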
+static int rtw8852c_pwr_off_func(struct rtw89_dev *rtwdev)
+{
+       u32 val32;
+       int ret;
+
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_RFC2RF,
+                                     XTAL_SI_RFC2RF);
+       if (ret)
+               return ret;
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_OFF_EI);
+       if (ret)
+               return ret;
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_OFF_WEI);
+       if (ret)
+               return ret;
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0, XTAL_SI_RF00);
+       if (ret)
+               return ret;
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0, XTAL_SI_RF10);
+       if (ret)
+               return ret;
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_SRAM2RFC,
+                                     XTAL_SI_SRAM2RFC);
+       if (ret)
+               return ret;
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_PON_EI);
+       if (ret)
+               return ret;
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_PON_WEI);
+       if (ret)
+               return ret;
+
+       rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON);
+       rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, B_AX_FEN_BB_GLB_RSTN | B_AX_FEN_BBRSTB);
+       rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
+                         B_AX_R_SYM_FEN_WLBBGLB_1 | B_AX_R_SYM_FEN_WLBBFUN_1);
+       rtw89_write32_clr(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_RFC_1P3);
+
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_SHDN_WL);
+       if (ret)
+               return ret;
+
+       rtw89_write32_clr(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_PTA_1P3);
+
+       ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_GND_SHDN_WL);
+       if (ret)
+               return ret;
+
+       rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_OFFMAC);
+
+       ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_APFM_OFFMAC),
+                               1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+       if (ret)
+               return ret;
+
+       rtw89_write32(rtwdev, R_AX_WLLPS_CTRL, 0x0001A0B0);
+       rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_XTAL_OFF_A_DIE);
+       rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
+
+       return 0;
+}
+
+static void rtw8852c_e_efuse_parsing(struct rtw89_efuse *efuse,
+                                    struct rtw8852c_efuse *map)
+{
+       ether_addr_copy(efuse->addr, map->e.mac_addr);
+       efuse->rfe_type = map->rfe_type;
+       efuse->xtal_cap = map->xtal_k;
+}
+
+static void rtw8852c_efuse_parsing_tssi(struct rtw89_dev *rtwdev,
+                                       struct rtw8852c_efuse *map)
+{
+       struct rtw89_tssi_info *tssi = &rtwdev->tssi;
+       struct rtw8852c_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi};
+       u8 *bw40_1s_tssi_6g_ofst[] = {map->bw40_1s_tssi_6g_a, map->bw40_1s_tssi_6g_b};
+       u8 i, j;
+
+       tssi->thermal[RF_PATH_A] = map->path_a_therm;
+       tssi->thermal[RF_PATH_B] = map->path_b_therm;
+
+       for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+               memcpy(tssi->tssi_cck[i], ofst[i]->cck_tssi,
+                      sizeof(ofst[i]->cck_tssi));
+
+               for (j = 0; j < TSSI_CCK_CH_GROUP_NUM; j++)
+                       rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+                                   "[TSSI][EFUSE] path=%d cck[%d]=0x%x\n",
+                                   i, j, tssi->tssi_cck[i][j]);
+
+               memcpy(tssi->tssi_mcs[i], ofst[i]->bw40_tssi,
+                      sizeof(ofst[i]->bw40_tssi));
+               memcpy(tssi->tssi_mcs[i] + TSSI_MCS_2G_CH_GROUP_NUM,
+                      ofst[i]->bw40_1s_tssi_5g, sizeof(ofst[i]->bw40_1s_tssi_5g));
+               memcpy(tssi->tssi_6g_mcs[i], bw40_1s_tssi_6g_ofst[i],
+                      sizeof(tssi->tssi_6g_mcs[i]));
+
+               for (j = 0; j < TSSI_MCS_CH_GROUP_NUM; j++)
+                       rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+                                   "[TSSI][EFUSE] path=%d mcs[%d]=0x%x\n",
+                                   i, j, tssi->tssi_mcs[i][j]);
+       }
+}
+
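+/* Parse the logical efuse map: country code, TSSI calibration data and the
+ * bus-specific fields (currently PCIe only).
+ */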
+static int rtw8852c_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map)
+{
+       struct rtw89_efuse *efuse = &rtwdev->efuse;
+       struct rtw8852c_efuse *map;
+
+       map = (struct rtw8852c_efuse *)log_map;
+
+       efuse->country_code[0] = map->country_code[0];
+       efuse->country_code[1] = map->country_code[1];
+       rtw8852c_efuse_parsing_tssi(rtwdev, map);
+
+       switch (rtwdev->hci.type) {
+       case RTW89_HCI_TYPE_PCIE:
+               rtw8852c_e_efuse_parsing(efuse, map);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type);
+
+       return 0;
+}
+
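+/* Read the per-path TSSI trim values (2G/5G and 6G groups) from the PHY
+ * capability map; if nothing was programmed, fall back to all-zero trims.
+ */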
+static void rtw8852c_phycap_parsing_tssi(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+       struct rtw89_tssi_info *tssi = &rtwdev->tssi;
+       static const u32 tssi_trim_addr[RF_PATH_NUM_8852C] = {0x5D6, 0x5AB};
+       static const u32 tssi_trim_addr_6g[RF_PATH_NUM_8852C] = {0x5CE, 0x5A3};
+       u32 addr = rtwdev->chip->phycap_addr;
+       bool pg = false;
+       u32 ofst;
+       u8 i, j;
+
+       for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+               for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) {
+                       /* addrs are in decreasing order */
+                       ofst = tssi_trim_addr[i] - addr - j;
+                       tssi->tssi_trim[i][j] = phycap_map[ofst];
+
+                       if (phycap_map[ofst] != 0xff)
+                               pg = true;
+               }
+
+               for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM_6G; j++) {
+                       /* addrs are in decreasing order */
+                       ofst = tssi_trim_addr_6g[i] - addr - j;
+                       tssi->tssi_trim_6g[i][j] = phycap_map[ofst];
+
+                       if (phycap_map[ofst] != 0xff)
+                               pg = true;
+               }
+       }
+
+       if (!pg) {
+               memset(tssi->tssi_trim, 0, sizeof(tssi->tssi_trim));
+               memset(tssi->tssi_trim_6g, 0, sizeof(tssi->tssi_trim_6g));
+               rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+                           "[TSSI][TRIM] no PG, set all trim info to 0\n");
+       }
+
+       for (i = 0; i < RF_PATH_NUM_8852C; i++)
+               for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++)
+                       rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+                                   "[TSSI] path=%d idx=%d trim=0x%x addr=0x%x\n",
+                                   i, j, tssi->tssi_trim[i][j],
+                                   tssi_trim_addr[i] - j);
+}
+
+static void rtw8852c_phycap_parsing_thermal_trim(struct rtw89_dev *rtwdev,
+                                                u8 *phycap_map)
+{
+       struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+       static const u32 thm_trim_addr[RF_PATH_NUM_8852C] = {0x5DF, 0x5DC};
+       u32 addr = rtwdev->chip->phycap_addr;
+       u8 i;
+
+       for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+               info->thermal_trim[i] = phycap_map[thm_trim_addr[i] - addr];
+
+               rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                           "[THERMAL][TRIM] path=%d thermal_trim=0x%x\n",
+                           i, info->thermal_trim[i]);
+
+               if (info->thermal_trim[i] != 0xff)
+                       info->pg_thermal_trim = true;
+       }
+}
+
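+/* Apply the efuse thermal trim value to RR_TM2 on each RF path, if any was
+ * programmed.
+ */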
+static void rtw8852c_thermal_trim(struct rtw89_dev *rtwdev)
+{
+#define __thm_setting(raw)                             \
+({                                                     \
+       u8 __v = (raw);                                 \
+       ((__v & 0x1) << 3) | ((__v & 0x1f) >> 1);       \
+})
+       struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+       u8 i, val;
+
+       if (!info->pg_thermal_trim) {
+               rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                           "[THERMAL][TRIM] no PG, do nothing\n");
+
+               return;
+       }
+
+       for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+               val = __thm_setting(info->thermal_trim[i]);
+               rtw89_write_rf(rtwdev, i, RR_TM2, RR_TM2_OFF, val);
+
+               rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                           "[THERMAL][TRIM] path=%d thermal_setting=0x%x\n",
+                           i, val);
+       }
+#undef __thm_setting
+}
+
+static void rtw8852c_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev,
+                                                u8 *phycap_map)
+{
+       struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+       static const u32 pabias_trim_addr[RF_PATH_NUM_8852C] = {0x5DE, 0x5DB};
+       u32 addr = rtwdev->chip->phycap_addr;
+       u8 i;
+
+       for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+               info->pa_bias_trim[i] = phycap_map[pabias_trim_addr[i] - addr];
+
+               rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                           "[PA_BIAS][TRIM] path=%d pa_bias_trim=0x%x\n",
+                           i, info->pa_bias_trim[i]);
+
+               if (info->pa_bias_trim[i] != 0xff)
+                       info->pg_pa_bias_trim = true;
+       }
+}
+
+static void rtw8852c_pa_bias_trim(struct rtw89_dev *rtwdev)
+{
+       struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+       u8 pabias_2g, pabias_5g;
+       u8 i;
+
+       if (!info->pg_pa_bias_trim) {
+               rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                           "[PA_BIAS][TRIM] no PG, do nothing\n");
+
+               return;
+       }
+
+       for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+               pabias_2g = FIELD_GET(GENMASK(3, 0), info->pa_bias_trim[i]);
+               pabias_5g = FIELD_GET(GENMASK(7, 4), info->pa_bias_trim[i]);
+
+               rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                           "[PA_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n",
+                           i, pabias_2g, pabias_5g);
+
+               rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXG, pabias_2g);
+               rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXA, pabias_5g);
+       }
+}
+
+static int rtw8852c_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+       rtw8852c_phycap_parsing_tssi(rtwdev, phycap_map);
+       rtw8852c_phycap_parsing_thermal_trim(rtwdev, phycap_map);
+       rtw8852c_phycap_parsing_pa_bias_trim(rtwdev, phycap_map);
+
+       return 0;
+}
+
+static void rtw8852c_power_trim(struct rtw89_dev *rtwdev)
+{
+       rtw8852c_thermal_trim(rtwdev);
+       rtw8852c_pa_bias_trim(rtwdev);
+}
+
+static
+void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
+                                    s8 pw_ofst, enum rtw89_mac_idx mac_idx)
+{
+       s8 pw_ofst_2tx;
+       s8 val_1t;
+       s8 val_2t;
+       u32 reg;
+       u8 i;
+
+       if (pw_ofst < -32 || pw_ofst > 31) {
+               rtw89_warn(rtwdev, "[ULTB] Err pwr_offset=%d\n", pw_ofst);
+               return;
+       }
+       val_1t = pw_ofst << 2;
+       pw_ofst_2tx = max(pw_ofst - 3, -32);
+       val_2t = pw_ofst_2tx << 2;
+
+       rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[ULTB] val_1tx=0x%x\n", val_1t);
+       rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[ULTB] val_2tx=0x%x\n", val_2t);
+
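+       /* Program the 1TX/2TX offsets into each of the four byte fields */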
+       for (i = 0; i < 4; i++) {
+               /* 1TX */
+               reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_1T, mac_idx);
+               rtw89_write32_mask(rtwdev, reg,
+                                  B_AX_PWR_UL_TB_1T_V1_MASK << (8 * i),
+                                  val_1t);
+               /* 2TX */
+               reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_2T, mac_idx);
+               rtw89_write32_mask(rtwdev, reg,
+                                  B_AX_PWR_UL_TB_2T_V1_MASK << (8 * i),
+                                  val_2t);
+       }
+}
+
+static const struct rtw89_chip_ops rtw8852c_chip_ops = {
+       .read_efuse             = rtw8852c_read_efuse,
+       .read_phycap            = rtw8852c_read_phycap,
+       .power_trim             = rtw8852c_power_trim,
+       .read_rf                = rtw89_phy_read_rf_v1,
+       .write_rf               = rtw89_phy_write_rf_v1,
+       .set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset,
+       .pwr_on_func            = rtw8852c_pwr_on_func,
+       .pwr_off_func           = rtw8852c_pwr_off_func,
+       .cfg_ctrl_path          = rtw89_mac_cfg_ctrl_path_v1,
+       .mac_cfg_gnt            = rtw89_mac_cfg_gnt_v1,
+       .stop_sch_tx            = rtw89_mac_stop_sch_tx_v1,
+       .resume_sch_tx          = rtw89_mac_resume_sch_tx_v1,
+};
+
+const struct rtw89_chip_info rtw8852c_chip_info = {
+       .chip_id                = RTL8852C,
+       .ops                    = &rtw8852c_chip_ops,
+       .fw_name                = "rtw89/rtw8852c_fw.bin",
+       .dle_mem                = rtw8852c_dle_mem_pcie,
+       .rf_base_addr           = {0xe000, 0xf000},
+       .pwr_on_seq             = NULL,
+       .pwr_off_seq            = NULL,
+       .sec_ctrl_efuse_size    = 4,
+       .physical_efuse_size    = 1216,
+       .logical_efuse_size     = 2048,
+       .limit_efuse_size       = 1280,
+       .dav_phy_efuse_size     = 96,
+       .dav_log_efuse_size     = 16,
+       .phycap_addr            = 0x590,
+       .phycap_size            = 0x60,
+       .hci_func_en_addr       = R_AX_HCI_FUNC_EN_V1,
+       .h2c_ctrl_reg           = R_AX_H2CREG_CTRL_V1,
+       .h2c_regs               = rtw8852c_h2c_regs,
+       .c2h_ctrl_reg           = R_AX_C2HREG_CTRL_V1,
+       .c2h_regs               = rtw8852c_c2h_regs,
+       .page_regs              = &rtw8852c_page_regs,
+       .dcfo_comp              = &rtw8852c_dcfo_comp,
+       .dcfo_comp_sft          = 5,
+};
+EXPORT_SYMBOL(rtw8852c_chip_info);
+
+MODULE_FIRMWARE("rtw89/rtw8852c_fw.bin");
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852C driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.h b/drivers/net/wireless/realtek/rtw89/rtw8852c.h
new file mode 100644 (file)
index 0000000..d059471
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2022  Realtek Corporation
+ */
+
+#ifndef __RTW89_8852C_H__
+#define __RTW89_8852C_H__
+
+#include "core.h"
+
+#define RF_PATH_NUM_8852C 2
+
+struct rtw8852c_u_efuse {
+       u8 rsvd[0x38];
+       u8 mac_addr[ETH_ALEN];
+};
+
+struct rtw8852c_e_efuse {
+       u8 mac_addr[ETH_ALEN];
+};
+
+struct rtw8852c_tssi_offset {
+       u8 cck_tssi[TSSI_CCK_CH_GROUP_NUM];
+       u8 bw40_tssi[TSSI_MCS_2G_CH_GROUP_NUM];
+       u8 rsvd[7];
+       u8 bw40_1s_tssi_5g[TSSI_MCS_5G_CH_GROUP_NUM];
+} __packed;
+
+struct rtw8852c_efuse {
+       u8 rsvd[0x210];
+       struct rtw8852c_tssi_offset path_a_tssi;
+       u8 rsvd1[10];
+       struct rtw8852c_tssi_offset path_b_tssi;
+       u8 rsvd2[94];
+       u8 channel_plan;
+       u8 xtal_k;
+       u8 rsvd3;
+       u8 iqk_lck;
+       u8 rsvd4[5];
+       u8 reg_setting:2;
+       u8 tx_diversity:1;
+       u8 rx_diversity:2;
+       u8 ac_mode:1;
+       u8 module_type:2;
+       u8 rsvd5;
+       u8 shared_ant:1;
+       u8 coex_type:3;
+       u8 ant_iso:1;
+       u8 radio_on_off:1;
+       u8 rsvd6:2;
+       u8 eeprom_version;
+       u8 customer_id;
+       u8 tx_bb_swing_2g;
+       u8 tx_bb_swing_5g;
+       u8 tx_cali_pwr_trk_mode;
+       u8 trx_path_selection;
+       u8 rfe_type;
+       u8 country_code[2];
+       u8 rsvd7[3];
+       u8 path_a_therm;
+       u8 path_b_therm;
+       u8 rsvd8[46];
+       u8 bw40_1s_tssi_6g_a[TSSI_MCS_6G_CH_GROUP_NUM];
+       u8 rsvd9[10];
+       u8 bw40_1s_tssi_6g_b[TSSI_MCS_6G_CH_GROUP_NUM];
+       u8 rsvd10[110];
+       u8 channel_plan_6g;
+       u8 rsvd11[71];
+       union {
+               struct rtw8852c_u_efuse u;
+               struct rtw8852c_e_efuse e;
+       };
+} __packed;
+
+extern const struct rtw89_chip_info rtw8852c_chip_info;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
new file mode 100644 (file)
index 0000000..e713705
--- /dev/null
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2020-2022  Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+#include "reg.h"
+#include "rtw8852c.h"
+
+static const struct rtw89_pci_info rtw8852c_pci_info = {
+       .dma_addr_set           = &rtw89_pci_ch_dma_addr_set_v1,
+};
+
+static const struct rtw89_driver_info rtw89_8852ce_info = {
+       .chip = &rtw8852c_chip_info,
+       .bus = {
+               .pci = &rtw8852c_pci_info,
+       },
+};
+
+static const struct pci_device_id rtw89_8852ce_id_table[] = {
+       {
+               PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xc852),
+               .driver_data = (kernel_ulong_t)&rtw89_8852ce_info,
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(pci, rtw89_8852ce_id_table);
+
+static struct pci_driver rtw89_8852ce_driver = {
+       .name           = "rtw89_8852ce",
+       .id_table       = rtw89_8852ce_id_table,
+       .probe          = rtw89_pci_probe,
+       .remove         = rtw89_pci_remove,
+       .driver.pm      = &rtw89_pm_ops,
+};
+module_pci_driver(rtw89_8852ce_driver);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852CE driver");
+MODULE_LICENSE("Dual BSD/GPL");
index 12952b1..e06da4b 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <net/mac80211.h>
 #include <linux/sched.h>
+#include <linux/jiffies.h>
 #include "queue.h"
 #include "cw1200.h"
 #include "debug.h"
@@ -94,7 +95,7 @@ static void __cw1200_queue_gc(struct cw1200_queue *queue,
        bool wakeup_stats = false;
 
        list_for_each_entry_safe(item, tmp, &queue->queue, head) {
-               if (jiffies - item->queue_timestamp < queue->ttl)
+               if (time_is_after_jiffies(item->queue_timestamp + queue->ttl))
                        break;
                --queue->num_queued;
                --queue->link_map_cache[item->txpriv.link_id];
index e64e4e5..82bc0d4 100644 (file)
@@ -521,7 +521,7 @@ static int zd1201_setconfig(struct zd1201 *zd, int rid, const void *buf, int len
        zd->rxdatas = 0;
        zd->rxlen = 0;
        for (seq=0; len > 0; seq++) {
-               request = kmalloc(16, gfp_mask);
+               request = kzalloc(16, gfp_mask);
                if (!request)
                        return -ENOMEM;
                urb = usb_alloc_urb(0, gfp_mask);
@@ -529,7 +529,6 @@ static int zd1201_setconfig(struct zd1201 *zd, int rid, const void *buf, int len
                        kfree(request);
                        return -ENOMEM;
                }
-               memset(request, 0, 16);
                reqlen = len>12 ? 12 : len;
                request[0] = ZD1201_USB_RESREQ;
                request[1] = seq;
index 7748f07..daa4e61 100644 (file)
@@ -424,14 +424,12 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue)
                        queue->tx_link[id] = TX_LINK_NONE;
                        skb = queue->tx_skbs[id];
                        queue->tx_skbs[id] = NULL;
-                       if (unlikely(gnttab_query_foreign_access(
-                               queue->grant_tx_ref[id]) != 0)) {
+                       if (unlikely(!gnttab_end_foreign_access_ref(
+                               queue->grant_tx_ref[id], GNTMAP_readonly))) {
                                dev_alert(dev,
                                          "Grant still in use by backend domain\n");
                                goto err;
                        }
-                       gnttab_end_foreign_access_ref(
-                               queue->grant_tx_ref[id], GNTMAP_readonly);
                        gnttab_release_grant_reference(
                                &queue->gref_tx_head, queue->grant_tx_ref[id]);
                        queue->grant_tx_ref[id] = GRANT_INVALID_REF;
@@ -990,7 +988,6 @@ static int xennet_get_responses(struct netfront_queue *queue,
        struct device *dev = &queue->info->netdev->dev;
        struct bpf_prog *xdp_prog;
        struct xdp_buff xdp;
-       unsigned long ret;
        int slots = 1;
        int err = 0;
        u32 verdict;
@@ -1032,8 +1029,13 @@ static int xennet_get_responses(struct netfront_queue *queue,
                        goto next;
                }
 
-               ret = gnttab_end_foreign_access_ref(ref, 0);
-               BUG_ON(!ret);
+               if (!gnttab_end_foreign_access_ref(ref, 0)) {
+                       dev_alert(dev,
+                                 "Grant still in use by backend domain\n");
+                       queue->info->broken = true;
+                       dev_alert(dev, "Disabled for further use\n");
+                       return -EINVAL;
+               }
 
                gnttab_release_grant_reference(&queue->gref_rx_head, ref);
 
@@ -1254,6 +1256,10 @@ static int xennet_poll(struct napi_struct *napi, int budget)
                                           &need_xdp_flush);
 
                if (unlikely(err)) {
+                       if (queue->info->broken) {
+                               spin_unlock(&queue->rx_lock);
+                               return 0;
+                       }
 err:
                        while ((skb = __skb_dequeue(&tmpq)))
                                __skb_queue_tail(&errq, skb);
@@ -1918,7 +1924,7 @@ static int setup_netfront(struct xenbus_device *dev,
                        struct netfront_queue *queue, unsigned int feature_split_evtchn)
 {
        struct xen_netif_tx_sring *txs;
-       struct xen_netif_rx_sring *rxs;
+       struct xen_netif_rx_sring *rxs = NULL;
        grant_ref_t gref;
        int err;
 
@@ -1938,21 +1944,21 @@ static int setup_netfront(struct xenbus_device *dev,
 
        err = xenbus_grant_ring(dev, txs, 1, &gref);
        if (err < 0)
-               goto grant_tx_ring_fail;
+               goto fail;
        queue->tx_ring_ref = gref;
 
        rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
        if (!rxs) {
                err = -ENOMEM;
                xenbus_dev_fatal(dev, err, "allocating rx ring page");
-               goto alloc_rx_ring_fail;
+               goto fail;
        }
        SHARED_RING_INIT(rxs);
        FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
 
        err = xenbus_grant_ring(dev, rxs, 1, &gref);
        if (err < 0)
-               goto grant_rx_ring_fail;
+               goto fail;
        queue->rx_ring_ref = gref;
 
        if (feature_split_evtchn)
@@ -1965,22 +1971,28 @@ static int setup_netfront(struct xenbus_device *dev,
                err = setup_netfront_single(queue);
 
        if (err)
-               goto alloc_evtchn_fail;
+               goto fail;
 
        return 0;
 
        /* If we fail to setup netfront, it is safe to just revoke access to
         * granted pages because backend is not accessing it at this point.
         */
-alloc_evtchn_fail:
-       gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
-grant_rx_ring_fail:
-       free_page((unsigned long)rxs);
-alloc_rx_ring_fail:
-       gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
-grant_tx_ring_fail:
-       free_page((unsigned long)txs);
-fail:
+ fail:
+       if (queue->rx_ring_ref != GRANT_INVALID_REF) {
+               gnttab_end_foreign_access(queue->rx_ring_ref, 0,
+                                         (unsigned long)rxs);
+               queue->rx_ring_ref = GRANT_INVALID_REF;
+       } else {
+               free_page((unsigned long)rxs);
+       }
+       if (queue->tx_ring_ref != GRANT_INVALID_REF) {
+               gnttab_end_foreign_access(queue->tx_ring_ref, 0,
+                                         (unsigned long)txs);
+               queue->tx_ring_ref = GRANT_INVALID_REF;
+       } else {
+               free_page((unsigned long)txs);
+       }
        return err;
 }
 
index d7db1a0..00d8ea6 100644 (file)
@@ -1612,7 +1612,9 @@ free_nfc_dev:
        nfc_digital_free_device(dev->nfc_digital_dev);
 
 error:
+       usb_kill_urb(dev->in_urb);
        usb_free_urb(dev->in_urb);
+       usb_kill_urb(dev->out_urb);
        usb_free_urb(dev->out_urb);
        usb_put_dev(dev->udev);
 
index a86b5ed..42dc0e5 100644 (file)
@@ -570,8 +570,7 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
 
        if (phy->powered)
                st21nfca_hci_i2c_disable(phy);
-       if (phy->pending_skb)
-               kfree_skb(phy->pending_skb);
+       kfree_skb(phy->pending_skb);
 
        return 0;
 }
index c3669c2..0e91cd9 100644 (file)
@@ -22,3 +22,13 @@ config PHY_FSL_IMX8M_PCIE
        help
          Enable this to add support for the PCIE PHY as found on
          i.MX8M family of SOCs.
+
+config PHY_FSL_LYNX_28G
+       tristate "Freescale Layerscape Lynx 28G SerDes PHY support"
+       depends on OF
+       select GENERIC_PHY
+       help
+         Enable this to add support for the Lynx SerDes 28G PHY as
+         found on NXP's Layerscape platforms such as LX2160A.
+         It is used to change the protocol running on the SerDes lanes
+         at runtime, and is only useful for a restricted set of
+         Ethernet protocols.
index 55d07c7..3518d5d 100644 (file)
@@ -2,3 +2,4 @@
 obj-$(CONFIG_PHY_FSL_IMX8MQ_USB)       += phy-fsl-imx8mq-usb.o
 obj-$(CONFIG_PHY_MIXEL_MIPI_DPHY)      += phy-fsl-imx8-mipi-dphy.o
 obj-$(CONFIG_PHY_FSL_IMX8M_PCIE)       += phy-fsl-imx8m-pcie.o
+obj-$(CONFIG_PHY_FSL_LYNX_28G)         += phy-fsl-lynx-28g.o
diff --git a/drivers/phy/freescale/phy-fsl-lynx-28g.c b/drivers/phy/freescale/phy-fsl-lynx-28g.c
new file mode 100644 (file)
index 0000000..569f12a
--- /dev/null
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2021-2022 NXP. */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+#define LYNX_28G_NUM_LANE                      8
+#define LYNX_28G_NUM_PLL                       2
+
+/* General registers per SerDes block */
+#define LYNX_28G_PCC8                          0x10a0
+#define LYNX_28G_PCC8_SGMII                    0x1
+#define LYNX_28G_PCC8_SGMII_DIS                        0x0
+
+#define LYNX_28G_PCCC                          0x10b0
+#define LYNX_28G_PCCC_10GBASER                 0x9
+#define LYNX_28G_PCCC_USXGMII                  0x1
+#define LYNX_28G_PCCC_SXGMII_DIS               0x0
+
+#define LYNX_28G_LNa_PCC_OFFSET(lane)          (4 * (LYNX_28G_NUM_LANE - ((lane)->id) - 1))
+
+/* Per PLL registers */
+#define LYNX_28G_PLLnRSTCTL(pll)               (0x400 + (pll) * 0x100 + 0x0)
+#define LYNX_28G_PLLnRSTCTL_DIS(rstctl)                (((rstctl) & BIT(24)) >> 24)
+#define LYNX_28G_PLLnRSTCTL_LOCK(rstctl)       (((rstctl) & BIT(23)) >> 23)
+
+#define LYNX_28G_PLLnCR0(pll)                  (0x400 + (pll) * 0x100 + 0x4)
+#define LYNX_28G_PLLnCR0_REFCLK_SEL(cr0)       (((cr0) & GENMASK(20, 16)))
+#define LYNX_28G_PLLnCR0_REFCLK_SEL_100MHZ     0x0
+#define LYNX_28G_PLLnCR0_REFCLK_SEL_125MHZ     0x10000
+#define LYNX_28G_PLLnCR0_REFCLK_SEL_156MHZ     0x20000
+#define LYNX_28G_PLLnCR0_REFCLK_SEL_150MHZ     0x30000
+#define LYNX_28G_PLLnCR0_REFCLK_SEL_161MHZ     0x40000
+
+#define LYNX_28G_PLLnCR1(pll)                  (0x400 + (pll) * 0x100 + 0x8)
+#define LYNX_28G_PLLnCR1_FRATE_SEL(cr1)                (((cr1) & GENMASK(28, 24)))
+#define LYNX_28G_PLLnCR1_FRATE_5G_10GVCO       0x0
+#define LYNX_28G_PLLnCR1_FRATE_5G_25GVCO       0x10000000
+#define LYNX_28G_PLLnCR1_FRATE_10G_20GVCO      0x6000000
+
+/* Per SerDes lane registers */
+/* Lane a General Control Register */
+#define LYNX_28G_LNaGCR0(lane)                 (0x800 + (lane) * 0x100 + 0x0)
+#define LYNX_28G_LNaGCR0_PROTO_SEL_MSK         GENMASK(7, 3)
+#define LYNX_28G_LNaGCR0_PROTO_SEL_SGMII       0x8
+#define LYNX_28G_LNaGCR0_PROTO_SEL_XFI         0x50
+#define LYNX_28G_LNaGCR0_IF_WIDTH_MSK          GENMASK(2, 0)
+#define LYNX_28G_LNaGCR0_IF_WIDTH_10_BIT       0x0
+#define LYNX_28G_LNaGCR0_IF_WIDTH_20_BIT       0x2
+
+/* Lane a Tx Reset Control Register */
+#define LYNX_28G_LNaTRSTCTL(lane)              (0x800 + (lane) * 0x100 + 0x20)
+#define LYNX_28G_LNaTRSTCTL_HLT_REQ            BIT(27)
+#define LYNX_28G_LNaTRSTCTL_RST_DONE           BIT(30)
+#define LYNX_28G_LNaTRSTCTL_RST_REQ            BIT(31)
+
+/* Lane a Tx General Control Register */
+#define LYNX_28G_LNaTGCR0(lane)                        (0x800 + (lane) * 0x100 + 0x24)
+#define LYNX_28G_LNaTGCR0_USE_PLLF             0x0
+#define LYNX_28G_LNaTGCR0_USE_PLLS             BIT(28)
+#define LYNX_28G_LNaTGCR0_USE_PLL_MSK          BIT(28)
+#define LYNX_28G_LNaTGCR0_N_RATE_FULL          0x0
+#define LYNX_28G_LNaTGCR0_N_RATE_HALF          0x1000000
+#define LYNX_28G_LNaTGCR0_N_RATE_QUARTER       0x2000000
+#define LYNX_28G_LNaTGCR0_N_RATE_MSK           GENMASK(26, 24)
+
+#define LYNX_28G_LNaTECR0(lane)                        (0x800 + (lane) * 0x100 + 0x30)
+
+/* Lane a Rx Reset Control Register */
+#define LYNX_28G_LNaRRSTCTL(lane)              (0x800 + (lane) * 0x100 + 0x40)
+#define LYNX_28G_LNaRRSTCTL_HLT_REQ            BIT(27)
+#define LYNX_28G_LNaRRSTCTL_RST_DONE           BIT(30)
+#define LYNX_28G_LNaRRSTCTL_RST_REQ            BIT(31)
+#define LYNX_28G_LNaRRSTCTL_CDR_LOCK           BIT(12)
+
+/* Lane a Rx General Control Register */
+#define LYNX_28G_LNaRGCR0(lane)                        (0x800 + (lane) * 0x100 + 0x44)
+#define LYNX_28G_LNaRGCR0_USE_PLLF             0x0
+#define LYNX_28G_LNaRGCR0_USE_PLLS             BIT(28)
+#define LYNX_28G_LNaRGCR0_USE_PLL_MSK          BIT(28)
+#define LYNX_28G_LNaRGCR0_N_RATE_MSK           GENMASK(26, 24)
+#define LYNX_28G_LNaRGCR0_N_RATE_FULL          0x0
+#define LYNX_28G_LNaRGCR0_N_RATE_HALF          0x1000000
+#define LYNX_28G_LNaRGCR0_N_RATE_QUARTER       0x2000000
+
+#define LYNX_28G_LNaRGCR1(lane)                        (0x800 + (lane) * 0x100 + 0x48)
+
+#define LYNX_28G_LNaRECR0(lane)                        (0x800 + (lane) * 0x100 + 0x50)
+#define LYNX_28G_LNaRECR1(lane)                        (0x800 + (lane) * 0x100 + 0x54)
+#define LYNX_28G_LNaRECR2(lane)                        (0x800 + (lane) * 0x100 + 0x58)
+
+#define LYNX_28G_LNaRSCCR0(lane)               (0x800 + (lane) * 0x100 + 0x74)
+
+#define LYNX_28G_LNaPSS(lane)                  (0x1000 + (lane) * 0x4)
+#define LYNX_28G_LNaPSS_TYPE(pss)              (((pss) & GENMASK(30, 24)) >> 24)
+#define LYNX_28G_LNaPSS_TYPE_SGMII             0x4
+#define LYNX_28G_LNaPSS_TYPE_XFI               0x28
+
+#define LYNX_28G_SGMIIaCR1(lane)               (0x1804 + (lane) * 0x10)
+#define LYNX_28G_SGMIIaCR1_SGPCS_EN            BIT(11)
+#define LYNX_28G_SGMIIaCR1_SGPCS_DIS           0x0
+#define LYNX_28G_SGMIIaCR1_SGPCS_MSK           BIT(11)
+
+struct lynx_28g_priv;
+
+struct lynx_28g_pll {
+       struct lynx_28g_priv *priv;
+       u32 rstctl, cr0, cr1;
+       int id;
+       DECLARE_PHY_INTERFACE_MASK(supported);
+};
+
+struct lynx_28g_lane {
+       struct lynx_28g_priv *priv;
+       struct phy *phy;
+       bool powered_up;
+       bool init;
+       unsigned int id;
+       phy_interface_t interface;
+};
+
+struct lynx_28g_priv {
+       void __iomem *base;
+       struct device *dev;
+       struct lynx_28g_pll pll[LYNX_28G_NUM_PLL];
+       struct lynx_28g_lane lane[LYNX_28G_NUM_LANE];
+
+       struct delayed_work cdr_check;
+};
+
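+/* Read-modify-write helper for the memory-mapped SerDes registers */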
+static void lynx_28g_rmw(struct lynx_28g_priv *priv, unsigned long off,
+                        u32 val, u32 mask)
+{
+       void __iomem *reg = priv->base + off;
+       u32 orig, tmp;
+
+       orig = ioread32(reg);
+       tmp = orig & ~mask;
+       tmp |= val;
+       iowrite32(tmp, reg);
+}
+
+#define lynx_28g_lane_rmw(lane, reg, val, mask)        \
+       lynx_28g_rmw((lane)->priv, LYNX_28G_##reg(lane->id), \
+                    LYNX_28G_##reg##_##val, LYNX_28G_##reg##_##mask)
+#define lynx_28g_lane_read(lane, reg)                  \
+       ioread32((lane)->priv->base + LYNX_28G_##reg((lane)->id))
+#define lynx_28g_pll_read(pll, reg)                    \
+       ioread32((pll)->priv->base + LYNX_28G_##reg((pll)->id))
+
+static bool lynx_28g_supports_interface(struct lynx_28g_priv *priv, int intf)
+{
+       int i;
+
+       for (i = 0; i < LYNX_28G_NUM_PLL; i++) {
+               if (LYNX_28G_PLLnRSTCTL_DIS(priv->pll[i].rstctl))
+                       continue;
+
+               if (test_bit(intf, priv->pll[i].supported))
+                       return true;
+       }
+
+       return false;
+}
+
+static struct lynx_28g_pll *lynx_28g_pll_get(struct lynx_28g_priv *priv,
+                                            phy_interface_t intf)
+{
+       struct lynx_28g_pll *pll;
+       int i;
+
+       for (i = 0; i < LYNX_28G_NUM_PLL; i++) {
+               pll = &priv->pll[i];
+
+               if (LYNX_28G_PLLnRSTCTL_DIS(pll->rstctl))
+                       continue;
+
+               if (test_bit(intf, pll->supported))
+                       return pll;
+       }
+
+       return NULL;
+}
+
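+/* Pick the lane clock divider (full or quarter rate) that matches the PLL
+ * VCO frequency to the requested interface speed.
+ */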
+static void lynx_28g_lane_set_nrate(struct lynx_28g_lane *lane,
+                                   struct lynx_28g_pll *pll,
+                                   phy_interface_t intf)
+{
+       switch (LYNX_28G_PLLnCR1_FRATE_SEL(pll->cr1)) {
+       case LYNX_28G_PLLnCR1_FRATE_5G_10GVCO:
+       case LYNX_28G_PLLnCR1_FRATE_5G_25GVCO:
+               switch (intf) {
+               case PHY_INTERFACE_MODE_SGMII:
+               case PHY_INTERFACE_MODE_1000BASEX:
+                       lynx_28g_lane_rmw(lane, LNaTGCR0, N_RATE_QUARTER, N_RATE_MSK);
+                       lynx_28g_lane_rmw(lane, LNaRGCR0, N_RATE_QUARTER, N_RATE_MSK);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       case LYNX_28G_PLLnCR1_FRATE_10G_20GVCO:
+               switch (intf) {
+               case PHY_INTERFACE_MODE_10GBASER:
+               case PHY_INTERFACE_MODE_USXGMII:
+                       lynx_28g_lane_rmw(lane, LNaTGCR0, N_RATE_FULL, N_RATE_MSK);
+                       lynx_28g_lane_rmw(lane, LNaRGCR0, N_RATE_FULL, N_RATE_MSK);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+}
+
+static void lynx_28g_lane_set_pll(struct lynx_28g_lane *lane,
+                                 struct lynx_28g_pll *pll)
+{
+       if (pll->id == 0) {
+               lynx_28g_lane_rmw(lane, LNaTGCR0, USE_PLLF, USE_PLL_MSK);
+               lynx_28g_lane_rmw(lane, LNaRGCR0, USE_PLLF, USE_PLL_MSK);
+       } else {
+               lynx_28g_lane_rmw(lane, LNaTGCR0, USE_PLLS, USE_PLL_MSK);
+               lynx_28g_lane_rmw(lane, LNaRGCR0, USE_PLLS, USE_PLL_MSK);
+       }
+}
+
+static void lynx_28g_cleanup_lane(struct lynx_28g_lane *lane)
+{
+       u32 lane_offset = LYNX_28G_LNa_PCC_OFFSET(lane);
+       struct lynx_28g_priv *priv = lane->priv;
+
+       /* Cleanup the protocol configuration registers of the current protocol */
+       switch (lane->interface) {
+       case PHY_INTERFACE_MODE_10GBASER:
+               lynx_28g_rmw(priv, LYNX_28G_PCCC,
+                            LYNX_28G_PCCC_SXGMII_DIS << lane_offset,
+                            GENMASK(3, 0) << lane_offset);
+               break;
+       case PHY_INTERFACE_MODE_SGMII:
+       case PHY_INTERFACE_MODE_1000BASEX:
+               lynx_28g_rmw(priv, LYNX_28G_PCC8,
+                            LYNX_28G_PCC8_SGMII_DIS << lane_offset,
+                            GENMASK(3, 0) << lane_offset);
+               break;
+       default:
+               break;
+       }
+}
+
+static void lynx_28g_lane_set_sgmii(struct lynx_28g_lane *lane)
+{
+       u32 lane_offset = LYNX_28G_LNa_PCC_OFFSET(lane);
+       struct lynx_28g_priv *priv = lane->priv;
+       struct lynx_28g_pll *pll;
+
+       lynx_28g_cleanup_lane(lane);
+
+       /* Setup the lane to run in SGMII */
+       lynx_28g_rmw(priv, LYNX_28G_PCC8,
+                    LYNX_28G_PCC8_SGMII << lane_offset,
+                    GENMASK(3, 0) << lane_offset);
+
+       /* Setup the protocol select and SerDes parallel interface width */
+       lynx_28g_lane_rmw(lane, LNaGCR0, PROTO_SEL_SGMII, PROTO_SEL_MSK);
+       lynx_28g_lane_rmw(lane, LNaGCR0, IF_WIDTH_10_BIT, IF_WIDTH_MSK);
+
+       /* Switch to the PLL that works with this interface type */
+       pll = lynx_28g_pll_get(priv, PHY_INTERFACE_MODE_SGMII);
+       lynx_28g_lane_set_pll(lane, pll);
+
+       /* Choose the portion of clock net to be used on this lane */
+       lynx_28g_lane_set_nrate(lane, pll, PHY_INTERFACE_MODE_SGMII);
+
+       /* Enable the SGMII PCS */
+       lynx_28g_lane_rmw(lane, SGMIIaCR1, SGPCS_EN, SGPCS_MSK);
+
+       /* Configure the appropriate equalization parameters for the protocol */
+       iowrite32(0x00808006, priv->base + LYNX_28G_LNaTECR0(lane->id));
+       iowrite32(0x04310000, priv->base + LYNX_28G_LNaRGCR1(lane->id));
+       iowrite32(0x9f800000, priv->base + LYNX_28G_LNaRECR0(lane->id));
+       iowrite32(0x001f0000, priv->base + LYNX_28G_LNaRECR1(lane->id));
+       iowrite32(0x00000000, priv->base + LYNX_28G_LNaRECR2(lane->id));
+       iowrite32(0x00000000, priv->base + LYNX_28G_LNaRSCCR0(lane->id));
+}
+
+static void lynx_28g_lane_set_10gbaser(struct lynx_28g_lane *lane)
+{
+       u32 lane_offset = LYNX_28G_LNa_PCC_OFFSET(lane);
+       struct lynx_28g_priv *priv = lane->priv;
+       struct lynx_28g_pll *pll;
+
+       lynx_28g_cleanup_lane(lane);
+
+       /* Enable the SXGMII lane */
+       lynx_28g_rmw(priv, LYNX_28G_PCCC,
+                    LYNX_28G_PCCC_10GBASER << lane_offset,
+                    GENMASK(3, 0) << lane_offset);
+
+       /* Setup the protocol select and SerDes parallel interface width */
+       lynx_28g_lane_rmw(lane, LNaGCR0, PROTO_SEL_XFI, PROTO_SEL_MSK);
+       lynx_28g_lane_rmw(lane, LNaGCR0, IF_WIDTH_20_BIT, IF_WIDTH_MSK);
+
+       /* Switch to the PLL that works with this interface type */
+       pll = lynx_28g_pll_get(priv, PHY_INTERFACE_MODE_10GBASER);
+       lynx_28g_lane_set_pll(lane, pll);
+
+       /* Choose the portion of clock net to be used on this lane */
+       lynx_28g_lane_set_nrate(lane, pll, PHY_INTERFACE_MODE_10GBASER);
+
+       /* Disable the SGMII PCS */
+       lynx_28g_lane_rmw(lane, SGMIIaCR1, SGPCS_DIS, SGPCS_MSK);
+
+       /* Configure the appropriate equalization parameters for the protocol */
+       iowrite32(0x10808307, priv->base + LYNX_28G_LNaTECR0(lane->id));
+       iowrite32(0x10000000, priv->base + LYNX_28G_LNaRGCR1(lane->id));
+       iowrite32(0x00000000, priv->base + LYNX_28G_LNaRECR0(lane->id));
+       iowrite32(0x001f0000, priv->base + LYNX_28G_LNaRECR1(lane->id));
+       iowrite32(0x81000020, priv->base + LYNX_28G_LNaRECR2(lane->id));
+       iowrite32(0x00002000, priv->base + LYNX_28G_LNaRSCCR0(lane->id));
+}
+
+static int lynx_28g_power_off(struct phy *phy)
+{
+       struct lynx_28g_lane *lane = phy_get_drvdata(phy);
+       u32 trstctl, rrstctl;
+
+       if (!lane->powered_up)
+               return 0;
+
+       /* Issue a halt request */
+       lynx_28g_lane_rmw(lane, LNaTRSTCTL, HLT_REQ, HLT_REQ);
+       lynx_28g_lane_rmw(lane, LNaRRSTCTL, HLT_REQ, HLT_REQ);
+
+       /* Wait until the halting process is complete */
+       do {
+               trstctl = lynx_28g_lane_read(lane, LNaTRSTCTL);
+               rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
+       } while ((trstctl & LYNX_28G_LNaTRSTCTL_HLT_REQ) ||
+                (rrstctl & LYNX_28G_LNaRRSTCTL_HLT_REQ));
+
+       lane->powered_up = false;
+
+       return 0;
+}
+
+static int lynx_28g_power_on(struct phy *phy)
+{
+       struct lynx_28g_lane *lane = phy_get_drvdata(phy);
+       u32 trstctl, rrstctl;
+
+       if (lane->powered_up)
+               return 0;
+
+       /* Issue a reset request on the lane */
+       lynx_28g_lane_rmw(lane, LNaTRSTCTL, RST_REQ, RST_REQ);
+       lynx_28g_lane_rmw(lane, LNaRRSTCTL, RST_REQ, RST_REQ);
+
+       /* Wait until the reset sequence is completed */
+       do {
+               trstctl = lynx_28g_lane_read(lane, LNaTRSTCTL);
+               rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
+       } while (!(trstctl & LYNX_28G_LNaTRSTCTL_RST_DONE) ||
+                !(rrstctl & LYNX_28G_LNaRRSTCTL_RST_DONE));
+
+       lane->powered_up = true;
+
+       return 0;
+}
+
+static int lynx_28g_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+       struct lynx_28g_lane *lane = phy_get_drvdata(phy);
+       struct lynx_28g_priv *priv = lane->priv;
+       int powered_up = lane->powered_up;
+       int err = 0;
+
+       if (mode != PHY_MODE_ETHERNET)
+               return -EOPNOTSUPP;
+
+       if (lane->interface == PHY_INTERFACE_MODE_NA)
+               return -EOPNOTSUPP;
+
+       if (!lynx_28g_supports_interface(priv, submode))
+               return -EOPNOTSUPP;
+
+       /* If the lane is powered up, put the lane into the halt state while
+        * the reconfiguration is being done.
+        */
+       if (powered_up)
+               lynx_28g_power_off(phy);
+
+       switch (submode) {
+       case PHY_INTERFACE_MODE_SGMII:
+       case PHY_INTERFACE_MODE_1000BASEX:
+               lynx_28g_lane_set_sgmii(lane);
+               break;
+       case PHY_INTERFACE_MODE_10GBASER:
+               lynx_28g_lane_set_10gbaser(lane);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+
+       lane->interface = submode;
+
+out:
+       /* Power up the lane if necessary */
+       if (powered_up)
+               lynx_28g_power_on(phy);
+
+       return err;
+}
+
+static int lynx_28g_validate(struct phy *phy, enum phy_mode mode, int submode,
+                            union phy_configure_opts *opts __always_unused)
+{
+       struct lynx_28g_lane *lane = phy_get_drvdata(phy);
+       struct lynx_28g_priv *priv = lane->priv;
+
+       if (mode != PHY_MODE_ETHERNET)
+               return -EOPNOTSUPP;
+
+       if (!lynx_28g_supports_interface(priv, submode))
+               return -EOPNOTSUPP;
+
+       return 0;
+}
+
+static int lynx_28g_init(struct phy *phy)
+{
+       struct lynx_28g_lane *lane = phy_get_drvdata(phy);
+
+       /* Mark the fact that the lane was initialized */
+       lane->init = true;
+
+       /* SerDes lanes are powered on at boot time. Any lane that is managed
+        * by this driver will get powered down at init time, i.e. at
+        * dpaa2-eth probe time.
+        */
+       lane->powered_up = true;
+       lynx_28g_power_off(phy);
+
+       return 0;
+}
+
+static const struct phy_ops lynx_28g_ops = {
+       .init           = lynx_28g_init,
+       .power_on       = lynx_28g_power_on,
+       .power_off      = lynx_28g_power_off,
+       .set_mode       = lynx_28g_set_mode,
+       .validate       = lynx_28g_validate,
+       .owner          = THIS_MODULE,
+};
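+
+/* Illustrative usage (not part of this driver): a MAC driver consuming a
+ * lane would typically go through the generic PHY API, for example
+ *
+ *     phy = devm_of_phy_get(dev, np, NULL);
+ *     phy_init(phy);
+ *     phy_set_mode_ext(phy, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_10GBASER);
+ *     phy_power_on(phy);
+ *
+ * which maps onto the .init, .set_mode and .power_on callbacks above.
+ */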
+
+static void lynx_28g_pll_read_configuration(struct lynx_28g_priv *priv)
+{
+       struct lynx_28g_pll *pll;
+       int i;
+
+       for (i = 0; i < LYNX_28G_NUM_PLL; i++) {
+               pll = &priv->pll[i];
+               pll->priv = priv;
+               pll->id = i;
+
+               pll->rstctl = lynx_28g_pll_read(pll, PLLnRSTCTL);
+               pll->cr0 = lynx_28g_pll_read(pll, PLLnCR0);
+               pll->cr1 = lynx_28g_pll_read(pll, PLLnCR1);
+
+               if (LYNX_28G_PLLnRSTCTL_DIS(pll->rstctl))
+                       continue;
+
+               switch (LYNX_28G_PLLnCR1_FRATE_SEL(pll->cr1)) {
+               case LYNX_28G_PLLnCR1_FRATE_5G_10GVCO:
+               case LYNX_28G_PLLnCR1_FRATE_5G_25GVCO:
+                       /* 5GHz clock net */
+                       __set_bit(PHY_INTERFACE_MODE_1000BASEX, pll->supported);
+                       __set_bit(PHY_INTERFACE_MODE_SGMII, pll->supported);
+                       break;
+               case LYNX_28G_PLLnCR1_FRATE_10G_20GVCO:
+                       /* 10.3125GHz clock net */
+                       __set_bit(PHY_INTERFACE_MODE_10GBASER, pll->supported);
+                       break;
+               default:
+                       /* 6GHz, 12.890625GHz, 8GHz */
+                       break;
+               }
+       }
+}
+
+#define work_to_lynx(w) container_of((w), struct lynx_28g_priv, cdr_check.work)
+
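+/* Watchdog for receiver CDR lock: runs every second on the power-efficient
+ * workqueue and resets the receiver of any powered-up lane whose clock/data
+ * recovery block reports a loss of lock.
+ */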
+static void lynx_28g_cdr_lock_check(struct work_struct *work)
+{
+       struct lynx_28g_priv *priv = work_to_lynx(work);
+       struct lynx_28g_lane *lane;
+       u32 rrstctl;
+       int i;
+
+       for (i = 0; i < LYNX_28G_NUM_LANE; i++) {
+               lane = &priv->lane[i];
+
+               if (!lane->init)
+                       continue;
+
+               if (!lane->powered_up)
+                       continue;
+
+               rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
+               if (!(rrstctl & LYNX_28G_LNaRRSTCTL_CDR_LOCK)) {
+                       lynx_28g_lane_rmw(lane, LNaRRSTCTL, RST_REQ, RST_REQ);
+                       do {
+                               rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
+                       } while (!(rrstctl & LYNX_28G_LNaRRSTCTL_RST_DONE));
+               }
+       }
+       queue_delayed_work(system_power_efficient_wq, &priv->cdr_check,
+                          msecs_to_jiffies(1000));
+}
+
+static void lynx_28g_lane_read_configuration(struct lynx_28g_lane *lane)
+{
+       u32 pss, protocol;
+
+       pss = lynx_28g_lane_read(lane, LNaPSS);
+       protocol = LYNX_28G_LNaPSS_TYPE(pss);
+       switch (protocol) {
+       case LYNX_28G_LNaPSS_TYPE_SGMII:
+               lane->interface = PHY_INTERFACE_MODE_SGMII;
+               break;
+       case LYNX_28G_LNaPSS_TYPE_XFI:
+               lane->interface = PHY_INTERFACE_MODE_10GBASER;
+               break;
+       default:
+               lane->interface = PHY_INTERFACE_MODE_NA;
+       }
+}
+
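+/* Translate a one-cell PHY specifier (the lane index) into the matching
+ * struct phy; a consumer node would reference a lane as, illustratively,
+ * "phys = <&serdes 0>;".
+ */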
+static struct phy *lynx_28g_xlate(struct device *dev,
+                                 struct of_phandle_args *args)
+{
+       struct lynx_28g_priv *priv = dev_get_drvdata(dev);
+       int idx = args->args[0];
+
+       if (WARN_ON(idx >= LYNX_28G_NUM_LANE))
+               return ERR_PTR(-EINVAL);
+
+       return priv->lane[idx].phy;
+}
+
+static int lynx_28g_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct phy_provider *provider;
+       struct lynx_28g_priv *priv;
+       int i;
+
+       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+       priv->dev = &pdev->dev;
+
+       priv->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(priv->base))
+               return PTR_ERR(priv->base);
+
+       lynx_28g_pll_read_configuration(priv);
+
+       for (i = 0; i < LYNX_28G_NUM_LANE; i++) {
+               struct lynx_28g_lane *lane = &priv->lane[i];
+               struct phy *phy;
+
+               memset(lane, 0, sizeof(*lane));
+
+               phy = devm_phy_create(&pdev->dev, NULL, &lynx_28g_ops);
+               if (IS_ERR(phy))
+                       return PTR_ERR(phy);
+
+               lane->priv = priv;
+               lane->phy = phy;
+               lane->id = i;
+               phy_set_drvdata(phy, lane);
+               lynx_28g_lane_read_configuration(lane);
+       }
+
+       dev_set_drvdata(dev, priv);
+
+       INIT_DELAYED_WORK(&priv->cdr_check, lynx_28g_cdr_lock_check);
+
+       queue_delayed_work(system_power_efficient_wq, &priv->cdr_check,
+                          msecs_to_jiffies(1000));
+
+       provider = devm_of_phy_provider_register(&pdev->dev, lynx_28g_xlate);
+
+       return PTR_ERR_OR_ZERO(provider);
+}
+
+static const struct of_device_id lynx_28g_of_match_table[] = {
+       { .compatible = "fsl,lynx-28g" },
+       { },
+};
+MODULE_DEVICE_TABLE(of, lynx_28g_of_match_table);
+
+static struct platform_driver lynx_28g_driver = {
+       .probe  = lynx_28g_probe,
+       .driver = {
+               .name = "lynx-28g",
+               .of_match_table = lynx_28g_of_match_table,
+       },
+};
+module_platform_driver(lynx_28g_driver);
+
+MODULE_AUTHOR("Ioana Ciornei <ioana.ciornei@nxp.com>");
+MODULE_DESCRIPTION("Lynx 28G SerDes PHY driver for Layerscape SoCs");
+MODULE_LICENSE("GPL v2");
index 80d6750..1f40137 100644 (file)
 #include "../core.h"
 #include "pinctrl-sunxi.h"
 
+/*
+ * These lock classes tell lockdep that GPIO IRQs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key sunxi_pinctrl_irq_lock_class;
+static struct lock_class_key sunxi_pinctrl_irq_request_class;
+
 static struct irq_chip sunxi_pinctrl_edge_irq_chip;
 static struct irq_chip sunxi_pinctrl_level_irq_chip;
 
@@ -837,7 +844,8 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
 {
        struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
 
-       return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, offset, true);
+       return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL,
+                                           chip->base + offset, true);
 }
 
 static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -890,7 +898,8 @@ static int sunxi_pinctrl_gpio_direction_output(struct gpio_chip *chip,
        struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
 
        sunxi_pinctrl_gpio_set(chip, offset, value);
-       return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, offset, false);
+       return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL,
+                                           chip->base + offset, false);
 }
 
 static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
@@ -1555,6 +1564,8 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
        for (i = 0; i < (pctl->desc->irq_banks * IRQ_PER_BANK); i++) {
                int irqno = irq_create_mapping(pctl->domain, i);
 
+               irq_set_lockdep_class(irqno, &sunxi_pinctrl_irq_lock_class,
+                                     &sunxi_pinctrl_irq_request_class);
                irq_set_chip_and_handler(irqno, &sunxi_pinctrl_edge_irq_chip,
                                         handle_edge_irq);
                irq_set_chip_data(irqno, pctl);
index c1c959f..97c1be4 100644 (file)
@@ -6,13 +6,17 @@
 #define pr_fmt(fmt) "IDT_82p33xxx: " fmt
 
 #include <linux/firmware.h>
-#include <linux/i2c.h>
+#include <linux/platform_device.h>
 #include <linux/module.h>
 #include <linux/ptp_clock_kernel.h>
 #include <linux/delay.h>
+#include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/timekeeping.h>
 #include <linux/bitops.h>
+#include <linux/of.h>
+#include <linux/mfd/rsmu.h>
+#include <linux/mfd/idt82p33_reg.h>
 
 #include "ptp_private.h"
 #include "ptp_idt82p33.h"
@@ -24,15 +28,25 @@ MODULE_LICENSE("GPL");
 MODULE_FIRMWARE(FW_FILENAME);
 
 /* Module Parameters */
-static u32 sync_tod_timeout = SYNC_TOD_TIMEOUT_SEC;
-module_param(sync_tod_timeout, uint, 0);
-MODULE_PARM_DESC(sync_tod_timeout,
-"duration in second to keep SYNC_TOD on (set to 0 to keep it always on)");
-
 static u32 phase_snap_threshold = SNAP_THRESHOLD_NS;
 module_param(phase_snap_threshold, uint, 0);
 MODULE_PARM_DESC(phase_snap_threshold,
-"threshold (150000ns by default) below which adjtime would ignore");
+"threshold (10000ns by default) below which adjtime would use double dco");
+
+static char *firmware;
+module_param(firmware, charp, 0);
+
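+/* Register access goes through the regmap supplied by the parent rsmu MFD
+ * driver, which takes over the page-register handling that the i2c
+ * xfer/page helpers removed below used to implement by hand.
+ */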
+static inline int idt82p33_read(struct idt82p33 *idt82p33, u16 regaddr,
+                               u8 *buf, u16 count)
+{
+       return regmap_bulk_read(idt82p33->regmap, regaddr, buf, count);
+}
+
+static inline int idt82p33_write(struct idt82p33 *idt82p33, u16 regaddr,
+                                u8 *buf, u16 count)
+{
+       return regmap_bulk_write(idt82p33->regmap, regaddr, buf, count);
+}
 
 static void idt82p33_byte_array_to_timespec(struct timespec64 *ts,
                                            u8 buf[TOD_BYTE_COUNT])
@@ -78,110 +92,6 @@ static void idt82p33_timespec_to_byte_array(struct timespec64 const *ts,
        }
 }
 
-static int idt82p33_xfer_read(struct idt82p33 *idt82p33,
-                             unsigned char regaddr,
-                             unsigned char *buf,
-                             unsigned int count)
-{
-       struct i2c_client *client = idt82p33->client;
-       struct i2c_msg msg[2];
-       int cnt;
-
-       msg[0].addr = client->addr;
-       msg[0].flags = 0;
-       msg[0].len = 1;
-       msg[0].buf = &regaddr;
-
-       msg[1].addr = client->addr;
-       msg[1].flags = I2C_M_RD;
-       msg[1].len = count;
-       msg[1].buf = buf;
-
-       cnt = i2c_transfer(client->adapter, msg, 2);
-       if (cnt < 0) {
-               dev_err(&client->dev, "i2c_transfer returned %d\n", cnt);
-               return cnt;
-       } else if (cnt != 2) {
-               dev_err(&client->dev,
-                       "i2c_transfer sent only %d of %d messages\n", cnt, 2);
-               return -EIO;
-       }
-       return 0;
-}
-
-static int idt82p33_xfer_write(struct idt82p33 *idt82p33,
-                              u8 regaddr,
-                              u8 *buf,
-                              u16 count)
-{
-       struct i2c_client *client = idt82p33->client;
-       /* we add 1 byte for device register */
-       u8 msg[IDT82P33_MAX_WRITE_COUNT + 1];
-       int err;
-
-       if (count > IDT82P33_MAX_WRITE_COUNT)
-               return -EINVAL;
-
-       msg[0] = regaddr;
-       memcpy(&msg[1], buf, count);
-
-       err = i2c_master_send(client, msg, count + 1);
-       if (err < 0) {
-               dev_err(&client->dev, "i2c_master_send returned %d\n", err);
-               return err;
-       }
-
-       return 0;
-}
-
-static int idt82p33_page_offset(struct idt82p33 *idt82p33, unsigned char val)
-{
-       int err;
-
-       if (idt82p33->page_offset == val)
-               return 0;
-
-       err = idt82p33_xfer_write(idt82p33, PAGE_ADDR, &val, sizeof(val));
-       if (err)
-               dev_err(&idt82p33->client->dev,
-                       "failed to set page offset %d\n", val);
-       else
-               idt82p33->page_offset = val;
-
-       return err;
-}
-
-static int idt82p33_rdwr(struct idt82p33 *idt82p33, unsigned int regaddr,
-                        unsigned char *buf, unsigned int count, bool write)
-{
-       u8 offset, page;
-       int err;
-
-       page = _PAGE(regaddr);
-       offset = _OFFSET(regaddr);
-
-       err = idt82p33_page_offset(idt82p33, page);
-       if (err)
-               return err;
-
-       if (write)
-               return idt82p33_xfer_write(idt82p33, offset, buf, count);
-
-       return idt82p33_xfer_read(idt82p33, offset, buf, count);
-}
-
-static int idt82p33_read(struct idt82p33 *idt82p33, unsigned int regaddr,
-                       unsigned char *buf, unsigned int count)
-{
-       return idt82p33_rdwr(idt82p33, regaddr, buf, count, false);
-}
-
-static int idt82p33_write(struct idt82p33 *idt82p33, unsigned int regaddr,
-                       unsigned char *buf, unsigned int count)
-{
-       return idt82p33_rdwr(idt82p33, regaddr, buf, count, true);
-}
-
 static int idt82p33_dpll_set_mode(struct idt82p33_channel *channel,
                                  enum pll_mode mode)
 {
@@ -206,7 +116,7 @@ static int idt82p33_dpll_set_mode(struct idt82p33_channel *channel,
        if (err)
                return err;
 
-       channel->pll_mode = dpll_mode;
+       channel->pll_mode = mode;
 
        return 0;
 }
@@ -467,7 +377,7 @@ static int idt82p33_measure_tod_write_overhead(struct idt82p33_channel *channel)
        err = idt82p33_measure_settime_gettime_gap_overhead(channel, &gap_ns);
 
        if (err) {
-               dev_err(&idt82p33->client->dev,
+               dev_err(idt82p33->dev,
                        "Failed in %s with err %d!\n", __func__, err);
                return err;
        }
@@ -499,8 +409,8 @@ static int idt82p33_check_and_set_masks(struct idt82p33 *idt82p33,
 
        if (page == PLLMASK_ADDR_HI && offset == PLLMASK_ADDR_LO) {
                if ((val & 0xfc) || !(val & 0x3)) {
-                       dev_err(&idt82p33->client->dev,
-                               "Invalid PLL mask 0x%hhx\n", val);
+                       dev_err(idt82p33->dev,
+                               "Invalid PLL mask 0x%x\n", val);
                        err = -EINVAL;
                } else {
                        idt82p33->pll_mask = val;
@@ -520,14 +430,14 @@ static void idt82p33_display_masks(struct idt82p33 *idt82p33)
 {
        u8 mask, i;
 
-       dev_info(&idt82p33->client->dev,
+       dev_info(idt82p33->dev,
                 "pllmask = 0x%02x\n", idt82p33->pll_mask);
 
        for (i = 0; i < MAX_PHC_PLL; i++) {
                mask = 1 << i;
 
                if (mask & idt82p33->pll_mask)
-                       dev_info(&idt82p33->client->dev,
+                       dev_info(idt82p33->dev,
                                 "PLL%d output_mask = 0x%04x\n",
                                 i, idt82p33->channel[i].output_mask);
        }
@@ -539,11 +449,6 @@ static int idt82p33_sync_tod(struct idt82p33_channel *channel, bool enable)
        u8 sync_cnfg;
        int err;
 
-       /* Turn it off after sync_tod_timeout seconds */
-       if (enable && sync_tod_timeout)
-               ptp_schedule_worker(channel->ptp_clock,
-                                   sync_tod_timeout * HZ);
-
        err = idt82p33_read(idt82p33, channel->dpll_sync_cnfg,
                            &sync_cnfg, sizeof(sync_cnfg));
        if (err)
@@ -557,22 +462,6 @@ static int idt82p33_sync_tod(struct idt82p33_channel *channel, bool enable)
                              &sync_cnfg, sizeof(sync_cnfg));
 }
 
-static long idt82p33_sync_tod_work_handler(struct ptp_clock_info *ptp)
-{
-       struct idt82p33_channel *channel =
-                       container_of(ptp, struct idt82p33_channel, caps);
-       struct idt82p33 *idt82p33 = channel->idt82p33;
-
-       mutex_lock(&idt82p33->reg_lock);
-
-       (void)idt82p33_sync_tod(channel, false);
-
-       mutex_unlock(&idt82p33->reg_lock);
-
-       /* Return a negative value here to not reschedule */
-       return -1;
-}
-
 static int idt82p33_output_enable(struct idt82p33_channel *channel,
                                  bool enable, unsigned int outn)
 {
@@ -634,18 +523,11 @@ static int idt82p33_enable_tod(struct idt82p33_channel *channel)
        struct idt82p33 *idt82p33 = channel->idt82p33;
        struct timespec64 ts = {0, 0};
        int err;
-       u8 val;
-
-       val = 0;
-       err = idt82p33_write(idt82p33, channel->dpll_input_mode_cnfg,
-                            &val, sizeof(val));
-       if (err)
-               return err;
 
        err = idt82p33_measure_tod_write_overhead(channel);
 
        if (err) {
-               dev_err(&idt82p33->client->dev,
+               dev_err(idt82p33->dev,
                        "Failed in %s with err %d!\n", __func__, err);
                return err;
        }
@@ -673,16 +555,14 @@ static void idt82p33_ptp_clock_unregister_all(struct idt82p33 *idt82p33)
 }
 
 static int idt82p33_enable(struct ptp_clock_info *ptp,
-                        struct ptp_clock_request *rq, int on)
+                          struct ptp_clock_request *rq, int on)
 {
        struct idt82p33_channel *channel =
                        container_of(ptp, struct idt82p33_channel, caps);
        struct idt82p33 *idt82p33 = channel->idt82p33;
-       int err;
-
-       err = -EOPNOTSUPP;
+       int err = -EOPNOTSUPP;
 
-       mutex_lock(&idt82p33->reg_lock);
+       mutex_lock(idt82p33->lock);
 
        if (rq->type == PTP_CLK_REQ_PEROUT) {
                if (!on)
@@ -690,15 +570,18 @@ static int idt82p33_enable(struct ptp_clock_info *ptp,
                                                     &rq->perout);
                /* Only accept a 1-PPS aligned to the second. */
                else if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
-                   rq->perout.period.nsec) {
+                        rq->perout.period.nsec)
                        err = -ERANGE;
-               else
+               else
                        err = idt82p33_perout_enable(channel, true,
                                                     &rq->perout);
        }
 
-       mutex_unlock(&idt82p33->reg_lock);
+       mutex_unlock(idt82p33->lock);
 
+       if (err)
+               dev_err(idt82p33->dev,
+                       "Failed in %s with err %d!\n", __func__, err);
        return err;
 }
 
@@ -727,11 +610,11 @@ static int idt82p33_adjwritephase(struct ptp_clock_info *ptp, s32 offset_ns)
        val[3] = (offset_regval >> 24) & 0x1F;
        val[3] |= PH_OFFSET_EN;
 
-       mutex_lock(&idt82p33->reg_lock);
+       mutex_lock(idt82p33->lock);
 
        err = idt82p33_dpll_set_mode(channel, PLL_MODE_WPH);
        if (err) {
-               dev_err(&idt82p33->client->dev,
+               dev_err(idt82p33->dev,
                        "Failed in %s with err %d!\n", __func__, err);
                goto out;
        }
@@ -740,7 +623,7 @@ static int idt82p33_adjwritephase(struct ptp_clock_info *ptp, s32 offset_ns)
                             sizeof(val));
 
 out:
-       mutex_unlock(&idt82p33->reg_lock);
+       mutex_unlock(idt82p33->lock);
        return err;
 }
 
@@ -751,12 +634,12 @@ static int idt82p33_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
        struct idt82p33 *idt82p33 = channel->idt82p33;
        int err;
 
-       mutex_lock(&idt82p33->reg_lock);
+       mutex_lock(idt82p33->lock);
        err = _idt82p33_adjfine(channel, scaled_ppm);
+       mutex_unlock(idt82p33->lock);
        if (err)
-               dev_err(&idt82p33->client->dev,
+               dev_err(idt82p33->dev,
                        "Failed in %s with err %d!\n", __func__, err);
-       mutex_unlock(&idt82p33->reg_lock);
 
        return err;
 }
@@ -768,29 +651,20 @@ static int idt82p33_adjtime(struct ptp_clock_info *ptp, s64 delta_ns)
        struct idt82p33 *idt82p33 = channel->idt82p33;
        int err;
 
-       mutex_lock(&idt82p33->reg_lock);
+       mutex_lock(idt82p33->lock);
 
        if (abs(delta_ns) < phase_snap_threshold) {
-               mutex_unlock(&idt82p33->reg_lock);
+               mutex_unlock(idt82p33->lock);
                return 0;
        }
 
        err = _idt82p33_adjtime(channel, delta_ns);
 
-       if (err) {
-               mutex_unlock(&idt82p33->reg_lock);
-               dev_err(&idt82p33->client->dev,
-                       "Adjtime failed in %s with err %d!\n", __func__, err);
-               return err;
-       }
+       mutex_unlock(idt82p33->lock);
 
-       err = idt82p33_sync_tod(channel, true);
        if (err)
-               dev_err(&idt82p33->client->dev,
-                       "Sync_tod failed in %s with err %d!\n", __func__, err);
-
-       mutex_unlock(&idt82p33->reg_lock);
-
+               dev_err(idt82p33->dev,
+                       "Failed in %s with err %d!\n", __func__, err);
        return err;
 }
 
@@ -801,31 +675,31 @@ static int idt82p33_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
        struct idt82p33 *idt82p33 = channel->idt82p33;
        int err;
 
-       mutex_lock(&idt82p33->reg_lock);
+       mutex_lock(idt82p33->lock);
        err = _idt82p33_gettime(channel, ts);
+       mutex_unlock(idt82p33->lock);
+
        if (err)
-               dev_err(&idt82p33->client->dev,
+               dev_err(idt82p33->dev,
                        "Failed in %s with err %d!\n", __func__, err);
-       mutex_unlock(&idt82p33->reg_lock);
-
        return err;
 }
 
 static int idt82p33_settime(struct ptp_clock_info *ptp,
-                       const struct timespec64 *ts)
+                           const struct timespec64 *ts)
 {
        struct idt82p33_channel *channel =
                        container_of(ptp, struct idt82p33_channel, caps);
        struct idt82p33 *idt82p33 = channel->idt82p33;
        int err;
 
-       mutex_lock(&idt82p33->reg_lock);
+       mutex_lock(idt82p33->lock);
        err = _idt82p33_settime(channel, ts);
+       mutex_unlock(idt82p33->lock);
+
        if (err)
-               dev_err(&idt82p33->client->dev,
+               dev_err(idt82p33->dev,
                        "Failed in %s with err %d!\n", __func__, err);
-       mutex_unlock(&idt82p33->reg_lock);
-
        return err;
 }
 
@@ -864,7 +738,7 @@ static int idt82p33_channel_init(struct idt82p33_channel *channel, int index)
 static void idt82p33_caps_init(struct ptp_clock_info *caps)
 {
        caps->owner = THIS_MODULE;
-       caps->max_adj = 92000;
+       caps->max_adj = DCO_MAX_PPB;
        caps->n_per_out = 11;
        caps->adjphase = idt82p33_adjwritephase;
        caps->adjfine = idt82p33_adjfine;
@@ -872,7 +746,6 @@ static void idt82p33_caps_init(struct ptp_clock_info *caps)
        caps->gettime64 = idt82p33_gettime;
        caps->settime64 = idt82p33_settime;
        caps->enable = idt82p33_enable;
-       caps->do_aux_work = idt82p33_sync_tod_work_handler;
 }
 
 static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
@@ -887,7 +760,7 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
 
        err = idt82p33_channel_init(channel, index);
        if (err) {
-               dev_err(&idt82p33->client->dev,
+               dev_err(idt82p33->dev,
                        "Channel_init failed in %s with err %d!\n",
                        __func__, err);
                return err;
@@ -912,7 +785,7 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
 
        err = idt82p33_dpll_set_mode(channel, PLL_MODE_DCO);
        if (err) {
-               dev_err(&idt82p33->client->dev,
+               dev_err(idt82p33->dev,
                        "Dpll_set_mode failed in %s with err %d!\n",
                        __func__, err);
                return err;
@@ -920,13 +793,13 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
 
        err = idt82p33_enable_tod(channel);
        if (err) {
-               dev_err(&idt82p33->client->dev,
+               dev_err(idt82p33->dev,
                        "Enable_tod failed in %s with err %d!\n",
                        __func__, err);
                return err;
        }
 
-       dev_info(&idt82p33->client->dev, "PLL%d registered as ptp%d\n",
+       dev_info(idt82p33->dev, "PLL%d registered as ptp%d\n",
                 index, channel->ptp_clock->index);
 
        return 0;
@@ -940,25 +813,24 @@ static int idt82p33_load_firmware(struct idt82p33 *idt82p33)
        int err;
        s32 len;
 
-       dev_dbg(&idt82p33->client->dev,
-               "requesting firmware '%s'\n", FW_FILENAME);
+       dev_dbg(idt82p33->dev, "requesting firmware '%s'\n", FW_FILENAME);
 
-       err = request_firmware(&fw, FW_FILENAME, &idt82p33->client->dev);
+       err = request_firmware(&fw, FW_FILENAME, idt82p33->dev);
 
        if (err) {
-               dev_err(&idt82p33->client->dev,
+               dev_err(idt82p33->dev,
                        "Failed in %s with err %d!\n", __func__, err);
                return err;
        }
 
-       dev_dbg(&idt82p33->client->dev, "firmware size %zu bytes\n", fw->size);
+       dev_dbg(idt82p33->dev, "firmware size %zu bytes\n", fw->size);
 
        rec = (struct idt82p33_fwrc *) fw->data;
 
        for (len = fw->size; len > 0; len -= sizeof(*rec)) {
 
                if (rec->reserved) {
-                       dev_err(&idt82p33->client->dev,
+                       dev_err(idt82p33->dev,
                                "bad firmware, reserved field non-zero\n");
                        err = -EINVAL;
                } else {
@@ -973,16 +845,11 @@ static int idt82p33_load_firmware(struct idt82p33 *idt82p33)
                }
 
                if (err == 0) {
-                       /* maximum 8 pages  */
-                       if (page >= PAGE_NUM)
-                               continue;
-
                        /* Page size 128, last 4 bytes of page skipped */
-                       if (((loaddr > 0x7b) && (loaddr <= 0x7f))
-                            || loaddr > 0xfb)
+                       if (loaddr > 0x7b)
                                continue;
 
-                       err = idt82p33_write(idt82p33, _ADDR(page, loaddr),
+                       err = idt82p33_write(idt82p33, REG_ADDR(page, loaddr),
                                             &val, sizeof(val));
                }
 
@@ -997,36 +864,34 @@ out:
 }
 
 
-static int idt82p33_probe(struct i2c_client *client,
-                         const struct i2c_device_id *id)
+static int idt82p33_probe(struct platform_device *pdev)
 {
+       struct rsmu_ddata *ddata = dev_get_drvdata(pdev->dev.parent);
        struct idt82p33 *idt82p33;
        int err;
        u8 i;
 
-       (void)id;
-
-       idt82p33 = devm_kzalloc(&client->dev,
+       idt82p33 = devm_kzalloc(&pdev->dev,
                                sizeof(struct idt82p33), GFP_KERNEL);
        if (!idt82p33)
                return -ENOMEM;
 
-       mutex_init(&idt82p33->reg_lock);
-
-       idt82p33->client = client;
-       idt82p33->page_offset = 0xff;
+       idt82p33->dev = &pdev->dev;
+       idt82p33->mfd = pdev->dev.parent;
+       idt82p33->lock = &ddata->lock;
+       idt82p33->regmap = ddata->regmap;
        idt82p33->tod_write_overhead_ns = 0;
        idt82p33->calculate_overhead_flag = 0;
        idt82p33->pll_mask = DEFAULT_PLL_MASK;
        idt82p33->channel[0].output_mask = DEFAULT_OUTPUT_MASK_PLL0;
        idt82p33->channel[1].output_mask = DEFAULT_OUTPUT_MASK_PLL1;
 
-       mutex_lock(&idt82p33->reg_lock);
+       mutex_lock(idt82p33->lock);
 
        err = idt82p33_load_firmware(idt82p33);
 
        if (err)
-               dev_warn(&idt82p33->client->dev,
+               dev_warn(idt82p33->dev,
                         "loading firmware failed with %d\n", err);
 
        if (idt82p33->pll_mask) {
@@ -1034,7 +899,7 @@ static int idt82p33_probe(struct i2c_client *client,
                        if (idt82p33->pll_mask & (1 << i)) {
                                err = idt82p33_enable_channel(idt82p33, i);
                                if (err) {
-                                       dev_err(&idt82p33->client->dev,
+                                       dev_err(idt82p33->dev,
                                                "Failed in %s with err %d!\n",
                                                __func__, err);
                                        break;
@@ -1042,69 +907,38 @@ static int idt82p33_probe(struct i2c_client *client,
                        }
                }
        } else {
-               dev_err(&idt82p33->client->dev,
+               dev_err(idt82p33->dev,
                        "no PLLs flagged as PHCs, nothing to do\n");
                err = -ENODEV;
        }
 
-       mutex_unlock(&idt82p33->reg_lock);
+       mutex_unlock(idt82p33->lock);
 
        if (err) {
                idt82p33_ptp_clock_unregister_all(idt82p33);
                return err;
        }
 
-       i2c_set_clientdata(client, idt82p33);
+       platform_set_drvdata(pdev, idt82p33);
 
        return 0;
 }
 
-static int idt82p33_remove(struct i2c_client *client)
+static int idt82p33_remove(struct platform_device *pdev)
 {
-       struct idt82p33 *idt82p33 = i2c_get_clientdata(client);
+       struct idt82p33 *idt82p33 = platform_get_drvdata(pdev);
 
        idt82p33_ptp_clock_unregister_all(idt82p33);
-       mutex_destroy(&idt82p33->reg_lock);
 
        return 0;
 }
 
-#ifdef CONFIG_OF
-static const struct of_device_id idt82p33_dt_id[] = {
-       { .compatible = "idt,82p33810" },
-       { .compatible = "idt,82p33813" },
-       { .compatible = "idt,82p33814" },
-       { .compatible = "idt,82p33831" },
-       { .compatible = "idt,82p33910" },
-       { .compatible = "idt,82p33913" },
-       { .compatible = "idt,82p33914" },
-       { .compatible = "idt,82p33931" },
-       {},
-};
-MODULE_DEVICE_TABLE(of, idt82p33_dt_id);
-#endif
-
-static const struct i2c_device_id idt82p33_i2c_id[] = {
-       { "idt82p33810", },
-       { "idt82p33813", },
-       { "idt82p33814", },
-       { "idt82p33831", },
-       { "idt82p33910", },
-       { "idt82p33913", },
-       { "idt82p33914", },
-       { "idt82p33931", },
-       {},
-};
-MODULE_DEVICE_TABLE(i2c, idt82p33_i2c_id);
-
-static struct i2c_driver idt82p33_driver = {
+static struct platform_driver idt82p33_driver = {
        .driver = {
-               .of_match_table = of_match_ptr(idt82p33_dt_id),
-               .name           = "idt82p33",
+               .name = "82p33x1x-phc",
        },
-       .probe          = idt82p33_probe,
-       .remove         = idt82p33_remove,
-       .id_table       = idt82p33_i2c_id,
+       .probe = idt82p33_probe,
+       .remove = idt82p33_remove,
 };
 
-module_i2c_driver(idt82p33_driver);
+module_platform_driver(idt82p33_driver);
index 1c7a0f0..0ea1c35 100644 (file)
@@ -8,94 +8,19 @@
 #define PTP_IDT82P33_H
 
 #include <linux/ktime.h>
-#include <linux/workqueue.h>
+#include <linux/mfd/idt82p33_reg.h>
+#include <linux/regmap.h>
 
-
-/* Register Map - AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf */
-#define PAGE_NUM (8)
-#define _ADDR(page, offset) (((page) << 0x7) | ((offset) & 0x7f))
-#define _PAGE(addr) (((addr) >> 0x7) & 0x7)
-#define _OFFSET(addr)  ((addr) & 0x7f)
-
-#define DPLL1_TOD_CNFG 0x134
-#define DPLL2_TOD_CNFG 0x1B4
-
-#define DPLL1_TOD_STS 0x10B
-#define DPLL2_TOD_STS 0x18B
-
-#define DPLL1_TOD_TRIGGER 0x115
-#define DPLL2_TOD_TRIGGER 0x195
-
-#define DPLL1_OPERATING_MODE_CNFG 0x120
-#define DPLL2_OPERATING_MODE_CNFG 0x1A0
-
-#define DPLL1_HOLDOVER_FREQ_CNFG 0x12C
-#define DPLL2_HOLDOVER_FREQ_CNFG 0x1AC
-
-#define DPLL1_PHASE_OFFSET_CNFG 0x143
-#define DPLL2_PHASE_OFFSET_CNFG 0x1C3
-
-#define DPLL1_SYNC_EDGE_CNFG 0X140
-#define DPLL2_SYNC_EDGE_CNFG 0X1C0
-
-#define DPLL1_INPUT_MODE_CNFG 0X116
-#define DPLL2_INPUT_MODE_CNFG 0X196
-
-#define OUT_MUX_CNFG(outn) _ADDR(0x6, (0xC * (outn)))
-
-#define PAGE_ADDR 0x7F
-/* Register Map end */
-
-/* Register definitions - AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf*/
-#define TOD_TRIGGER(wr_trig, rd_trig) ((wr_trig & 0xf) << 4 | (rd_trig & 0xf))
-#define SYNC_TOD BIT(1)
-#define PH_OFFSET_EN BIT(7)
-#define SQUELCH_ENABLE BIT(5)
-
-/* Bit definitions for the DPLL_MODE register */
-#define PLL_MODE_SHIFT                    (0)
-#define PLL_MODE_MASK                     (0x1F)
-
-#define PEROUT_ENABLE_OUTPUT_MASK         (0xdeadbeef)
-
-enum pll_mode {
-       PLL_MODE_MIN = 0,
-       PLL_MODE_AUTOMATIC = PLL_MODE_MIN,
-       PLL_MODE_FORCE_FREERUN = 1,
-       PLL_MODE_FORCE_HOLDOVER = 2,
-       PLL_MODE_FORCE_LOCKED = 4,
-       PLL_MODE_FORCE_PRE_LOCKED2 = 5,
-       PLL_MODE_FORCE_PRE_LOCKED = 6,
-       PLL_MODE_FORCE_LOST_PHASE = 7,
-       PLL_MODE_DCO = 10,
-       PLL_MODE_WPH = 18,
-       PLL_MODE_MAX = PLL_MODE_WPH,
-};
-
-enum hw_tod_trig_sel {
-       HW_TOD_TRIG_SEL_MIN = 0,
-       HW_TOD_TRIG_SEL_NO_WRITE = HW_TOD_TRIG_SEL_MIN,
-       HW_TOD_TRIG_SEL_SYNC_SEL = 1,
-       HW_TOD_TRIG_SEL_IN12 = 2,
-       HW_TOD_TRIG_SEL_IN13 = 3,
-       HW_TOD_TRIG_SEL_IN14 = 4,
-       HW_TOD_TRIG_SEL_TOD_PPS = 5,
-       HW_TOD_TRIG_SEL_TIMER_INTERVAL = 6,
-       HW_TOD_TRIG_SEL_MSB_PHASE_OFFSET_CNFG = 7,
-       HW_TOD_TRIG_SEL_MSB_HOLDOVER_FREQ_CNFG = 8,
-       HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG = 9,
-       HW_TOD_RD_TRIG_SEL_LSB_TOD_STS = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
-       WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
-};
-
-/* Register bit definitions end */
 #define FW_FILENAME    "idt82p33xxx.bin"
-#define MAX_PHC_PLL (2)
-#define TOD_BYTE_COUNT (10)
-#define MAX_MEASURMENT_COUNT (5)
-#define SNAP_THRESHOLD_NS (150000)
-#define SYNC_TOD_TIMEOUT_SEC (5)
-#define IDT82P33_MAX_WRITE_COUNT (512)
+#define MAX_PHC_PLL    (2)
+#define TOD_BYTE_COUNT (10)
+#define DCO_MAX_PPB     (92000)
+#define MAX_MEASURMENT_COUNT   (5)
+#define SNAP_THRESHOLD_NS      (10000)
+#define IMMEDIATE_SNAP_THRESHOLD_NS (50000)
+#define DDCO_THRESHOLD_NS      (5)
+#define IDT82P33_MAX_WRITE_COUNT       (512)
+#define PEROUT_ENABLE_OUTPUT_MASK      (0xdeadbeef)
 
 #define PLLMASK_ADDR_HI        0xFF
 #define PLLMASK_ADDR_LO        0xA5
@@ -116,15 +41,25 @@ enum hw_tod_trig_sel {
 #define DEFAULT_OUTPUT_MASK_PLL0       (0xc0)
 #define DEFAULT_OUTPUT_MASK_PLL1       DEFAULT_OUTPUT_MASK_PLL0
 
+/**
+ * @brief Maximum absolute value for write phase offset in femtoseconds
+ */
+#define WRITE_PHASE_OFFSET_LIMIT (20000052084ll)
+
+/** @brief Phase offset resolution
+ *
+ *  DPLL phase offset = 10^15 fs / ( System Clock  * 2^13)
+ *                    = 10^15 fs / ( 1638400000 * 2^13)
+ *                    = 74.5058059692382 fs
+ */
+#define IDT_T0DPLL_PHASE_RESOL 74506
+
 /* PTP Hardware Clock interface */
 struct idt82p33_channel {
        struct ptp_clock_info   caps;
        struct ptp_clock        *ptp_clock;
-       struct idt82p33 *idt82p33;
-       enum pll_mode   pll_mode;
-       /* task to turn off SYNC_TOD bit after pps sync */
-       struct delayed_work     sync_tod_work;
-       bool                    sync_tod_on;
+       struct idt82p33         *idt82p33;
+       enum pll_mode           pll_mode;
        s32                     current_freq_ppb;
        u8                      output_mask;
        u16                     dpll_tod_cnfg;
@@ -138,15 +73,17 @@ struct idt82p33_channel {
 };
 
 struct idt82p33 {
-       struct idt82p33_channel channel[MAX_PHC_PLL];
-       struct i2c_client       *client;
-       u8      page_offset;
-       u8      pll_mask;
-       ktime_t start_time;
-       int calculate_overhead_flag;
-       s64 tod_write_overhead_ns;
-       /* Protects I2C read/modify/write registers from concurrent access */
-       struct mutex    reg_lock;
+       struct idt82p33_channel channel[MAX_PHC_PLL];
+       struct device           *dev;
+       u8                      pll_mask;
+       /* Mutex shared with the parent MFD driver, serializing register access */
+       struct mutex            *lock;
+       struct regmap           *regmap;
+       struct device           *mfd;
+       /* Overhead calculation for adjtime */
+       ktime_t                 start_time;
+       int                     calculate_overhead_flag;
+       s64                     tod_write_overhead_ns;
 };
 
 /* firmware interface */
@@ -157,18 +94,4 @@ struct idt82p33_fwrc {
        u8 reserved;
 } __packed;
 
-/**
- * @brief Maximum absolute value for write phase offset in femtoseconds
- */
-#define WRITE_PHASE_OFFSET_LIMIT (20000052084ll)
-
-/** @brief Phase offset resolution
- *
- *  DPLL phase offset = 10^15 fs / ( System Clock  * 2^13)
- *                    = 10^15 fs / ( 1638400000 * 2^23)
- *                    = 74.5058059692382 fs
- */
-#define IDT_T0DPLL_PHASE_RESOL 74506
-
-
 #endif /* PTP_IDT82P33_H */
index aaefca6..c3d0fcf 100644 (file)
 #include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/platform_device.h>
+#include <linux/platform_data/i2c-xiic.h>
 #include <linux/ptp_clock_kernel.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/xilinx_spi.h>
 #include <net/devlink.h>
 #include <linux/i2c.h>
 #include <linux/mtd/mtd.h>
+#include <linux/nvmem-consumer.h>
 
 #ifndef PCI_VENDOR_ID_FACEBOOK
 #define PCI_VENDOR_ID_FACEBOOK 0x1d9b
@@ -177,6 +179,35 @@ struct dcf_slave_reg {
 
 #define DCF_S_CTRL_ENABLE      BIT(0)
 
+struct signal_reg {
+       u32     enable;
+       u32     status;
+       u32     polarity;
+       u32     version;
+       u32     __pad0[4];
+       u32     cable_delay;
+       u32     __pad1[3];
+       u32     intr;
+       u32     intr_mask;
+       u32     __pad2[2];
+       u32     start_ns;
+       u32     start_sec;
+       u32     pulse_ns;
+       u32     pulse_sec;
+       u32     period_ns;
+       u32     period_sec;
+       u32     repeat_count;
+};
+
+struct frequency_reg {
+       u32     ctrl;
+       u32     status;
+};
+#define FREQ_STATUS_VALID      BIT(31)
+#define FREQ_STATUS_ERROR      BIT(30)
+#define FREQ_STATUS_OVERRUN    BIT(29)
+#define FREQ_STATUS_MASK       (BIT(24) - 1)
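+/* The low 24 bits of the status word carry the measured frequency count,
+ * qualified by the VALID/ERROR/OVERRUN flags above.
+ */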
+
 struct ptp_ocp_flash_info {
        const char *name;
        int pci_offset;
@@ -204,6 +235,40 @@ struct ptp_ocp_ext_src {
        int                     irq_vec;
 };
 
+enum ptp_ocp_sma_mode {
+       SMA_MODE_IN,
+       SMA_MODE_OUT,
+};
+
+struct ptp_ocp_sma_connector {
+       enum    ptp_ocp_sma_mode mode;
+       bool    fixed_fcn;
+       bool    fixed_dir;
+       bool    disabled;
+};
+
+struct ocp_attr_group {
+       u64 cap;
+       const struct attribute_group *group;
+};
+
+#define OCP_CAP_BASIC  BIT(0)
+#define OCP_CAP_SIGNAL BIT(1)
+#define OCP_CAP_FREQ   BIT(2)
+
+struct ptp_ocp_signal {
+       ktime_t         period;
+       ktime_t         pulse;
+       ktime_t         phase;
+       ktime_t         start;
+       int             duty;
+       bool            polarity;
+       bool            running;
+};
+
+#define OCP_BOARD_ID_LEN               13
+#define OCP_SERIAL_LEN                 6
+
 struct ptp_ocp {
        struct pci_dev          *pdev;
        struct device           dev;
@@ -213,16 +278,21 @@ struct ptp_ocp {
        struct pps_reg __iomem  *pps_to_ext;
        struct pps_reg __iomem  *pps_to_clk;
        struct gpio_reg __iomem *pps_select;
-       struct gpio_reg __iomem *sma;
+       struct gpio_reg __iomem *sma_map1;
+       struct gpio_reg __iomem *sma_map2;
        struct irig_master_reg  __iomem *irig_out;
        struct irig_slave_reg   __iomem *irig_in;
        struct dcf_master_reg   __iomem *dcf_out;
        struct dcf_slave_reg    __iomem *dcf_in;
        struct tod_reg          __iomem *nmea_out;
+       struct frequency_reg    __iomem *freq_in[4];
+       struct ptp_ocp_ext_src  *signal_out[4];
        struct ptp_ocp_ext_src  *pps;
        struct ptp_ocp_ext_src  *ts0;
        struct ptp_ocp_ext_src  *ts1;
        struct ptp_ocp_ext_src  *ts2;
+       struct ptp_ocp_ext_src  *ts3;
+       struct ptp_ocp_ext_src  *ts4;
        struct img_reg __iomem  *image;
        struct ptp_clock        *ptp;
        struct ptp_clock_info   ptp_info;
@@ -230,6 +300,8 @@ struct ptp_ocp {
        struct platform_device  *spi_flash;
        struct clk_hw           *i2c_clk;
        struct timer_list       watchdog;
+       const struct ocp_attr_group *attr_tbl;
+       const struct ptp_ocp_eeprom_map *eeprom_map;
        struct dentry           *debug_root;
        time64_t                gnss_lost;
        int                     id;
@@ -238,12 +310,17 @@ struct ptp_ocp {
        int                     gnss2_port;
        int                     mac_port;       /* miniature atomic clock */
        int                     nmea_port;
-       u8                      serial[6];
-       bool                    has_serial;
+       u32                     fw_version;
+       u8                      board_id[OCP_BOARD_ID_LEN];
+       u8                      serial[OCP_SERIAL_LEN];
+       bool                    has_eeprom_data;
        u32                     pps_req_map;
        int                     flash_start;
        u32                     utc_tai_offset;
        u32                     ts_window_adjust;
+       u64                     fw_cap;
+       struct ptp_ocp_signal   signal[4];
+       struct ptp_ocp_sma_connector sma[4];
 };
 
 #define OCP_REQ_TIMESTAMP      BIT(0)
@@ -266,7 +343,36 @@ static int ptp_ocp_register_serial(struct ptp_ocp *bp, struct ocp_resource *r);
 static int ptp_ocp_register_ext(struct ptp_ocp *bp, struct ocp_resource *r);
 static int ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r);
 static irqreturn_t ptp_ocp_ts_irq(int irq, void *priv);
+static irqreturn_t ptp_ocp_signal_irq(int irq, void *priv);
 static int ptp_ocp_ts_enable(void *priv, u32 req, bool enable);
+static int ptp_ocp_signal_from_perout(struct ptp_ocp *bp, int gen,
+                                     struct ptp_perout_request *req);
+static int ptp_ocp_signal_enable(void *priv, u32 req, bool enable);
+static int ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr);
+
+static const struct ocp_attr_group fb_timecard_groups[];
+
+struct ptp_ocp_eeprom_map {
+       u16     off;
+       u16     len;
+       u32     bp_offset;
+       const void * const tag;
+};
+
+#define EEPROM_ENTRY(addr, member)                             \
+       .off = addr,                                            \
+       .len = sizeof_field(struct ptp_ocp, member),            \
+       .bp_offset = offsetof(struct ptp_ocp, member)
+
+#define BP_MAP_ENTRY_ADDR(bp, map) ({                          \
+       (void *)((uintptr_t)(bp) + (map)->bp_offset);           \
+})
+
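+/* Each EEPROM map entry copies 'len' bytes at EEPROM offset 'off' into the
+ * struct ptp_ocp field named in EEPROM_ENTRY(); 'tag' selects the backing
+ * nvmem device (e.g. "mac" for the MAC-address EEPROM).
+ */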
+static struct ptp_ocp_eeprom_map fb_eeprom_map[] = {
+       { EEPROM_ENTRY(0x43, board_id) },
+       { EEPROM_ENTRY(0x00, serial), .tag = "mac" },
+       { }
+};
 
 #define bp_assign_entry(bp, res, val) ({                               \
        uintptr_t addr = (uintptr_t)(bp) + (res)->bp_offset;            \
@@ -292,10 +398,10 @@ static int ptp_ocp_ts_enable(void *priv, u32 req, bool enable);
        OCP_RES_LOCATION(member), .setup = ptp_ocp_register_ext
 
 /* This is the MSI vector mapping used.
- * 0: TS3 (and PPS)
+ * 0: PPS (TS5)
  * 1: TS0
  * 2: TS1
- * 3: GNSS
+ * 3: GNSS1
  * 4: GNSS2
  * 5: MAC
  * 6: TS2
@@ -303,6 +409,12 @@ static int ptp_ocp_ts_enable(void *priv, u32 req, bool enable);
  * 8: HWICAP (notused)
  * 9: SPI Flash
  * 10: NMEA
+ * 11: Signal Generator 1
+ * 12: Signal Generator 2
+ * 13: Signal Generator 3
+ * 14: Signal Generator 4
+ * 15: TS3
+ * 16: TS4
  */
 
 static struct ocp_resource ocp_fb_resource[] = {
@@ -337,15 +449,70 @@ static struct ocp_resource ocp_fb_resource[] = {
                        .enable = ptp_ocp_ts_enable,
                },
        },
+       {
+               OCP_EXT_RESOURCE(ts3),
+               .offset = 0x01110000, .size = 0x10000, .irq_vec = 15,
+               .extra = &(struct ptp_ocp_ext_info) {
+                       .index = 3,
+                       .irq_fcn = ptp_ocp_ts_irq,
+                       .enable = ptp_ocp_ts_enable,
+               },
+       },
+       {
+               OCP_EXT_RESOURCE(ts4),
+               .offset = 0x01120000, .size = 0x10000, .irq_vec = 16,
+               .extra = &(struct ptp_ocp_ext_info) {
+                       .index = 4,
+                       .irq_fcn = ptp_ocp_ts_irq,
+                       .enable = ptp_ocp_ts_enable,
+               },
+       },
+       /* Timestamp for PHC and/or PPS generator */
        {
                OCP_EXT_RESOURCE(pps),
                .offset = 0x010C0000, .size = 0x10000, .irq_vec = 0,
                .extra = &(struct ptp_ocp_ext_info) {
-                       .index = 3,
+                       .index = 5,
                        .irq_fcn = ptp_ocp_ts_irq,
                        .enable = ptp_ocp_ts_enable,
                },
        },
+       {
+               OCP_EXT_RESOURCE(signal_out[0]),
+               .offset = 0x010D0000, .size = 0x10000, .irq_vec = 11,
+               .extra = &(struct ptp_ocp_ext_info) {
+                       .index = 1,
+                       .irq_fcn = ptp_ocp_signal_irq,
+                       .enable = ptp_ocp_signal_enable,
+               },
+       },
+       {
+               OCP_EXT_RESOURCE(signal_out[1]),
+               .offset = 0x010E0000, .size = 0x10000, .irq_vec = 12,
+               .extra = &(struct ptp_ocp_ext_info) {
+                       .index = 2,
+                       .irq_fcn = ptp_ocp_signal_irq,
+                       .enable = ptp_ocp_signal_enable,
+               },
+       },
+       {
+               OCP_EXT_RESOURCE(signal_out[2]),
+               .offset = 0x010F0000, .size = 0x10000, .irq_vec = 13,
+               .extra = &(struct ptp_ocp_ext_info) {
+                       .index = 3,
+                       .irq_fcn = ptp_ocp_signal_irq,
+                       .enable = ptp_ocp_signal_enable,
+               },
+       },
+       {
+               OCP_EXT_RESOURCE(signal_out[3]),
+               .offset = 0x01100000, .size = 0x10000, .irq_vec = 14,
+               .extra = &(struct ptp_ocp_ext_info) {
+                       .index = 4,
+                       .irq_fcn = ptp_ocp_signal_irq,
+                       .enable = ptp_ocp_signal_enable,
+               },
+       },
        {
                OCP_MEM_RESOURCE(pps_to_ext),
                .offset = 0x01030000, .size = 0x10000,
@@ -387,15 +554,28 @@ static struct ocp_resource ocp_fb_resource[] = {
                .offset = 0x00130000, .size = 0x1000,
        },
        {
-               OCP_MEM_RESOURCE(sma),
+               OCP_MEM_RESOURCE(sma_map1),
                .offset = 0x00140000, .size = 0x1000,
        },
+       {
+               OCP_MEM_RESOURCE(sma_map2),
+               .offset = 0x00220000, .size = 0x1000,
+       },
        {
                OCP_I2C_RESOURCE(i2c_ctrl),
                .offset = 0x00150000, .size = 0x10000, .irq_vec = 7,
                .extra = &(struct ptp_ocp_i2c_info) {
                        .name = "xiic-i2c",
                        .fixed_rate = 50000000,
+                       .data_size = sizeof(struct xiic_i2c_platform_data),
+                       .data = &(struct xiic_i2c_platform_data) {
+                               .num_devices = 2,
+                               .devices = (struct i2c_board_info[]) {
+                                       { I2C_BOARD_INFO("24c02", 0x50) },
+                                       { I2C_BOARD_INFO("24mac402", 0x58),
+                                         .platform_data = "mac" },
+                               },
+                       },
                },
        },
        {
@@ -430,6 +610,22 @@ static struct ocp_resource ocp_fb_resource[] = {
                        },
                },
        },
+       {
+               OCP_MEM_RESOURCE(freq_in[0]),
+               .offset = 0x01200000, .size = 0x10000,
+       },
+       {
+               OCP_MEM_RESOURCE(freq_in[1]),
+               .offset = 0x01210000, .size = 0x10000,
+       },
+       {
+               OCP_MEM_RESOURCE(freq_in[2]),
+               .offset = 0x01220000, .size = 0x10000,
+       },
+       {
+               OCP_MEM_RESOURCE(freq_in[3]),
+               .offset = 0x01230000, .size = 0x10000,
+       },
        {
                .setup = ptp_ocp_fb_board_init,
        },
@@ -463,25 +659,42 @@ static struct ocp_selector ptp_ocp_clock[] = {
        { }
 };
 
+#define SMA_ENABLE             BIT(15)
+#define SMA_SELECT_MASK                ((1U << 15) - 1)
+#define SMA_DISABLE            0x10000
+
 static struct ocp_selector ptp_ocp_sma_in[] = {
-       { .name = "10Mhz",      .value = 0x00 },
-       { .name = "PPS1",       .value = 0x01 },
-       { .name = "PPS2",       .value = 0x02 },
-       { .name = "TS1",        .value = 0x04 },
-       { .name = "TS2",        .value = 0x08 },
-       { .name = "IRIG",       .value = 0x10 },
-       { .name = "DCF",        .value = 0x20 },
+       { .name = "10Mhz",      .value = 0x0000 },
+       { .name = "PPS1",       .value = 0x0001 },
+       { .name = "PPS2",       .value = 0x0002 },
+       { .name = "TS1",        .value = 0x0004 },
+       { .name = "TS2",        .value = 0x0008 },
+       { .name = "IRIG",       .value = 0x0010 },
+       { .name = "DCF",        .value = 0x0020 },
+       { .name = "TS3",        .value = 0x0040 },
+       { .name = "TS4",        .value = 0x0080 },
+       { .name = "FREQ1",      .value = 0x0100 },
+       { .name = "FREQ2",      .value = 0x0200 },
+       { .name = "FREQ3",      .value = 0x0400 },
+       { .name = "FREQ4",      .value = 0x0800 },
+       { .name = "None",       .value = SMA_DISABLE },
        { }
 };
 
 static struct ocp_selector ptp_ocp_sma_out[] = {
-       { .name = "10Mhz",      .value = 0x00 },
-       { .name = "PHC",        .value = 0x01 },
-       { .name = "MAC",        .value = 0x02 },
-       { .name = "GNSS",       .value = 0x04 },
-       { .name = "GNSS2",      .value = 0x08 },
-       { .name = "IRIG",       .value = 0x10 },
-       { .name = "DCF",        .value = 0x20 },
+       { .name = "10Mhz",      .value = 0x0000 },
+       { .name = "PHC",        .value = 0x0001 },
+       { .name = "MAC",        .value = 0x0002 },
+       { .name = "GNSS1",      .value = 0x0004 },
+       { .name = "GNSS2",      .value = 0x0008 },
+       { .name = "IRIG",       .value = 0x0010 },
+       { .name = "DCF",        .value = 0x0020 },
+       { .name = "GEN1",       .value = 0x0040 },
+       { .name = "GEN2",       .value = 0x0080 },
+       { .name = "GEN3",       .value = 0x0100 },
+       { .name = "GEN4",       .value = 0x0200 },
+       { .name = "GND",        .value = 0x2000 },
+       { .name = "VCC",        .value = 0x4000 },
        { }
 };
 
@@ -703,6 +916,12 @@ ptp_ocp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq,
                        ext = bp->ts2;
                        break;
                case 3:
+                       ext = bp->ts3;
+                       break;
+               case 4:
+                       ext = bp->ts4;
+                       break;
+               case 5:
                        ext = bp->pps;
                        break;
                }
@@ -712,13 +931,27 @@ ptp_ocp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq,
                ext = bp->pps;
                break;
        case PTP_CLK_REQ_PEROUT:
-               if (on &&
-                   (rq->perout.period.sec != 1 || rq->perout.period.nsec != 0))
-                       return -EINVAL;
-               /* This is a request for 1PPS on an output SMA.
-                * Allow, but assume manual configuration.
-                */
-               return 0;
+               switch (rq->perout.index) {
+               case 0:
+                       /* This is a request for 1PPS on an output SMA.
+                        * Allow, but assume manual configuration.
+                        */
+                       if (on && (rq->perout.period.sec != 1 ||
+                                  rq->perout.period.nsec != 0))
+                               return -EINVAL;
+                       return 0;
+               case 1:
+               case 2:
+               case 3:
+               case 4:
+                       req = rq->perout.index - 1;
+                       ext = bp->signal_out[req];
+                       err = ptp_ocp_signal_from_perout(bp, req, &rq->perout);
+                       if (err)
+                               return err;
+                       break;
+               }
+               break;
        default:
                return -EOPNOTSUPP;
        }
@@ -730,6 +963,36 @@ ptp_ocp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq,
        return err;
 }
 
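+/* Back the PTP pin interface: a pin function request is translated into
+ * the same "IN:"/"OUT:" strings accepted by the sysfs SMA attributes and
+ * applied through ptp_ocp_sma_store().
+ */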
+static int
+ptp_ocp_verify(struct ptp_clock_info *ptp_info, unsigned pin,
+              enum ptp_pin_function func, unsigned chan)
+{
+       struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info);
+       char buf[16];
+
+       switch (func) {
+       case PTP_PF_NONE:
+               snprintf(buf, sizeof(buf), "IN: None");
+               break;
+       case PTP_PF_EXTTS:
+               /* Allow timestamps, but require sysfs configuration. */
+               return 0;
+       case PTP_PF_PEROUT:
+               /* channel 0 is 1PPS from PHC.
+                * channels 1..4 are the frequency generators.
+                */
+               if (chan)
+                       snprintf(buf, sizeof(buf), "OUT: GEN%d", chan);
+               else
+                       snprintf(buf, sizeof(buf), "OUT: PHC");
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return ptp_ocp_sma_store(bp, buf, pin + 1);
+}
+
 static const struct ptp_clock_info ptp_ocp_clock_info = {
        .owner          = THIS_MODULE,
        .name           = KBUILD_MODNAME,
@@ -740,9 +1003,10 @@ static const struct ptp_clock_info ptp_ocp_clock_info = {
        .adjfine        = ptp_ocp_null_adjfine,
        .adjphase       = ptp_ocp_null_adjphase,
        .enable         = ptp_ocp_enable,
+       .verify         = ptp_ocp_verify,
        .pps            = true,
-       .n_ext_ts       = 4,
-       .n_per_out      = 1,
+       .n_ext_ts       = 6,
+       .n_per_out      = 5,
 };
 
 static void
@@ -914,83 +1178,93 @@ ptp_ocp_tod_gnss_name(int idx)
                "ALL", "COMBINED", "GPS", "GLONASS", "GALILEO", "BEIDOU",
                "Unknown"
        };
-       if (idx > ARRAY_SIZE(gnss_name))
+       if (idx >= ARRAY_SIZE(gnss_name))
                idx = ARRAY_SIZE(gnss_name) - 1;
        return gnss_name[idx];
 }
 
+struct ptp_ocp_nvmem_match_info {
+       struct ptp_ocp *bp;
+       const void * const tag;
+};
+
 static int
-ptp_ocp_firstchild(struct device *dev, void *data)
+ptp_ocp_nvmem_match(struct device *dev, const void *data)
 {
-       return 1;
+       const struct ptp_ocp_nvmem_match_info *info = data;
+
+       dev = dev->parent;
+       if (!i2c_verify_client(dev) || info->tag != dev->platform_data)
+               return 0;
+
+       while ((dev = dev->parent))
+               if (dev->driver && !strcmp(dev->driver->name, KBUILD_MODNAME))
+                       return info->bp == dev_get_drvdata(dev);
+       return 0;
 }
 
-static int
-ptp_ocp_read_i2c(struct i2c_adapter *adap, u8 addr, u8 reg, u8 sz, u8 *data)
+static inline struct nvmem_device *
+ptp_ocp_nvmem_device_get(struct ptp_ocp *bp, const void * const tag)
 {
-       struct i2c_msg msgs[2] = {
-               {
-                       .addr = addr,
-                       .len = 1,
-                       .buf = &reg,
-               },
-               {
-                       .addr = addr,
-                       .flags = I2C_M_RD,
-                       .len = 2,
-                       .buf = data,
-               },
-       };
-       int err;
-       u8 len;
-
-       /* xiic-i2c for some stupid reason only does 2 byte reads. */
-       while (sz) {
-               len = min_t(u8, sz, 2);
-               msgs[1].len = len;
-               err = i2c_transfer(adap, msgs, 2);
-               if (err != msgs[1].len)
-                       return err;
-               msgs[1].buf += len;
-               reg += len;
-               sz -= len;
+       struct ptp_ocp_nvmem_match_info info = { .bp = bp, .tag = tag };
+
+       return nvmem_device_find(&info, ptp_ocp_nvmem_match);
+}
+
+static inline void
+ptp_ocp_nvmem_device_put(struct nvmem_device **nvmemp)
+{
+       if (*nvmemp != NULL) {
+               nvmem_device_put(*nvmemp);
+               *nvmemp = NULL;
        }
-       return 0;
 }
 
 static void
-ptp_ocp_get_serial_number(struct ptp_ocp *bp)
+ptp_ocp_read_eeprom(struct ptp_ocp *bp)
 {
-       struct i2c_adapter *adap;
-       struct device *dev;
-       int err;
+       const struct ptp_ocp_eeprom_map *map;
+       struct nvmem_device *nvmem;
+       const void *tag;
+       int ret;
 
        if (!bp->i2c_ctrl)
                return;
 
-       dev = device_find_child(&bp->i2c_ctrl->dev, NULL, ptp_ocp_firstchild);
-       if (!dev) {
-               dev_err(&bp->pdev->dev, "Can't find I2C adapter\n");
-               return;
-       }
-
-       adap = i2c_verify_adapter(dev);
-       if (!adap) {
-               dev_err(&bp->pdev->dev, "device '%s' isn't an I2C adapter\n",
-                       dev_name(dev));
-               goto out;
-       }
+       tag = NULL;
+       nvmem = NULL;
 
-       err = ptp_ocp_read_i2c(adap, 0x58, 0x9A, 6, bp->serial);
-       if (err) {
-               dev_err(&bp->pdev->dev, "could not read eeprom: %d\n", err);
-               goto out;
+       for (map = bp->eeprom_map; map->len; map++) {
+               if (map->tag != tag) {
+                       tag = map->tag;
+                       ptp_ocp_nvmem_device_put(&nvmem);
+               }
+               if (!nvmem) {
+                       nvmem = ptp_ocp_nvmem_device_get(bp, tag);
+                       if (!nvmem)
+                               goto out;
+               }
+               ret = nvmem_device_read(nvmem, map->off, map->len,
+                                       BP_MAP_ENTRY_ADDR(bp, map));
+               if (ret != map->len)
+                       goto read_fail;
        }
 
-       bp->has_serial = true;
+       bp->has_eeprom_data = true;
 
 out:
-       put_device(dev);
+       ptp_ocp_nvmem_device_put(&nvmem);
+       return;
+
+read_fail:
+       dev_err(&bp->pdev->dev, "could not read eeprom: %d\n", ret);
+       goto out;
+}
+
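[Editor's note: ptp_ocp_read_eeprom() above walks a table of (tag, offset, length) entries, re-resolving the nvmem handle only when the tag changes so consecutive entries share one lookup. A sketch of that loop shape with a fake table and read function; BP_MAP_ENTRY_ADDR and the real fb_eeprom_map live elsewhere in the driver, and the offsets below are invented:

        #include <stdio.h>

        struct eeprom_map { const char *tag; int off; int len; };

        /* stand-in for nvmem_device_read(); always "succeeds" */
        static int fake_read(const char *tag, int off, int len)
        {
                printf("read %d bytes @%#x from nvmem '%s'\n", len, off, tag);
                return len;
        }

        int main(void)
        {
                const struct eeprom_map map[] = {
                        { "24c02", 0x20, 6 },   /* serial */
                        { "24c02", 0x90, 4 },   /* same handle reused */
                        { "fram",  0x00, 8 },   /* tag change -> new handle */
                        { NULL, 0, 0 },         /* len == 0 terminates */
                };
                const char *tag = NULL;

                for (int i = 0; map[i].len; i++) {
                        if (map[i].tag != tag) {
                                tag = map[i].tag;
                                printf("acquire nvmem handle for '%s'\n", tag);
                        }
                        if (fake_read(tag, map[i].off, map[i].len) != map[i].len)
                                return 1;
                }
                return 0;
        }
]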
+static int
+ptp_ocp_firstchild(struct device *dev, void *data)
+{
+       return 1;
 }
 
 static struct device *
@@ -1091,34 +1365,33 @@ ptp_ocp_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
        if (err)
                return err;
 
-       if (bp->image) {
-               u32 ver = ioread32(&bp->image->version);
-
-               if (ver & 0xffff) {
-                       sprintf(buf, "%d", ver);
-                       err = devlink_info_version_running_put(req,
-                                                              "fw",
-                                                              buf);
-               } else {
-                       sprintf(buf, "%d", ver >> 16);
-                       err = devlink_info_version_running_put(req,
-                                                              "loader",
-                                                              buf);
-               }
-               if (err)
-                       return err;
+       if (bp->fw_version & 0xffff) {
+               sprintf(buf, "%d", bp->fw_version);
+               err = devlink_info_version_running_put(req, "fw", buf);
+       } else {
+               sprintf(buf, "%d", bp->fw_version >> 16);
+               err = devlink_info_version_running_put(req, "loader", buf);
        }
+       if (err)
+               return err;
 
-       if (!bp->has_serial)
-               ptp_ocp_get_serial_number(bp);
-
-       if (bp->has_serial) {
-               sprintf(buf, "%pM", bp->serial);
-               err = devlink_info_serial_number_put(req, buf);
-               if (err)
-                       return err;
+       if (!bp->has_eeprom_data) {
+               ptp_ocp_read_eeprom(bp);
+               if (!bp->has_eeprom_data)
+                       return 0;
        }
 
+       sprintf(buf, "%pM", bp->serial);
+       err = devlink_info_serial_number_put(req, buf);
+       if (err)
+               return err;
+
+       err = devlink_info_version_fixed_put(req,
+                       DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+                       bp->board_id);
+       if (err)
+               return err;
+
        return 0;
 }
 
@@ -1235,6 +1508,137 @@ ptp_ocp_register_i2c(struct ptp_ocp *bp, struct ocp_resource *r)
        return 0;
 }
 
+/* The expectation is that this is triggered only on error. */
+static irqreturn_t
+ptp_ocp_signal_irq(int irq, void *priv)
+{
+       struct ptp_ocp_ext_src *ext = priv;
+       struct signal_reg __iomem *reg = ext->mem;
+       struct ptp_ocp *bp = ext->bp;
+       u32 enable, status;
+       int gen;
+
+       gen = ext->info->index - 1;
+
+       enable = ioread32(&reg->enable);
+       status = ioread32(&reg->status);
+
+       /* disable generator on error */
+       if (status || !enable) {
+               iowrite32(0, &reg->intr_mask);
+               iowrite32(0, &reg->enable);
+               bp->signal[gen].running = false;
+       }
+
+       iowrite32(0, &reg->intr);       /* ack interrupt */
+
+       return IRQ_HANDLED;
+}
+
+static int
+ptp_ocp_signal_set(struct ptp_ocp *bp, int gen, struct ptp_ocp_signal *s)
+{
+       struct ptp_system_timestamp sts;
+       struct timespec64 ts;
+       ktime_t start_ns;
+       int err;
+
+       if (!s->period)
+               return 0;
+
+       if (!s->pulse)
+               s->pulse = ktime_divns(s->period * s->duty, 100);
+
+       err = ptp_ocp_gettimex(&bp->ptp_info, &ts, &sts);
+       if (err)
+               return err;
+
+       start_ns = ktime_set(ts.tv_sec, ts.tv_nsec) + NSEC_PER_MSEC;
+       if (!s->start) {
+               /* roundup() does not work on 32-bit systems */
+               s->start = DIV_ROUND_UP_ULL(start_ns, s->period);
+               s->start = ktime_add(s->start * s->period, s->phase);
+       }
+
+       if (s->duty < 1 || s->duty > 99)
+               return -EINVAL;
+
+       if (s->pulse < 1 || s->pulse > s->period)
+               return -EINVAL;
+
+       if (s->start < start_ns)
+               return -EINVAL;
+
+       bp->signal[gen] = *s;
+
+       return 0;
+}
+
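[Editor's note: the bookkeeping in ptp_ocp_signal_set() is plain nanosecond arithmetic: the pulse width defaults to duty% of the period, and an unset start time is rounded up to a whole number of periods past roughly now + 1 ms, then offset by the phase (the driver uses DIV_ROUND_UP_ULL because roundup() needs a 64-bit divide helper that 32-bit builds lack). A standalone sketch with invented values:

        #include <inttypes.h>
        #include <stdint.h>
        #include <stdio.h>

        #define NSEC_PER_MSEC 1000000ULL

        int main(void)
        {
                uint64_t now = 1000000000ULL;   /* pretend PHC read, ns */
                uint64_t period = 500000000;    /* 500 ms */
                uint64_t duty = 30;             /* percent */
                uint64_t phase = 10000000;      /* 10 ms */

                /* pulse width defaults to duty% of the period */
                uint64_t pulse = period * duty / 100;

                /* earliest allowed start: now plus a 1 ms guard */
                uint64_t earliest = now + NSEC_PER_MSEC;

                /* round up to a whole number of periods, then add the phase */
                uint64_t start = (earliest + period - 1) / period * period + phase;

                printf("pulse %" PRIu64 " ns, start %" PRIu64 " ns\n",
                       pulse, start);
                return 0;
        }
]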
+static int
+ptp_ocp_signal_from_perout(struct ptp_ocp *bp, int gen,
+                          struct ptp_perout_request *req)
+{
+       struct ptp_ocp_signal s = { };
+
+       s.polarity = bp->signal[gen].polarity;
+       s.period = ktime_set(req->period.sec, req->period.nsec);
+       if (!s.period)
+               return 0;
+
+       if (req->flags & PTP_PEROUT_DUTY_CYCLE) {
+               s.pulse = ktime_set(req->on.sec, req->on.nsec);
+               s.duty = ktime_divns(s.pulse * 100, s.period);
+       }
+
+       if (req->flags & PTP_PEROUT_PHASE)
+               s.phase = ktime_set(req->phase.sec, req->phase.nsec);
+       else
+               s.start = ktime_set(req->start.sec, req->start.nsec);
+
+       return ptp_ocp_signal_set(bp, gen, &s);
+}
+
+static int
+ptp_ocp_signal_enable(void *priv, u32 req, bool enable)
+{
+       struct ptp_ocp_ext_src *ext = priv;
+       struct signal_reg __iomem *reg = ext->mem;
+       struct ptp_ocp *bp = ext->bp;
+       struct timespec64 ts;
+       int gen;
+
+       gen = ext->info->index - 1;
+
+       iowrite32(0, &reg->intr_mask);
+       iowrite32(0, &reg->enable);
+       bp->signal[gen].running = false;
+       if (!enable)
+               return 0;
+
+       ts = ktime_to_timespec64(bp->signal[gen].start);
+       iowrite32(ts.tv_sec, &reg->start_sec);
+       iowrite32(ts.tv_nsec, &reg->start_ns);
+
+       ts = ktime_to_timespec64(bp->signal[gen].period);
+       iowrite32(ts.tv_sec, &reg->period_sec);
+       iowrite32(ts.tv_nsec, &reg->period_ns);
+
+       ts = ktime_to_timespec64(bp->signal[gen].pulse);
+       iowrite32(ts.tv_sec, &reg->pulse_sec);
+       iowrite32(ts.tv_nsec, &reg->pulse_ns);
+
+       iowrite32(bp->signal[gen].polarity, &reg->polarity);
+       iowrite32(0, &reg->repeat_count);
+
+       iowrite32(0, &reg->intr);               /* clear interrupt state */
+       iowrite32(1, &reg->intr_mask);          /* enable interrupt */
+       iowrite32(3, &reg->enable);             /* valid & enable */
+
+       bp->signal[gen].running = true;
+
+       return 0;
+}
+
 static irqreturn_t
 ptp_ocp_ts_irq(int irq, void *priv)
 {
@@ -1362,7 +1766,7 @@ ptp_ocp_serial_line(struct ptp_ocp *bp, struct ocp_resource *r)
        uart.port.mapbase = pci_resource_start(pdev, 0) + r->offset;
        uart.port.irq = pci_irq_vector(pdev, r->irq_vec);
        uart.port.uartclk = 50000000;
-       uart.port.flags = UPF_FIXED_TYPE | UPF_IOREMAP;
+       uart.port.flags = UPF_FIXED_TYPE | UPF_IOREMAP | UPF_NO_THRE_TEST;
        uart.port.type = PORT_16550A;
 
        return serial8250_register_8250_port(&uart);
@@ -1407,31 +1811,132 @@ ptp_ocp_nmea_out_init(struct ptp_ocp *bp)
        iowrite32(1, &bp->nmea_out->ctrl);              /* enable */
 }
 
-/* FB specific board initializers; last "resource" registered. */
-static int
-ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
+static void
+_ptp_ocp_signal_init(struct ptp_ocp_signal *s, struct signal_reg __iomem *reg)
 {
-       bp->flash_start = 1024 * 4096;
+       u32 val;
 
-       ptp_ocp_tod_init(bp);
-       ptp_ocp_nmea_out_init(bp);
+       iowrite32(0, &reg->enable);             /* disable */
 
-       return ptp_ocp_init_clock(bp);
+       val = ioread32(&reg->polarity);
+       s->polarity = val ? true : false;
+       s->duty = 50;
 }
 
-static bool
-ptp_ocp_allow_irq(struct ptp_ocp *bp, struct ocp_resource *r)
+static void
+ptp_ocp_signal_init(struct ptp_ocp *bp)
 {
-       bool allow = !r->irq_vec || r->irq_vec < bp->n_irqs;
+       int i;
 
-       if (!allow)
-               dev_err(&bp->pdev->dev, "irq %d out of range, skipping %s\n",
-                       r->irq_vec, r->name);
-       return allow;
+       for (i = 0; i < 4; i++)
+               if (bp->signal_out[i])
+                       _ptp_ocp_signal_init(&bp->signal[i],
+                                            bp->signal_out[i]->mem);
 }
 
-static int
-ptp_ocp_register_resources(struct ptp_ocp *bp, kernel_ulong_t driver_data)
+static void
+ptp_ocp_sma_init(struct ptp_ocp *bp)
+{
+       u32 reg;
+       int i;
+
+       /* defaults */
+       bp->sma[0].mode = SMA_MODE_IN;
+       bp->sma[1].mode = SMA_MODE_IN;
+       bp->sma[2].mode = SMA_MODE_OUT;
+       bp->sma[3].mode = SMA_MODE_OUT;
+
+       /* If no SMA1 map, the pin functions and directions are fixed. */
+       if (!bp->sma_map1) {
+               for (i = 0; i < 4; i++) {
+                       bp->sma[i].fixed_fcn = true;
+                       bp->sma[i].fixed_dir = true;
+               }
+               return;
+       }
+
+       /* If the SMA2 GPIO output map reads back as all 1s, it is not
+        * present. This indicates the firmware has fixed-direction SMA
+        * pins.
+        */
+       reg = ioread32(&bp->sma_map2->gpio2);
+       if (reg == 0xffffffff) {
+               for (i = 0; i < 4; i++)
+                       bp->sma[i].fixed_dir = true;
+       } else {
+               reg = ioread32(&bp->sma_map1->gpio1);
+               bp->sma[0].mode = reg & BIT(15) ? SMA_MODE_IN : SMA_MODE_OUT;
+               bp->sma[1].mode = reg & BIT(31) ? SMA_MODE_IN : SMA_MODE_OUT;
+
+               reg = ioread32(&bp->sma_map1->gpio2);
+               bp->sma[2].mode = reg & BIT(15) ? SMA_MODE_OUT : SMA_MODE_IN;
+               bp->sma[3].mode = reg & BIT(31) ? SMA_MODE_OUT : SMA_MODE_IN;
+       }
+}
+
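[Editor's note: as the init code above implies, each 32-bit selector register carries two SMA pins (low and high 16 bits), with the direction encoded in bit 15 / bit 31 and the sense inverted between the input map (gpio1, bit set means IN) and the output map (gpio2, bit set means OUT). A sketch decoding a sample input-map value; the value and the selector mask below are illustrative (the driver's SMA_SELECT_MASK is defined earlier in the file):

        #include <stdint.h>
        #include <stdio.h>

        #define BIT(n) (1u << (n))

        int main(void)
        {
                uint32_t gpio1 = 0x80010002;    /* fake sma_map1->gpio1 readout */

                printf("sma1: %s, selector bits %#x\n",
                       gpio1 & BIT(15) ? "IN" : "OUT", gpio1 & 0x7fff);
                printf("sma2: %s, selector bits %#x\n",
                       gpio1 & BIT(31) ? "IN" : "OUT", (gpio1 >> 16) & 0x7fff);
                return 0;
        }
]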
+static int
+ptp_ocp_fb_set_pins(struct ptp_ocp *bp)
+{
+       struct ptp_pin_desc *config;
+       int i;
+
+       config = kzalloc(sizeof(*config) * 4, GFP_KERNEL);
+       if (!config)
+               return -ENOMEM;
+
+       for (i = 0; i < 4; i++) {
+               sprintf(config[i].name, "sma%d", i + 1);
+               config[i].index = i;
+       }
+
+       bp->ptp_info.n_pins = 4;
+       bp->ptp_info.pin_config = config;
+
+       return 0;
+}
+
+/* FB specific board initializers; last "resource" registered. */
+static int
+ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
+{
+       int ver, err;
+
+       bp->flash_start = 1024 * 4096;
+       bp->eeprom_map = fb_eeprom_map;
+       bp->fw_version = ioread32(&bp->image->version);
+       bp->attr_tbl = fb_timecard_groups;
+       bp->fw_cap = OCP_CAP_BASIC;
+
+       ver = bp->fw_version & 0xffff;
+       if (ver >= 19)
+               bp->fw_cap |= OCP_CAP_SIGNAL;
+       if (ver >= 20)
+               bp->fw_cap |= OCP_CAP_FREQ;
+
+       ptp_ocp_tod_init(bp);
+       ptp_ocp_nmea_out_init(bp);
+       ptp_ocp_sma_init(bp);
+       ptp_ocp_signal_init(bp);
+
+       err = ptp_ocp_fb_set_pins(bp);
+       if (err)
+               return err;
+
+       return ptp_ocp_init_clock(bp);
+}
+
+static bool
+ptp_ocp_allow_irq(struct ptp_ocp *bp, struct ocp_resource *r)
+{
+       bool allow = !r->irq_vec || r->irq_vec < bp->n_irqs;
+
+       if (!allow)
+               dev_err(&bp->pdev->dev, "irq %d out of range, skipping %s\n",
+                       r->irq_vec, r->name);
+       return allow;
+}
+
+static int
+ptp_ocp_register_resources(struct ptp_ocp *bp, kernel_ulong_t driver_data)
 {
        struct ocp_resource *r, *table;
        int err = 0;
@@ -1516,38 +2021,8 @@ __handle_signal_inputs(struct ptp_ocp *bp, u32 val)
  * ANT4 == sma4 (out)
  */
 
-enum ptp_ocp_sma_mode {
-       SMA_MODE_IN,
-       SMA_MODE_OUT,
-};
-
-static struct ptp_ocp_sma_connector {
-       enum    ptp_ocp_sma_mode mode;
-       bool    fixed_mode;
-       u16     default_out_idx;
-} ptp_ocp_sma_map[4] = {
-       {
-               .mode = SMA_MODE_IN,
-               .fixed_mode = true,
-       },
-       {
-               .mode = SMA_MODE_IN,
-               .fixed_mode = true,
-       },
-       {
-               .mode = SMA_MODE_OUT,
-               .fixed_mode = true,
-               .default_out_idx = 0,           /* 10Mhz */
-       },
-       {
-               .mode = SMA_MODE_OUT,
-               .fixed_mode = true,
-               .default_out_idx = 1,           /* PHC */
-       },
-};
-
 static ssize_t
-ptp_ocp_show_output(u32 val, char *buf, int default_idx)
+ptp_ocp_show_output(u32 val, char *buf, int def_val)
 {
        const char *name;
        ssize_t count;
@@ -1555,13 +2030,13 @@ ptp_ocp_show_output(u32 val, char *buf, int default_idx)
        count = sysfs_emit(buf, "OUT: ");
        name = ptp_ocp_select_name_from_val(ptp_ocp_sma_out, val);
        if (!name)
-               name = ptp_ocp_sma_out[default_idx].name;
+               name = ptp_ocp_select_name_from_val(ptp_ocp_sma_out, def_val);
        count += sysfs_emit_at(buf, count, "%s\n", name);
        return count;
 }
 
 static ssize_t
-ptp_ocp_show_inputs(u32 val, char *buf, const char *zero_in)
+ptp_ocp_show_inputs(u32 val, char *buf, int def_val)
 {
        const char *name;
        ssize_t count;
@@ -1574,8 +2049,10 @@ ptp_ocp_show_inputs(u32 val, char *buf, const char *zero_in)
                        count += sysfs_emit_at(buf, count, "%s ", name);
                }
        }
-       if (!val && zero_in)
-               count += sysfs_emit_at(buf, count, "%s ", zero_in);
+       if (!val && def_val >= 0) {
+               name = ptp_ocp_select_name_from_val(ptp_ocp_sma_in, def_val);
+               count += sysfs_emit_at(buf, count, "%s ", name);
+       }
        if (count)
                count--;
        count += sysfs_emit_at(buf, count, "\n");
@@ -1600,7 +2077,7 @@ sma_parse_inputs(const char *buf, enum ptp_ocp_sma_mode *mode)
 
        idx = 0;
        dir = *mode == SMA_MODE_IN ? 0 : 1;
-       if (!strcasecmp("IN:", argv[idx])) {
+       if (!strcasecmp("IN:", argv[0])) {
                dir = 0;
                idx++;
        }
@@ -1621,102 +2098,126 @@ out:
        return ret;
 }
 
+static u32
+ptp_ocp_sma_get(struct ptp_ocp *bp, int sma_nr, enum ptp_ocp_sma_mode mode)
+{
+       u32 __iomem *gpio;
+       u32 shift;
+
+       if (bp->sma[sma_nr - 1].fixed_fcn)
+               return (sma_nr - 1) & 1;
+
+       if (mode == SMA_MODE_IN)
+               gpio = sma_nr > 2 ? &bp->sma_map2->gpio1 : &bp->sma_map1->gpio1;
+       else
+               gpio = sma_nr > 2 ? &bp->sma_map1->gpio2 : &bp->sma_map2->gpio2;
+       shift = sma_nr & 1 ? 0 : 16;
+
+       return (ioread32(gpio) >> shift) & 0xffff;
+}
+
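[Editor's note: a worked example of the addressing in ptp_ocp_sma_get() above: odd-numbered pins use the low 16 bits, even-numbered pins the high 16, and pins 3/4 move to the second map register. Pure index math, no hardware access:

        #include <stdio.h>

        int main(void)
        {
                for (int sma_nr = 1; sma_nr <= 4; sma_nr++) {
                        int map = sma_nr > 2 ? 2 : 1;
                        int shift = sma_nr & 1 ? 0 : 16;

                        printf("sma%d: map%d bits %d..%d\n",
                               sma_nr, map, shift, shift + 15);
                }
                return 0;
        }
]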
 static ssize_t
-ptp_ocp_sma_show(struct ptp_ocp *bp, int sma_nr, u32 val, char *buf,
-                const char *zero_in)
+ptp_ocp_sma_show(struct ptp_ocp *bp, int sma_nr, char *buf,
+                int default_in_val, int default_out_val)
 {
-       struct ptp_ocp_sma_connector *sma = &ptp_ocp_sma_map[sma_nr - 1];
+       struct ptp_ocp_sma_connector *sma = &bp->sma[sma_nr - 1];
+       u32 val;
 
-       if (sma->mode == SMA_MODE_IN)
-               return ptp_ocp_show_inputs(val, buf, zero_in);
+       val = ptp_ocp_sma_get(bp, sma_nr, sma->mode) & SMA_SELECT_MASK;
 
-       return ptp_ocp_show_output(val, buf, sma->default_out_idx);
+       if (sma->mode == SMA_MODE_IN) {
+               if (sma->disabled)
+                       val = SMA_DISABLE;
+               return ptp_ocp_show_inputs(val, buf, default_in_val);
+       }
+
+       return ptp_ocp_show_output(val, buf, default_out_val);
 }
 
 static ssize_t
 sma1_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct ptp_ocp *bp = dev_get_drvdata(dev);
-       u32 val;
 
-       val = ioread32(&bp->sma->gpio1) & 0x3f;
-       return ptp_ocp_sma_show(bp, 1, val, buf, ptp_ocp_sma_in[0].name);
+       return ptp_ocp_sma_show(bp, 1, buf, 0, 1);
 }
 
 static ssize_t
 sma2_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct ptp_ocp *bp = dev_get_drvdata(dev);
-       u32 val;
 
-       val = (ioread32(&bp->sma->gpio1) >> 16) & 0x3f;
-       return ptp_ocp_sma_show(bp, 2, val, buf, NULL);
+       return ptp_ocp_sma_show(bp, 2, buf, -1, 1);
 }
 
 static ssize_t
 sma3_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct ptp_ocp *bp = dev_get_drvdata(dev);
-       u32 val;
 
-       val = ioread32(&bp->sma->gpio2) & 0x3f;
-       return ptp_ocp_sma_show(bp, 3, val, buf, NULL);
+       return ptp_ocp_sma_show(bp, 3, buf, -1, 0);
 }
 
 static ssize_t
 sma4_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct ptp_ocp *bp = dev_get_drvdata(dev);
-       u32 val;
 
-       val = (ioread32(&bp->sma->gpio2) >> 16) & 0x3f;
-       return ptp_ocp_sma_show(bp, 4, val, buf, NULL);
+       return ptp_ocp_sma_show(bp, 4, buf, -1, 1);
 }
 
 static void
-ptp_ocp_sma_store_output(struct ptp_ocp *bp, u32 val, u32 shift)
+ptp_ocp_sma_store_output(struct ptp_ocp *bp, int sma_nr, u32 val)
 {
+       u32 reg, mask, shift;
        unsigned long flags;
-       u32 gpio, mask;
+       u32 __iomem *gpio;
+
+       gpio = sma_nr > 2 ? &bp->sma_map1->gpio2 : &bp->sma_map2->gpio2;
+       shift = sma_nr & 1 ? 0 : 16;
 
        mask = 0xffff << (16 - shift);
 
        spin_lock_irqsave(&bp->lock, flags);
 
-       gpio = ioread32(&bp->sma->gpio2);
-       gpio = (gpio & mask) | (val << shift);
+       reg = ioread32(gpio);
+       reg = (reg & mask) | (val << shift);
 
-       __handle_signal_outputs(bp, gpio);
+       __handle_signal_outputs(bp, reg);
 
-       iowrite32(gpio, &bp->sma->gpio2);
+       iowrite32(reg, gpio);
 
        spin_unlock_irqrestore(&bp->lock, flags);
 }
 
 static void
-ptp_ocp_sma_store_inputs(struct ptp_ocp *bp, u32 val, u32 shift)
+ptp_ocp_sma_store_inputs(struct ptp_ocp *bp, int sma_nr, u32 val)
 {
+       u32 reg, mask, shift;
        unsigned long flags;
-       u32 gpio, mask;
+       u32 __iomem *gpio;
+
+       gpio = sma_nr > 2 ? &bp->sma_map2->gpio1 : &bp->sma_map1->gpio1;
+       shift = sma_nr & 1 ? 0 : 16;
 
        mask = 0xffff << (16 - shift);
 
        spin_lock_irqsave(&bp->lock, flags);
 
-       gpio = ioread32(&bp->sma->gpio1);
-       gpio = (gpio & mask) | (val << shift);
+       reg = ioread32(gpio);
+       reg = (reg & mask) | (val << shift);
 
-       __handle_signal_inputs(bp, gpio);
+       __handle_signal_inputs(bp, reg);
 
-       iowrite32(gpio, &bp->sma->gpio1);
+       iowrite32(reg, gpio);
 
        spin_unlock_irqrestore(&bp->lock, flags);
 }
 
-static ssize_t
-ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr, u32 shift)
+static int
+ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr)
 {
-       struct ptp_ocp_sma_connector *sma = &ptp_ocp_sma_map[sma_nr - 1];
+       struct ptp_ocp_sma_connector *sma = &bp->sma[sma_nr - 1];
        enum ptp_ocp_sma_mode mode;
        int val;
 
@@ -1725,18 +2226,35 @@ ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr, u32 shift)
        if (val < 0)
                return val;
 
-       if (mode != sma->mode && sma->fixed_mode)
+       if (sma->fixed_dir && (mode != sma->mode || val & SMA_DISABLE))
                return -EOPNOTSUPP;
 
+       if (sma->fixed_fcn) {
+               if (val != ((sma_nr - 1) & 1))
+                       return -EOPNOTSUPP;
+               return 0;
+       }
+
+       sma->disabled = !!(val & SMA_DISABLE);
+
        if (mode != sma->mode) {
-               pr_err("Mode changes not supported yet.\n");
-               return -EOPNOTSUPP;
+               if (mode == SMA_MODE_IN)
+                       ptp_ocp_sma_store_output(bp, sma_nr, 0);
+               else
+                       ptp_ocp_sma_store_inputs(bp, sma_nr, 0);
+               sma->mode = mode;
        }
 
-       if (sma->mode == SMA_MODE_IN)
-               ptp_ocp_sma_store_inputs(bp, val, shift);
+       if (!sma->fixed_dir)
+               val |= SMA_ENABLE;              /* add enable bit */
+
+       if (sma->disabled)
+               val = 0;
+
+       if (mode == SMA_MODE_IN)
+               ptp_ocp_sma_store_inputs(bp, sma_nr, val);
        else
-               ptp_ocp_sma_store_output(bp, val, shift);
+               ptp_ocp_sma_store_output(bp, sma_nr, val);
 
        return 0;
 }
@@ -1748,7 +2266,7 @@ sma1_store(struct device *dev, struct device_attribute *attr,
        struct ptp_ocp *bp = dev_get_drvdata(dev);
        int err;
 
-       err = ptp_ocp_sma_store(bp, buf, 1, 0);
+       err = ptp_ocp_sma_store(bp, buf, 1);
        return err ? err : count;
 }
 
@@ -1759,7 +2277,7 @@ sma2_store(struct device *dev, struct device_attribute *attr,
        struct ptp_ocp *bp = dev_get_drvdata(dev);
        int err;
 
-       err = ptp_ocp_sma_store(bp, buf, 2, 16);
+       err = ptp_ocp_sma_store(bp, buf, 2);
        return err ? err : count;
 }
 
@@ -1770,7 +2288,7 @@ sma3_store(struct device *dev, struct device_attribute *attr,
        struct ptp_ocp *bp = dev_get_drvdata(dev);
        int err;
 
-       err = ptp_ocp_sma_store(bp, buf, 3, 0);
+       err = ptp_ocp_sma_store(bp, buf, 3);
        return err ? err : count;
 }
 
@@ -1781,7 +2299,7 @@ sma4_store(struct device *dev, struct device_attribute *attr,
        struct ptp_ocp *bp = dev_get_drvdata(dev);
        int err;
 
-       err = ptp_ocp_sma_store(bp, buf, 4, 16);
+       err = ptp_ocp_sma_store(bp, buf, 4);
        return err ? err : count;
 }
 static DEVICE_ATTR_RW(sma1);
@@ -1805,13 +2323,263 @@ available_sma_outputs_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(available_sma_outputs);
 
+#define EXT_ATTR_RO(_group, _name, _val)                               \
+       struct dev_ext_attribute dev_attr_##_group##_val##_##_name =    \
+               { __ATTR_RO(_name), (void *)_val }
+#define EXT_ATTR_RW(_group, _name, _val)                               \
+       struct dev_ext_attribute dev_attr_##_group##_val##_##_name =    \
+               { __ATTR_RW(_name), (void *)_val }
+#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
+
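[Editor's note: the EXT_ATTR_* helpers above bundle a device attribute with an instance index carried in ->var, so one show/store function can serve all four generators or frequency counters. A userspace re-creation of the same pattern (plain structs standing in for dev_ext_attribute):

        #include <stdio.h>

        struct ext_attr {
                const char *name;
                void *var;                      /* instance index as a pointer */
        };

        #define EXT_ATTR(_name, _val) \
                { .name = #_name #_val, .var = (void *)(long)(_val) }

        static int duty[4] = { 50, 30, 25, 10 };

        static void show(const struct ext_attr *ea)
        {
                int i = (long)ea->var;          /* recover the instance index */

                printf("%s -> duty %d%%\n", ea->name, duty[i]);
        }

        int main(void)
        {
                struct ext_attr attrs[] = {
                        EXT_ATTR(duty, 0), EXT_ATTR(duty, 1),
                        EXT_ATTR(duty, 2), EXT_ATTR(duty, 3),
                };

                for (int i = 0; i < 4; i++)
                        show(&attrs[i]);
                return 0;
        }
]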
+/* period [duty [phase [polarity]]] */
+static ssize_t
+signal_store(struct device *dev, struct device_attribute *attr,
+            const char *buf, size_t count)
+{
+       struct dev_ext_attribute *ea = to_ext_attr(attr);
+       struct ptp_ocp *bp = dev_get_drvdata(dev);
+       struct ptp_ocp_signal s = { };
+       int gen = (uintptr_t)ea->var;
+       int argc, err;
+       char **argv;
+
+       argv = argv_split(GFP_KERNEL, buf, &argc);
+       if (!argv)
+               return -ENOMEM;
+
+       err = -EINVAL;
+       s.duty = bp->signal[gen].duty;
+       s.phase = bp->signal[gen].phase;
+       s.period = bp->signal[gen].period;
+       s.polarity = bp->signal[gen].polarity;
+
+       switch (argc) {
+       case 4:
+               argc--;
+               err = kstrtobool(argv[argc], &s.polarity);
+               if (err)
+                       goto out;
+               fallthrough;
+       case 3:
+               argc--;
+               err = kstrtou64(argv[argc], 0, &s.phase);
+               if (err)
+                       goto out;
+               fallthrough;
+       case 2:
+               argc--;
+               err = kstrtoint(argv[argc], 0, &s.duty);
+               if (err)
+                       goto out;
+               fallthrough;
+       case 1:
+               argc--;
+               err = kstrtou64(argv[argc], 0, &s.period);
+               if (err)
+                       goto out;
+               break;
+       default:
+               goto out;
+       }
+
+       err = ptp_ocp_signal_set(bp, gen, &s);
+       if (err)
+               goto out;
+
+       err = ptp_ocp_signal_enable(bp->signal_out[gen], gen, s.period != 0);
+
+out:
+       argv_free(argv);
+       return err ? err : count;
+}
+
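[Editor's note: signal_store() above accepts "period [duty [phase [polarity]]]" with trailing fields optional, peeling arguments off the end of the list via switch fallthrough. A sketch of the same back-to-front parse, with plain libc calls standing in for argv_split() and the kstrto* helpers:

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        int main(void)
        {
                char line[] = "500000000 30 10000000 1"; /* sample sysfs write */
                char *argv[4];
                int argc = 0;

                for (char *p = strtok(line, " "); p && argc < 4;
                     p = strtok(NULL, " "))
                        argv[argc++] = p;

                unsigned long long period = 0, phase = 0;
                int duty = 50, polarity = 0;

                switch (argc) {         /* consume from the last field back */
                case 4: polarity = atoi(argv[3]);               /* fallthrough */
                case 3: phase = strtoull(argv[2], NULL, 0);     /* fallthrough */
                case 2: duty = atoi(argv[1]);                   /* fallthrough */
                case 1: period = strtoull(argv[0], NULL, 0);    break;
                default: return 1;
                }

                printf("period=%llu duty=%d phase=%llu polarity=%d\n",
                       period, duty, phase, polarity);
                return 0;
        }
]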
+static ssize_t
+signal_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct dev_ext_attribute *ea = to_ext_attr(attr);
+       struct ptp_ocp *bp = dev_get_drvdata(dev);
+       struct ptp_ocp_signal *signal;
+       struct timespec64 ts;
+       ssize_t count;
+       int i;
+
+       i = (uintptr_t)ea->var;
+       signal = &bp->signal[i];
+
+       count = sysfs_emit(buf, "%llu %d %llu %d", signal->period,
+                          signal->duty, signal->phase, signal->polarity);
+
+       ts = ktime_to_timespec64(signal->start);
+       count += sysfs_emit_at(buf, count, " %ptT TAI\n", &ts);
+
+       return count;
+}
+static EXT_ATTR_RW(signal, signal, 0);
+static EXT_ATTR_RW(signal, signal, 1);
+static EXT_ATTR_RW(signal, signal, 2);
+static EXT_ATTR_RW(signal, signal, 3);
+
+static ssize_t
+duty_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct dev_ext_attribute *ea = to_ext_attr(attr);
+       struct ptp_ocp *bp = dev_get_drvdata(dev);
+       int i = (uintptr_t)ea->var;
+
+       return sysfs_emit(buf, "%d\n", bp->signal[i].duty);
+}
+static EXT_ATTR_RO(signal, duty, 0);
+static EXT_ATTR_RO(signal, duty, 1);
+static EXT_ATTR_RO(signal, duty, 2);
+static EXT_ATTR_RO(signal, duty, 3);
+
+static ssize_t
+period_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct dev_ext_attribute *ea = to_ext_attr(attr);
+       struct ptp_ocp *bp = dev_get_drvdata(dev);
+       int i = (uintptr_t)ea->var;
+
+       return sysfs_emit(buf, "%llu\n", bp->signal[i].period);
+}
+static EXT_ATTR_RO(signal, period, 0);
+static EXT_ATTR_RO(signal, period, 1);
+static EXT_ATTR_RO(signal, period, 2);
+static EXT_ATTR_RO(signal, period, 3);
+
+static ssize_t
+phase_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct dev_ext_attribute *ea = to_ext_attr(attr);
+       struct ptp_ocp *bp = dev_get_drvdata(dev);
+       int i = (uintptr_t)ea->var;
+
+       return sysfs_emit(buf, "%llu\n", bp->signal[i].phase);
+}
+static EXT_ATTR_RO(signal, phase, 0);
+static EXT_ATTR_RO(signal, phase, 1);
+static EXT_ATTR_RO(signal, phase, 2);
+static EXT_ATTR_RO(signal, phase, 3);
+
+static ssize_t
+polarity_show(struct device *dev, struct device_attribute *attr,
+             char *buf)
+{
+       struct dev_ext_attribute *ea = to_ext_attr(attr);
+       struct ptp_ocp *bp = dev_get_drvdata(dev);
+       int i = (uintptr_t)ea->var;
+
+       return sysfs_emit(buf, "%d\n", bp->signal[i].polarity);
+}
+static EXT_ATTR_RO(signal, polarity, 0);
+static EXT_ATTR_RO(signal, polarity, 1);
+static EXT_ATTR_RO(signal, polarity, 2);
+static EXT_ATTR_RO(signal, polarity, 3);
+
+static ssize_t
+running_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct dev_ext_attribute *ea = to_ext_attr(attr);
+       struct ptp_ocp *bp = dev_get_drvdata(dev);
+       int i = (uintptr_t)ea->var;
+
+       return sysfs_emit(buf, "%d\n", bp->signal[i].running);
+}
+static EXT_ATTR_RO(signal, running, 0);
+static EXT_ATTR_RO(signal, running, 1);
+static EXT_ATTR_RO(signal, running, 2);
+static EXT_ATTR_RO(signal, running, 3);
+
+static ssize_t
+start_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct dev_ext_attribute *ea = to_ext_attr(attr);
+       struct ptp_ocp *bp = dev_get_drvdata(dev);
+       int i = (uintptr_t)ea->var;
+       struct timespec64 ts;
+
+       ts = ktime_to_timespec64(bp->signal[i].start);
+       return sysfs_emit(buf, "%llu.%lu\n", ts.tv_sec, ts.tv_nsec);
+}
+static EXT_ATTR_RO(signal, start, 0);
+static EXT_ATTR_RO(signal, start, 1);
+static EXT_ATTR_RO(signal, start, 2);
+static EXT_ATTR_RO(signal, start, 3);
+
+static ssize_t
+seconds_store(struct device *dev, struct device_attribute *attr,
+             const char *buf, size_t count)
+{
+       struct dev_ext_attribute *ea = to_ext_attr(attr);
+       struct ptp_ocp *bp = dev_get_drvdata(dev);
+       int idx = (uintptr_t)ea->var;
+       u32 val;
+       int err;
+
+       err = kstrtou32(buf, 0, &val);
+       if (err)
+               return err;
+       if (val > 0xff)
+               return -EINVAL;
+
+       if (val)
+               val = (val << 8) | 0x1;
+
+       iowrite32(val, &bp->freq_in[idx]->ctrl);
+
+       return count;
+}
+
+static ssize_t
+seconds_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct dev_ext_attribute *ea = to_ext_attr(attr);
+       struct ptp_ocp *bp = dev_get_drvdata(dev);
+       int idx = (uintptr_t)ea->var;
+       u32 val;
+
+       val = ioread32(&bp->freq_in[idx]->ctrl);
+       if (val & 1)
+               val = (val >> 8) & 0xff;
+       else
+               val = 0;
+
+       return sysfs_emit(buf, "%u\n", val);
+}
+static EXT_ATTR_RW(freq, seconds, 0);
+static EXT_ATTR_RW(freq, seconds, 1);
+static EXT_ATTR_RW(freq, seconds, 2);
+static EXT_ATTR_RW(freq, seconds, 3);
+
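[Editor's note: per seconds_store()/seconds_show() above, the frequency-counter control word packs the measurement window into bits 15:8 with the enable in bit 0; a zero window disables the counter entirely. A tiny round-trip sketch of that encoding:

        #include <stdint.h>
        #include <stdio.h>

        static uint32_t encode_seconds(uint32_t sec)
        {
                return sec ? (sec << 8) | 0x1 : 0; /* window 15:8, enable bit 0 */
        }

        static uint32_t decode_seconds(uint32_t ctrl)
        {
                return (ctrl & 1) ? (ctrl >> 8) & 0xff : 0;
        }

        int main(void)
        {
                for (uint32_t sec = 0; sec <= 255; sec += 85)
                        printf("sec %3u -> ctrl %#06x -> %u\n", sec,
                               encode_seconds(sec),
                               decode_seconds(encode_seconds(sec)));
                return 0;
        }
]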
+static ssize_t
+frequency_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct dev_ext_attribute *ea = to_ext_attr(attr);
+       struct ptp_ocp *bp = dev_get_drvdata(dev);
+       int idx = (uintptr_t)ea->var;
+       u32 val;
+
+       val = ioread32(&bp->freq_in[idx]->status);
+       if (val & FREQ_STATUS_ERROR)
+               return sysfs_emit(buf, "error\n");
+       if (val & FREQ_STATUS_OVERRUN)
+               return sysfs_emit(buf, "overrun\n");
+       if (val & FREQ_STATUS_VALID)
+               return sysfs_emit(buf, "%lu\n", val & FREQ_STATUS_MASK);
+       return 0;
+}
+static EXT_ATTR_RO(freq, frequency, 0);
+static EXT_ATTR_RO(freq, frequency, 1);
+static EXT_ATTR_RO(freq, frequency, 2);
+static EXT_ATTR_RO(freq, frequency, 3);
+
 static ssize_t
 serialnum_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct ptp_ocp *bp = dev_get_drvdata(dev);
 
-       if (!bp->has_serial)
-               ptp_ocp_get_serial_number(bp);
+       if (!bp->has_eeprom_data)
+               ptp_ocp_read_eeprom(bp);
 
        return sysfs_emit(buf, "%pM\n", bp->serial);
 }
@@ -2039,7 +2807,52 @@ tod_correction_store(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RW(tod_correction);
 
-static struct attribute *timecard_attrs[] = {
+#define _DEVICE_SIGNAL_GROUP_ATTRS(_nr)                                        \
+       static struct attribute *fb_timecard_signal##_nr##_attrs[] = {  \
+               &dev_attr_signal##_nr##_signal.attr.attr,               \
+               &dev_attr_signal##_nr##_duty.attr.attr,                 \
+               &dev_attr_signal##_nr##_phase.attr.attr,                \
+               &dev_attr_signal##_nr##_period.attr.attr,               \
+               &dev_attr_signal##_nr##_polarity.attr.attr,             \
+               &dev_attr_signal##_nr##_running.attr.attr,              \
+               &dev_attr_signal##_nr##_start.attr.attr,                \
+               NULL,                                                   \
+       }
+
+#define DEVICE_SIGNAL_GROUP(_name, _nr)                                        \
+       _DEVICE_SIGNAL_GROUP_ATTRS(_nr);                                \
+       static const struct attribute_group                             \
+                       fb_timecard_signal##_nr##_group = {             \
+               .name = #_name,                                         \
+               .attrs = fb_timecard_signal##_nr##_attrs,               \
+}
+
+DEVICE_SIGNAL_GROUP(gen1, 0);
+DEVICE_SIGNAL_GROUP(gen2, 1);
+DEVICE_SIGNAL_GROUP(gen3, 2);
+DEVICE_SIGNAL_GROUP(gen4, 3);
+
+#define _DEVICE_FREQ_GROUP_ATTRS(_nr)                                  \
+       static struct attribute *fb_timecard_freq##_nr##_attrs[] = {    \
+               &dev_attr_freq##_nr##_seconds.attr.attr,                \
+               &dev_attr_freq##_nr##_frequency.attr.attr,              \
+               NULL,                                                   \
+       }
+
+#define DEVICE_FREQ_GROUP(_name, _nr)                                  \
+       _DEVICE_FREQ_GROUP_ATTRS(_nr);                                  \
+       static const struct attribute_group                             \
+                       fb_timecard_freq##_nr##_group = {               \
+               .name = #_name,                                         \
+               .attrs = fb_timecard_freq##_nr##_attrs,                 \
+}
+
+DEVICE_FREQ_GROUP(freq1, 0);
+DEVICE_FREQ_GROUP(freq2, 1);
+DEVICE_FREQ_GROUP(freq3, 2);
+DEVICE_FREQ_GROUP(freq4, 3);
+
+static struct attribute *fb_timecard_attrs[] = {
        &dev_attr_serialnum.attr,
        &dev_attr_gnss_sync.attr,
        &dev_attr_clock_source.attr,
@@ -2058,33 +2871,111 @@ static struct attribute *timecard_attrs[] = {
        &dev_attr_tod_correction.attr,
        NULL,
 };
-ATTRIBUTE_GROUPS(timecard);
+static const struct attribute_group fb_timecard_group = {
+       .attrs = fb_timecard_attrs,
+};
+static const struct ocp_attr_group fb_timecard_groups[] = {
+       { .cap = OCP_CAP_BASIC,     .group = &fb_timecard_group },
+       { .cap = OCP_CAP_SIGNAL,    .group = &fb_timecard_signal0_group },
+       { .cap = OCP_CAP_SIGNAL,    .group = &fb_timecard_signal1_group },
+       { .cap = OCP_CAP_SIGNAL,    .group = &fb_timecard_signal2_group },
+       { .cap = OCP_CAP_SIGNAL,    .group = &fb_timecard_signal3_group },
+       { .cap = OCP_CAP_FREQ,      .group = &fb_timecard_freq0_group },
+       { .cap = OCP_CAP_FREQ,      .group = &fb_timecard_freq1_group },
+       { .cap = OCP_CAP_FREQ,      .group = &fb_timecard_freq2_group },
+       { .cap = OCP_CAP_FREQ,      .group = &fb_timecard_freq3_group },
+       { },
+};
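[Editor's note: fb_timecard_groups pairs each sysfs group with a capability bit; registration walks the table and skips groups whose bit is absent from fw_cap (see the loop added to ptp_ocp_complete() later in this patch). A minimal sketch of that gating with invented capability names:

        #include <stdio.h>

        #define CAP_BASIC  (1 << 0)
        #define CAP_SIGNAL (1 << 1)
        #define CAP_FREQ   (1 << 2)

        struct attr_group { unsigned int cap; const char *name; };

        int main(void)
        {
                const struct attr_group tbl[] = {
                        { CAP_BASIC,  "base" },
                        { CAP_SIGNAL, "gen1" },
                        { CAP_FREQ,   "freq1" },
                        { 0, NULL },            /* sentinel, as in the driver */
                };
                unsigned int fw_cap = CAP_BASIC | CAP_SIGNAL; /* e.g. fw < 20 */

                for (int i = 0; tbl[i].cap; i++)
                        if (tbl[i].cap & fw_cap)
                                printf("registering group %s\n", tbl[i].name);
                return 0;
        }
]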
 
-static const char *
-gpio_map(u32 gpio, u32 bit, const char *pri, const char *sec, const char *def)
+static void
+gpio_input_map(char *buf, struct ptp_ocp *bp, u16 map[][2], u16 bit,
+              const char *def)
 {
-       const char *ans;
+       int i;
 
-       if (gpio & (1 << bit))
-               ans = pri;
-       else if (gpio & (1 << (bit + 16)))
-               ans = sec;
-       else
-               ans = def;
-       return ans;
+       for (i = 0; i < 4; i++) {
+               if (bp->sma[i].mode != SMA_MODE_IN)
+                       continue;
+               if (map[i][0] & (1 << bit)) {
+                       sprintf(buf, "sma%d", i + 1);
+                       return;
+               }
+       }
+       if (!def)
+               def = "----";
+       strcpy(buf, def);
 }
 
 static void
-gpio_multi_map(char *buf, u32 gpio, u32 bit,
-              const char *pri, const char *sec, const char *def)
+gpio_output_map(char *buf, struct ptp_ocp *bp, u16 map[][2], u16 bit)
 {
        char *ans = buf;
+       int i;
+
+       strcpy(ans, "----");
+       for (i = 0; i < 4; i++) {
+               if (bp->sma[i].mode != SMA_MODE_OUT)
+                       continue;
+               if (map[i][1] & (1 << bit))
+                       ans += sprintf(ans, "sma%d ", i + 1);
+       }
+}
+
+static void
+_signal_summary_show(struct seq_file *s, struct ptp_ocp *bp, int nr)
+{
+       struct signal_reg __iomem *reg = bp->signal_out[nr]->mem;
+       struct ptp_ocp_signal *signal = &bp->signal[nr];
+       char label[8];
+       bool on;
+       u32 val;
+
+       if (!signal)
+               return;
+
+       on = signal->running;
+       sprintf(label, "GEN%d", nr + 1);
+       seq_printf(s, "%7s: %s, period:%llu duty:%d%% phase:%llu pol:%d",
+                  label, on ? " ON" : "OFF",
+                  signal->period, signal->duty, signal->phase,
+                  signal->polarity);
+
+       val = ioread32(&reg->enable);
+       seq_printf(s, " [%x", val);
+       val = ioread32(&reg->status);
+       seq_printf(s, " %x]", val);
+
+       seq_printf(s, " start:%llu\n", signal->start);
+}
+
+static void
+_frequency_summary_show(struct seq_file *s, int nr,
+                       struct frequency_reg __iomem *reg)
+{
+       char label[8];
+       bool on;
+       u32 val;
+
+       if (!reg)
+               return;
 
-       strcpy(ans, def);
-       if (gpio & (1 << bit))
-               ans += sprintf(ans, "%s ", pri);
-       if (gpio & (1 << (bit + 16)))
-               ans += sprintf(ans, "%s ", sec);
+       sprintf(label, "FREQ%d", nr + 1);
+       val = ioread32(&reg->ctrl);
+       on = val & 1;
+       val = (val >> 8) & 0xff;
+       seq_printf(s, "%7s: %s, sec:%u",
+                  label,
+                  on ? " ON" : "OFF",
+                  val);
+
+       val = ioread32(&reg->status);
+       if (val & FREQ_STATUS_ERROR)
+               seq_printf(s, ", error");
+       if (val & FREQ_STATUS_OVERRUN)
+               seq_printf(s, ", overrun");
+       if (val & FREQ_STATUS_VALID)
+               seq_printf(s, ", freq %lu Hz", val & FREQ_STATUS_MASK);
+       seq_printf(s, "  reg:%x\n", val);
 }
 
 static int
@@ -2092,21 +2983,20 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
 {
        struct device *dev = s->private;
        struct ptp_system_timestamp sts;
-       u32 sma_in, sma_out, ctrl, val;
        struct ts_reg __iomem *ts_reg;
        struct timespec64 ts;
        struct ptp_ocp *bp;
-       const char *src;
+       u16 sma_val[4][2];
+       char *src, *buf;
+       u32 ctrl, val;
        bool on, map;
-       char *buf;
+       int i;
 
        buf = (char *)__get_free_page(GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
 
        bp = dev_get_drvdata(dev);
-       sma_in = ioread32(&bp->sma->gpio1);
-       sma_out = ioread32(&bp->sma->gpio2);
 
        seq_printf(s, "%7s: /dev/ptp%d\n", "PTP", ptp_clock_index(bp->ptp));
        if (bp->gnss_port != -1)
@@ -2118,22 +3008,47 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
        if (bp->nmea_port != -1)
                seq_printf(s, "%7s: /dev/ttyS%d\n", "NMEA", bp->nmea_port);
 
+       memset(sma_val, 0xff, sizeof(sma_val));
+       if (bp->sma_map1) {
+               u32 reg;
+
+               reg = ioread32(&bp->sma_map1->gpio1);
+               sma_val[0][0] = reg & 0xffff;
+               sma_val[1][0] = reg >> 16;
+
+               reg = ioread32(&bp->sma_map1->gpio2);
+               sma_val[2][1] = reg & 0xffff;
+               sma_val[3][1] = reg >> 16;
+
+               reg = ioread32(&bp->sma_map2->gpio1);
+               sma_val[2][0] = reg & 0xffff;
+               sma_val[3][0] = reg >> 16;
+
+               reg = ioread32(&bp->sma_map2->gpio2);
+               sma_val[0][1] = reg & 0xffff;
+               sma_val[1][1] = reg >> 16;
+       }
+
        sma1_show(dev, NULL, buf);
-       seq_printf(s, "   sma1: %s", buf);
+       seq_printf(s, "   sma1: %04x,%04x %s",
+                  sma_val[0][0], sma_val[0][1], buf);
 
        sma2_show(dev, NULL, buf);
-       seq_printf(s, "   sma2: %s", buf);
+       seq_printf(s, "   sma2: %04x,%04x %s",
+                  sma_val[1][0], sma_val[1][1], buf);
 
        sma3_show(dev, NULL, buf);
-       seq_printf(s, "   sma3: %s", buf);
+       seq_printf(s, "   sma3: %04x,%04x %s",
+                  sma_val[2][0], sma_val[2][1], buf);
 
        sma4_show(dev, NULL, buf);
-       seq_printf(s, "   sma4: %s", buf);
+       seq_printf(s, "   sma4: %04x,%04x %s",
+                  sma_val[3][0], sma_val[3][1], buf);
 
        if (bp->ts0) {
                ts_reg = bp->ts0->mem;
                on = ioread32(&ts_reg->enable);
-               src = "GNSS";
+               src = "GNSS1";
                seq_printf(s, "%7s: %s, src: %s\n", "TS0",
                           on ? " ON" : "OFF", src);
        }
@@ -2141,17 +3056,33 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
        if (bp->ts1) {
                ts_reg = bp->ts1->mem;
                on = ioread32(&ts_reg->enable);
-               src = gpio_map(sma_in, 2, "sma1", "sma2", "----");
+               gpio_input_map(buf, bp, sma_val, 2, NULL);
                seq_printf(s, "%7s: %s, src: %s\n", "TS1",
-                          on ? " ON" : "OFF", src);
+                          on ? " ON" : "OFF", buf);
        }
 
        if (bp->ts2) {
                ts_reg = bp->ts2->mem;
                on = ioread32(&ts_reg->enable);
-               src = gpio_map(sma_in, 3, "sma1", "sma2", "----");
+               gpio_input_map(buf, bp, sma_val, 3, NULL);
                seq_printf(s, "%7s: %s, src: %s\n", "TS2",
-                          on ? " ON" : "OFF", src);
+                          on ? " ON" : "OFF", buf);
+       }
+
+       if (bp->ts3) {
+               ts_reg = bp->ts3->mem;
+               on = ioread32(&ts_reg->enable);
+               gpio_input_map(buf, bp, sma_val, 6, NULL);
+               seq_printf(s, "%7s: %s, src: %s\n", "TS3",
+                          on ? " ON" : "OFF", buf);
+       }
+
+       if (bp->ts4) {
+               ts_reg = bp->ts4->mem;
+               on = ioread32(&ts_reg->enable);
+               gpio_input_map(buf, bp, sma_val, 7, NULL);
+               seq_printf(s, "%7s: %s, src: %s\n", "TS4",
+                          on ? " ON" : "OFF", buf);
        }
 
        if (bp->pps) {
@@ -2159,7 +3090,7 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
                src = "PHC";
                on = ioread32(&ts_reg->enable);
                map = !!(bp->pps_req_map & OCP_REQ_TIMESTAMP);
-               seq_printf(s, "%7s: %s, src: %s\n", "TS3",
+               seq_printf(s, "%7s: %s, src: %s\n", "TS5",
                           on && map ? " ON" : "OFF", src);
 
                map = !!(bp->pps_req_map & OCP_REQ_PPS);
@@ -2167,11 +3098,19 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
                           on && map ? " ON" : "OFF", src);
        }
 
+       if (bp->fw_cap & OCP_CAP_SIGNAL)
+               for (i = 0; i < 4; i++)
+                       _signal_summary_show(s, bp, i);
+
+       if (bp->fw_cap & OCP_CAP_FREQ)
+               for (i = 0; i < 4; i++)
+                       _frequency_summary_show(s, i, bp->freq_in[i]);
+
        if (bp->irig_out) {
                ctrl = ioread32(&bp->irig_out->ctrl);
                on = ctrl & IRIG_M_CTRL_ENABLE;
                val = ioread32(&bp->irig_out->status);
-               gpio_multi_map(buf, sma_out, 4, "sma3", "sma4", "----");
+               gpio_output_map(buf, bp, sma_val, 4);
                seq_printf(s, "%7s: %s, error: %d, mode %d, out: %s\n", "IRIG",
                           on ? " ON" : "OFF", val, (ctrl >> 16), buf);
        }
@@ -2179,15 +3118,15 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
        if (bp->irig_in) {
                on = ioread32(&bp->irig_in->ctrl) & IRIG_S_CTRL_ENABLE;
                val = ioread32(&bp->irig_in->status);
-               src = gpio_map(sma_in, 4, "sma1", "sma2", "----");
+               gpio_input_map(buf, bp, sma_val, 4, NULL);
                seq_printf(s, "%7s: %s, error: %d, src: %s\n", "IRIG in",
-                          on ? " ON" : "OFF", val, src);
+                          on ? " ON" : "OFF", val, buf);
        }
 
        if (bp->dcf_out) {
                on = ioread32(&bp->dcf_out->ctrl) & DCF_M_CTRL_ENABLE;
                val = ioread32(&bp->dcf_out->status);
-               gpio_multi_map(buf, sma_out, 5, "sma3", "sma4", "----");
+               gpio_output_map(buf, bp, sma_val, 5);
                seq_printf(s, "%7s: %s, error: %d, out: %s\n", "DCF",
                           on ? " ON" : "OFF", val, buf);
        }
@@ -2195,9 +3134,9 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
        if (bp->dcf_in) {
                on = ioread32(&bp->dcf_in->ctrl) & DCF_S_CTRL_ENABLE;
                val = ioread32(&bp->dcf_in->status);
-               src = gpio_map(sma_in, 5, "sma1", "sma2", "----");
+               gpio_input_map(buf, bp, sma_val, 5, NULL);
                seq_printf(s, "%7s: %s, error: %d, src: %s\n", "DCF in",
-                          on ? " ON" : "OFF", val, src);
+                          on ? " ON" : "OFF", val, buf);
        }
 
        if (bp->nmea_out) {
@@ -2210,12 +3149,13 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
        /* compute src for PPS1, used below. */
        if (bp->pps_select) {
                val = ioread32(&bp->pps_select->gpio1);
+               src = &buf[80];
                if (val & 0x01)
-                       src = gpio_map(sma_in, 0, "sma1", "sma2", "----");
+                       gpio_input_map(src, bp, sma_val, 0, NULL);
                else if (val & 0x02)
                        src = "MAC";
                else if (val & 0x04)
-                       src = "GNSS";
+                       src = "GNSS1";
                else
                        src = "----";
        } else {
@@ -2248,8 +3188,8 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
        /* reuses PPS1 src from earlier */
        seq_printf(s, "MAC PPS1 src: %s\n", src);
 
-       src = gpio_map(sma_in, 1, "sma1", "sma2", "GNSS2");
-       seq_printf(s, "MAC PPS2 src: %s\n", src);
+       gpio_input_map(buf, bp, sma_val, 1, "GNSS2");
+       seq_printf(s, "MAC PPS2 src: %s\n", buf);
 
        if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, &sts)) {
                struct timespec64 sys_ts;
@@ -2447,6 +3387,7 @@ ptp_ocp_complete(struct ptp_ocp *bp)
 {
        struct pps_device *pps;
        char buf[32];
+       int i, err;
 
        if (bp->gnss_port != -1) {
                sprintf(buf, "ttyS%d", bp->gnss_port);
@@ -2471,8 +3412,13 @@ ptp_ocp_complete(struct ptp_ocp *bp)
        if (pps)
                ptp_ocp_symlink(bp, pps->dev, "pps");
 
-       if (device_add_groups(&bp->dev, timecard_groups))
-               pr_err("device add groups failed\n");
+       for (i = 0; bp->attr_tbl[i].cap; i++) {
+               if (!(bp->attr_tbl[i].cap & bp->fw_cap))
+                       continue;
+               err = sysfs_create_group(&bp->dev.kobj, bp->attr_tbl[i].group);
+               if (err)
+                       return err;
+       }
 
        ptp_ocp_debugfs_add_device(bp);
 
@@ -2520,17 +3466,14 @@ ptp_ocp_info(struct ptp_ocp *bp)
 
        ptp_ocp_phc_info(bp);
 
-       if (bp->image) {
-               u32 ver = ioread32(&bp->image->version);
+       dev_info(dev, "version %x\n", bp->fw_version);
+       if (bp->fw_version & 0xffff)
+               dev_info(dev, "regular image, version %d\n",
+                        bp->fw_version & 0xffff);
+       else
+               dev_info(dev, "golden image, version %d\n",
+                        bp->fw_version >> 16);
 
-               dev_info(dev, "version %x\n", ver);
-               if (ver & 0xffff)
-                       dev_info(dev, "regular image, version %d\n",
-                                ver & 0xffff);
-               else
-                       dev_info(dev, "golden image, version %d\n",
-                                ver >> 16);
-       }
        ptp_ocp_serial_info(dev, "GNSS", bp->gnss_port, 115200);
        ptp_ocp_serial_info(dev, "GNSS2", bp->gnss2_port, 115200);
        ptp_ocp_serial_info(dev, "MAC", bp->mac_port, 57600);
@@ -2548,17 +3491,22 @@ static void
 ptp_ocp_detach_sysfs(struct ptp_ocp *bp)
 {
        struct device *dev = &bp->dev;
+       int i;
 
        sysfs_remove_link(&dev->kobj, "ttyGNSS");
        sysfs_remove_link(&dev->kobj, "ttyMAC");
        sysfs_remove_link(&dev->kobj, "ptp");
        sysfs_remove_link(&dev->kobj, "pps");
-       device_remove_groups(dev, timecard_groups);
+       if (bp->attr_tbl)
+               for (i = 0; bp->attr_tbl[i].cap; i++)
+                       sysfs_remove_group(&dev->kobj, bp->attr_tbl[i].group);
 }
 
 static void
 ptp_ocp_detach(struct ptp_ocp *bp)
 {
+       int i;
+
        ptp_ocp_debugfs_remove_device(bp);
        ptp_ocp_detach_sysfs(bp);
        if (timer_pending(&bp->watchdog))
@@ -2569,8 +3517,15 @@ ptp_ocp_detach(struct ptp_ocp *bp)
                ptp_ocp_unregister_ext(bp->ts1);
        if (bp->ts2)
                ptp_ocp_unregister_ext(bp->ts2);
+       if (bp->ts3)
+               ptp_ocp_unregister_ext(bp->ts3);
+       if (bp->ts4)
+               ptp_ocp_unregister_ext(bp->ts4);
        if (bp->pps)
                ptp_ocp_unregister_ext(bp->pps);
+       for (i = 0; i < 4; i++)
+               if (bp->signal_out[i])
+                       ptp_ocp_unregister_ext(bp->signal_out[i]);
        if (bp->gnss_port != -1)
                serial8250_unregister_port(bp->gnss_port);
        if (bp->gnss2_port != -1)
@@ -2589,6 +3544,7 @@ ptp_ocp_detach(struct ptp_ocp *bp)
                pci_free_irq_vectors(bp->pdev);
        if (bp->ptp)
                ptp_clock_unregister(bp->ptp);
+       kfree(bp->ptp_info.pin_config);
        device_unregister(&bp->dev);
 }
 
@@ -2608,7 +3564,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_enable_device\n");
-               goto out_unregister;
+               goto out_free;
        }
 
        bp = devlink_priv(devlink);
@@ -2621,7 +3577,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         * allow this - if not all of the IRQ's are returned, skip the
         * extra devices and just register the clock.
         */
-       err = pci_alloc_irq_vectors(pdev, 1, 11, PCI_IRQ_MSI | PCI_IRQ_MSIX);
+       err = pci_alloc_irq_vectors(pdev, 1, 17, PCI_IRQ_MSI | PCI_IRQ_MSIX);
        if (err < 0) {
                dev_err(&pdev->dev, "alloc_irq_vectors err: %d\n", err);
                goto out;
@@ -2654,7 +3610,7 @@ out:
        pci_set_drvdata(pdev, NULL);
 out_disable:
        pci_disable_device(pdev);
-out_unregister:
+out_free:
        devlink_free(devlink);
        return err;
 }
index 5db591c..dfb84bb 100644
@@ -1394,7 +1394,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
 
        if (len < TH_HEADER_LENGTH) {
                CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
-                               "%s(%s): packet length %d to short",
+                               "%s(%s): packet length %d too short",
                                        CTCM_FUNTAIL, dev->name, len);
                priv->stats.rx_dropped++;
                priv->stats.rx_length_errors++;
index 5ea7eeb..e0fdd54 100644
@@ -166,7 +166,7 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
                ch->logflags = 0;
                priv->stats.rx_packets++;
                priv->stats.rx_bytes += skblen;
-               netif_rx_ni(skb);
+               netif_rx(skb);
                if (len > 0) {
                        skb_pull(pskb, header->length);
                        if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
index 981e7b1..65aa0a9 100644
@@ -620,11 +620,7 @@ static void netiucv_unpack_skb(struct iucv_connection *conn,
                pskb->ip_summed = CHECKSUM_UNNECESSARY;
                privptr->stats.rx_packets++;
                privptr->stats.rx_bytes += skb->len;
-               /*
-                * Since receiving is always initiated from a tasklet (in iucv.c),
-                * we must use netif_rx_ni() instead of netif_rx()
-                */
-               netif_rx_ni(skb);
+               netif_rx(skb);
                skb_pull(pskb, header->next);
                skb_put(pskb, NETIUCV_HDRLEN);
        }
index 12c10a5..7f42160 100644
@@ -233,12 +233,11 @@ static void scsifront_gnttab_done(struct vscsifrnt_info *info,
                return;
 
        for (i = 0; i < shadow->nr_grants; i++) {
-               if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
+               if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) {
                        shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
                                     "grant still in use by backend\n");
                        BUG();
                }
-               gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
        }
 
        kfree(shadow->sg);
index 6f0a570..6aae0b1 100644
@@ -53,7 +53,8 @@ static const struct mtk_mmsys_routes mmsys_mt8192_routing_table[] = {
                MT8192_AAL0_SEL_IN_CCORR0
        }, {
                DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
-               MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0
+               MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0,
+               MT8192_DSI0_SEL_IN_DITHER0
        }, {
                DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
                MT8192_DISP_RDMA0_SOUT_SEL, MT8192_RDMA0_SOUT_COLOR0,
index 2746d05..0fb3631 100644
@@ -204,7 +204,7 @@ module_platform_driver(exynos_chipid_driver);
 
 MODULE_DESCRIPTION("Samsung Exynos ChipID controller and ASV driver");
 MODULE_AUTHOR("Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>");
-MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
 MODULE_AUTHOR("Pankaj Dubey <pankaj.dubey@samsung.com>");
 MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
 MODULE_LICENSE("GPL");
index ead9a13..646cff6 100644
@@ -1012,10 +1012,10 @@ int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
        int i, ret;
 
        if (vmalloced_buf || kmap_buf) {
-               desc_len = min_t(int, max_seg_size, PAGE_SIZE);
+               desc_len = min_t(unsigned int, max_seg_size, PAGE_SIZE);
                sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
        } else if (virt_addr_valid(buf)) {
-               desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
+               desc_len = min_t(unsigned int, max_seg_size, ctlr->max_dma_len);
                sgs = DIV_ROUND_UP(len, desc_len);
        } else {
                return -EINVAL;
index 493ed48..8ebb21d 100644
@@ -76,14 +76,15 @@ static void tx_complete(void *arg)
 
 static int gdm_lte_rx(struct sk_buff *skb, struct nic *nic, int nic_type)
 {
-       int ret;
+       int ret, len;
 
-       ret = netif_rx_ni(skb);
+       len = skb->len + ETH_HLEN;
+       ret = netif_rx(skb);
        if (ret == NET_RX_DROP) {
                nic->stats.rx_dropped++;
        } else {
                nic->stats.rx_packets++;
-               nic->stats.rx_bytes += skb->len + ETH_HLEN;
+               nic->stats.rx_bytes += len;
        }
 
        return 0;
index 0f82f50..49a3f45 100644
@@ -5907,6 +5907,7 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
        struct sta_info *psta_bmc;
        struct list_head *xmitframe_plist, *xmitframe_phead, *tmp;
        struct xmit_frame *pxmitframe = NULL;
+       struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
        struct sta_priv  *pstapriv = &padapter->stapriv;
 
        /* for BC/MC Frames */
@@ -5917,7 +5918,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
        if ((pstapriv->tim_bitmap&BIT(0)) && (psta_bmc->sleepq_len > 0)) {
                msleep(10);/*  10ms, ATIM(HIQ) Windows */
 
-               spin_lock_bh(&psta_bmc->sleep_q.lock);
+               /* spin_lock_bh(&psta_bmc->sleep_q.lock); */
+               spin_lock_bh(&pxmitpriv->lock);
 
                xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
                list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) {
@@ -5940,7 +5942,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
                        rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
                }
 
-               spin_unlock_bh(&psta_bmc->sleep_q.lock);
+               /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
+               spin_unlock_bh(&pxmitpriv->lock);
 
                /* check hi queue and bmc_sleepq */
                rtw_chk_hi_queue_cmd(padapter);
index 41bfca5..105fe0e 100644 (file)
@@ -957,8 +957,10 @@ static signed int validate_recv_ctrl_frame(struct adapter *padapter, union recv_
                if ((psta->state&WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap&BIT(psta->aid))) {
                        struct list_head        *xmitframe_plist, *xmitframe_phead;
                        struct xmit_frame *pxmitframe = NULL;
+                       struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
 
-                       spin_lock_bh(&psta->sleep_q.lock);
+                       /* spin_lock_bh(&psta->sleep_q.lock); */
+                       spin_lock_bh(&pxmitpriv->lock);
 
                        xmitframe_phead = get_list_head(&psta->sleep_q);
                        xmitframe_plist = get_next(xmitframe_phead);
@@ -989,10 +991,12 @@ static signed int validate_recv_ctrl_frame(struct adapter *padapter, union recv_
                                        update_beacon(padapter, WLAN_EID_TIM, NULL, true);
                                }
 
-                               spin_unlock_bh(&psta->sleep_q.lock);
+                               /* spin_unlock_bh(&psta->sleep_q.lock); */
+                               spin_unlock_bh(&pxmitpriv->lock);
 
                        } else {
-                               spin_unlock_bh(&psta->sleep_q.lock);
+                               /* spin_unlock_bh(&psta->sleep_q.lock); */
+                               spin_unlock_bh(&pxmitpriv->lock);
 
                                if (pstapriv->tim_bitmap&BIT(psta->aid)) {
                                        if (psta->sleepq_len == 0) {
index 0c9ea15..beb11d8 100644 (file)
@@ -293,48 +293,46 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
 
        /* list_del_init(&psta->wakeup_list); */
 
-       spin_lock_bh(&psta->sleep_q.lock);
+       spin_lock_bh(&pxmitpriv->lock);
+
        rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q);
        psta->sleepq_len = 0;
-       spin_unlock_bh(&psta->sleep_q.lock);
-
-       spin_lock_bh(&pxmitpriv->lock);
 
        /* vo */
-       spin_lock_bh(&pstaxmitpriv->vo_q.sta_pending.lock);
+       /* spin_lock_bh(&(pxmitpriv->vo_pending.lock)); */
        rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending);
        list_del_init(&(pstaxmitpriv->vo_q.tx_pending));
        phwxmit = pxmitpriv->hwxmits;
        phwxmit->accnt -= pstaxmitpriv->vo_q.qcnt;
        pstaxmitpriv->vo_q.qcnt = 0;
-       spin_unlock_bh(&pstaxmitpriv->vo_q.sta_pending.lock);
+       /* spin_unlock_bh(&(pxmitpriv->vo_pending.lock)); */
 
        /* vi */
-       spin_lock_bh(&pstaxmitpriv->vi_q.sta_pending.lock);
+       /* spin_lock_bh(&(pxmitpriv->vi_pending.lock)); */
        rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending);
        list_del_init(&(pstaxmitpriv->vi_q.tx_pending));
        phwxmit = pxmitpriv->hwxmits+1;
        phwxmit->accnt -= pstaxmitpriv->vi_q.qcnt;
        pstaxmitpriv->vi_q.qcnt = 0;
-       spin_unlock_bh(&pstaxmitpriv->vi_q.sta_pending.lock);
+       /* spin_unlock_bh(&(pxmitpriv->vi_pending.lock)); */
 
        /* be */
-       spin_lock_bh(&pstaxmitpriv->be_q.sta_pending.lock);
+       /* spin_lock_bh(&(pxmitpriv->be_pending.lock)); */
        rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending);
        list_del_init(&(pstaxmitpriv->be_q.tx_pending));
        phwxmit = pxmitpriv->hwxmits+2;
        phwxmit->accnt -= pstaxmitpriv->be_q.qcnt;
        pstaxmitpriv->be_q.qcnt = 0;
-       spin_unlock_bh(&pstaxmitpriv->be_q.sta_pending.lock);
+       /* spin_unlock_bh(&(pxmitpriv->be_pending.lock)); */
 
        /* bk */
-       spin_lock_bh(&pstaxmitpriv->bk_q.sta_pending.lock);
+       /* spin_lock_bh(&(pxmitpriv->bk_pending.lock)); */
        rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending);
        list_del_init(&(pstaxmitpriv->bk_q.tx_pending));
        phwxmit = pxmitpriv->hwxmits+3;
        phwxmit->accnt -= pstaxmitpriv->bk_q.qcnt;
        pstaxmitpriv->bk_q.qcnt = 0;
-       spin_unlock_bh(&pstaxmitpriv->bk_q.sta_pending.lock);
+       /* spin_unlock_bh(&(pxmitpriv->bk_pending.lock)); */
 
        spin_unlock_bh(&pxmitpriv->lock);
 
index 13b8bd5..f466bfd 100644 (file)
@@ -1734,12 +1734,15 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram
        struct list_head *plist, *phead, *tmp;
        struct  xmit_frame      *pxmitframe;
 
+       spin_lock_bh(&pframequeue->lock);
+
        phead = get_list_head(pframequeue);
        list_for_each_safe(plist, tmp, phead) {
                pxmitframe = list_entry(plist, struct xmit_frame, list);
 
                rtw_free_xmitframe(pxmitpriv, pxmitframe);
        }
+       spin_unlock_bh(&pframequeue->lock);
 }
 
 s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe)
@@ -1794,7 +1797,6 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
        struct sta_info *psta;
        struct tx_servq *ptxservq;
        struct pkt_attrib       *pattrib = &pxmitframe->attrib;
-       struct xmit_priv *xmit_priv = &padapter->xmitpriv;
        struct hw_xmit  *phwxmits =  padapter->xmitpriv.hwxmits;
        signed int res = _SUCCESS;
 
@@ -1812,14 +1814,12 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
 
        ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
 
-       spin_lock_bh(&xmit_priv->lock);
        if (list_empty(&ptxservq->tx_pending))
                list_add_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue));
 
        list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending));
        ptxservq->qcnt++;
        phwxmits[ac_index].accnt++;
-       spin_unlock_bh(&xmit_priv->lock);
 
 exit:
 
@@ -2202,10 +2202,11 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
        struct list_head *xmitframe_plist, *xmitframe_phead, *tmp;
        struct xmit_frame *pxmitframe = NULL;
        struct sta_priv *pstapriv = &padapter->stapriv;
+       struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
 
        psta_bmc = rtw_get_bcmc_stainfo(padapter);
 
-       spin_lock_bh(&psta->sleep_q.lock);
+       spin_lock_bh(&pxmitpriv->lock);
 
        xmitframe_phead = get_list_head(&psta->sleep_q);
        list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) {
@@ -2306,7 +2307,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
 
 _exit:
 
-       spin_unlock_bh(&psta->sleep_q.lock);
+       spin_unlock_bh(&pxmitpriv->lock);
 
        if (update_mask)
                update_beacon(padapter, WLAN_EID_TIM, NULL, true);
@@ -2318,8 +2319,9 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
        struct list_head *xmitframe_plist, *xmitframe_phead, *tmp;
        struct xmit_frame *pxmitframe = NULL;
        struct sta_priv *pstapriv = &padapter->stapriv;
+       struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
 
-       spin_lock_bh(&psta->sleep_q.lock);
+       spin_lock_bh(&pxmitpriv->lock);
 
        xmitframe_phead = get_list_head(&psta->sleep_q);
        list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) {
@@ -2372,7 +2374,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
                }
        }
 
-       spin_unlock_bh(&psta->sleep_q.lock);
+       spin_unlock_bh(&pxmitpriv->lock);
 }
 
 void enqueue_pending_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
index b5d5e92..1581043 100644 (file)
@@ -502,7 +502,9 @@ s32 rtl8723bs_hal_xmit(
                        rtw_issue_addbareq_cmd(padapter, pxmitframe);
        }
 
+       spin_lock_bh(&pxmitpriv->lock);
        err = rtw_xmitframe_enqueue(padapter, pxmitframe);
+       spin_unlock_bh(&pxmitpriv->lock);
        if (err != _SUCCESS) {
                rtw_free_xmitframe(pxmitpriv, pxmitframe);
 
index c94fa7d..1b343b4 100644 (file)
@@ -102,13 +102,17 @@ there are several "locks" in mlme_priv,
 since mlme_priv is a shared resource between many threads,
 like ISR/Call-Back functions, the OID handlers, and even timer functions.
 
-
 Each struct __queue has its own locks, already.
-Other items are protected by mlme_priv.lock.
+Other items in mlme_priv are protected by mlme_priv.lock, while items in
+xmit_priv are protected by xmit_priv.lock.
 
 To avoid possible deadlock, any thread trying to modify mlme_priv
 SHALL not hold more than one lock at a time!
 
+The only exception is that queue functions which take the __queue.lock
+may be called with the xmit_priv.lock held. In this case the order
+MUST always be: first take xmit_priv.lock, then call any queue function
+which takes __queue.lock.
 */
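
(Editor's note: read together with the rtw_free_xmitframe_queue() hunk further down, this is a classic two-level lock ordering: the per-queue __queue.lock now lives inside the helper, so any path that also needs xmit_priv.lock must acquire it first. A minimal pthread analogue of the documented nesting, names borrowed from the driver, structure simplified:)

    /* Sketch: outer lock first, inner queue lock inside the helper. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t xmit_priv_lock = PTHREAD_MUTEX_INITIALIZER; /* outer */
    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;     /* inner */

    static void free_queue(void)   /* like rtw_free_xmitframe_queue() */
    {
            pthread_mutex_lock(&queue_lock);
            /* ... walk and free the queued frames ... */
            pthread_mutex_unlock(&queue_lock);
    }

    static void free_stainfo(void) /* like rtw_free_stainfo() */
    {
            pthread_mutex_lock(&xmit_priv_lock); /* MUST come first */
            free_queue();                        /* inner lock nests inside */
            pthread_mutex_unlock(&xmit_priv_lock);
    }

    int main(void)
    {
            free_stainfo();
            puts("ok");
            return 0;
    }
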
 
 
index 2555004..e04fc66 100644 (file)
@@ -255,7 +255,7 @@ static int p80211_convert_to_ether(struct wlandevice *wlandev,
        if (skb_p80211_to_ether(wlandev, wlandev->ethconv, skb) == 0) {
                wlandev->netdev->stats.rx_packets++;
                wlandev->netdev->stats.rx_bytes += skb->len;
-               netif_rx_ni(skb);
+               netif_rx(skb);
                return 0;
        }
 
@@ -290,7 +290,7 @@ static void p80211netdev_rx_bh(struct tasklet_struct *t)
 
                                dev->stats.rx_packets++;
                                dev->stats.rx_bytes += skb->len;
-                               netif_rx_ni(skb);
+                               netif_rx(skb);
                                continue;
                        } else {
                                if (!p80211_convert_to_ether(wlandev, skb))
index a16dd4d..73e68cc 100644 (file)
@@ -419,11 +419,12 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p)
        for (i = 0; i < tz->trips; i++) {
 
                enum thermal_trip_type type;
-               int temp, hyst;
+               int temp, hyst = 0;
 
                tz->ops->get_trip_type(tz, i, &type);
                tz->ops->get_trip_temp(tz, i, &temp);
-               tz->ops->get_trip_hyst(tz, i, &hyst);
+               if (tz->ops->get_trip_hyst)
+                       tz->ops->get_trip_hyst(tz, i, &hyst);
 
                if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, i) ||
                    nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, type) ||
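
(Editor's note: get_trip_hyst() is an optional operation in the thermal zone ops, so the fix does two things: it initializes hyst so an absent callback cannot leak an uninitialized value into the netlink reply, and it calls the op only when it is non-NULL. A tiny sketch of the guard pattern, with a hypothetical ops struct:)

    /* Sketch: optional-callback guard with a sane default. */
    #include <stdio.h>

    struct ops {
            void (*get_hyst)(int idx, int *hyst); /* may be NULL */
    };

    static void report(const struct ops *ops, int idx)
    {
            int hyst = 0; /* default if the op is absent */

            if (ops->get_hyst)
                    ops->get_hyst(idx, &hyst);

            printf("trip %d hyst=%d\n", idx, hyst);
    }

    int main(void)
    {
            struct ops none = { .get_hyst = NULL };
            report(&none, 0); /* prints hyst=0 instead of garbage */
            return 0;
    }
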
index be09fd9..19b8c7e 100644 (file)
@@ -716,8 +716,9 @@ static int xenhcd_map_urb_for_request(struct xenhcd_info *info, struct urb *urb,
        return 0;
 }
 
-static void xenhcd_gnttab_done(struct usb_shadow *shadow)
+static void xenhcd_gnttab_done(struct xenhcd_info *info, unsigned int id)
 {
+       struct usb_shadow *shadow = info->shadow + id;
        int nr_segs = 0;
        int i;
 
@@ -726,8 +727,10 @@ static void xenhcd_gnttab_done(struct usb_shadow *shadow)
        if (xenusb_pipeisoc(shadow->req.pipe))
                nr_segs += shadow->req.u.isoc.nr_frame_desc_segs;
 
-       for (i = 0; i < nr_segs; i++)
-               gnttab_end_foreign_access(shadow->req.seg[i].gref, 0, 0UL);
+       for (i = 0; i < nr_segs; i++) {
+               if (!gnttab_try_end_foreign_access(shadow->req.seg[i].gref))
+                       xenhcd_set_error(info, "backend didn't release grant");
+       }
 
        shadow->req.nr_buffer_segs = 0;
        shadow->req.u.isoc.nr_frame_desc_segs = 0;
@@ -841,7 +844,9 @@ static void xenhcd_cancel_all_enqueued_urbs(struct xenhcd_info *info)
        list_for_each_entry_safe(urbp, tmp, &info->in_progress_list, list) {
                req_id = urbp->req_id;
                if (!urbp->unlinked) {
-                       xenhcd_gnttab_done(&info->shadow[req_id]);
+                       xenhcd_gnttab_done(info, req_id);
+                       if (info->error)
+                               return;
                        if (urbp->urb->status == -EINPROGRESS)
                                /* not dequeued */
                                xenhcd_giveback_urb(info, urbp->urb,
@@ -942,8 +947,7 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info)
        rp = info->urb_ring.sring->rsp_prod;
        if (RING_RESPONSE_PROD_OVERFLOW(&info->urb_ring, rp)) {
                xenhcd_set_error(info, "Illegal index on urb-ring");
-               spin_unlock_irqrestore(&info->lock, flags);
-               return 0;
+               goto err;
        }
        rmb(); /* ensure we see queued responses up to "rp" */
 
@@ -952,11 +956,13 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info)
                id = res.id;
                if (id >= XENUSB_URB_RING_SIZE) {
                        xenhcd_set_error(info, "Illegal data on urb-ring");
-                       continue;
+                       goto err;
                }
 
                if (likely(xenusb_pipesubmit(info->shadow[id].req.pipe))) {
-                       xenhcd_gnttab_done(&info->shadow[id]);
+                       xenhcd_gnttab_done(info, id);
+                       if (info->error)
+                               goto err;
                        urb = info->shadow[id].urb;
                        if (likely(urb)) {
                                urb->actual_length = res.actual_length;
@@ -978,6 +984,10 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info)
        spin_unlock_irqrestore(&info->lock, flags);
 
        return more_to_do;
+
+ err:
+       spin_unlock_irqrestore(&info->lock, flags);
+       return 0;
 }
 
 static int xenhcd_conn_notify(struct xenhcd_info *info)
index f648f1c..d0f9107 100644 (file)
@@ -1563,11 +1563,27 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 
        switch (cmd) {
        case VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET:
+               /* This mq feature check aligns with pre-existing userspace
+                * implementation.
+                *
+                * Without it, an untrusted driver could fake a multiqueue config
+                * request down to a non-mq device, which may cause the kernel to
+                * panic due to uninitialized resources for extra vqs. Even with
+                * a well-behaved guest driver, it is not expected to allow
+                * changing the number of vqs on a non-mq device.
+                */
+               if (!MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ))
+                       break;
+
                read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)&mq, sizeof(mq));
                if (read != sizeof(mq))
                        break;
 
                newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs);
+               if (newqps < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
+                   newqps > mlx5_vdpa_max_qps(mvdev->max_vqs))
+                       break;
+
                if (ndev->cur_num_vqs == 2 * newqps) {
                        status = VIRTIO_NET_OK;
                        break;
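
(Editor's note: the two added checks are independent: the first rejects the command outright when VIRTIO_NET_F_MQ was never negotiated, the second bounds the guest-supplied queue-pair count against the device limit. A standalone sketch of that validation order; the bit value and limits are stand-ins, not the mlx5 definitions:)

    /* Sketch: validating a guest-controlled queue-pair count. */
    #include <stdbool.h>
    #include <stdio.h>

    #define F_MQ        (1ull << 22) /* stand-in for VIRTIO_NET_F_MQ */
    #define QPAIRS_MIN  1            /* stand-in for the _MQ_VQ_PAIRS_MIN */
    #define QPAIRS_MAX  8            /* hypothetical device maximum */

    static bool mq_cmd_ok(unsigned long long features, unsigned int newqps)
    {
            if (!(features & F_MQ))  /* mq not negotiated: reject */
                    return false;
            if (newqps < QPAIRS_MIN || newqps > QPAIRS_MAX)
                    return false;    /* out-of-range request: reject */
            return true;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   mq_cmd_ok(0, 2),     /* 0: non-mq device */
                   mq_cmd_ok(F_MQ, 0),  /* 0: below minimum */
                   mq_cmd_ok(F_MQ, 4)); /* 1: accepted */
            return 0;
    }
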
@@ -1897,11 +1913,25 @@ static u64 mlx5_vdpa_get_device_features(struct vdpa_device *vdev)
        return ndev->mvdev.mlx_features;
 }
 
-static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features)
+static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features)
 {
+       /* Minimum features to expect */
        if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)))
                return -EOPNOTSUPP;
 
+        * Double-check the feature combination sent down by the driver.
+        * Fail invalid features due to absence of the feature they depend on.
+        *
+        * Per VIRTIO v1.1 specification, section 5.1.3.1 Feature bit
+        * requirements: "VIRTIO_NET_F_MQ Requires VIRTIO_NET_F_CTRL_VQ".
+        * By failing the invalid features sent down by untrusted drivers,
+        * we're assured that the assumptions made by is_index_valid() and
+        * is_ctrl_vq_idx() will not be compromised.
+        */
+       if ((features & (BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) ==
+            BIT_ULL(VIRTIO_NET_F_MQ))
+               return -EINVAL;
+
        return 0;
 }
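
(Editor's note: the dependency check reduces to one mask comparison: if the MQ bit is set but the CTRL_VQ bit it requires is not, the masked value equals F_MQ exactly and the features are rejected. A minimal sketch; the bit positions shown are the usual virtio-net ones but should be treated as illustrative:)

    /* Sketch: rejecting a feature whose dependency is missing. */
    #include <stdio.h>

    #define F_MQ       (1ull << 22) /* stand-in for VIRTIO_NET_F_MQ */
    #define F_CTRL_VQ  (1ull << 17) /* stand-in for VIRTIO_NET_F_CTRL_VQ */

    static int verify(unsigned long long features)
    {
            /* MQ without CTRL_VQ violates the spec dependency: reject. */
            if ((features & (F_MQ | F_CTRL_VQ)) == F_MQ)
                    return -1;
            return 0;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   verify(F_MQ), verify(F_MQ | F_CTRL_VQ), verify(0));
            /* -1 0 0 */
            return 0;
    }
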
 
@@ -1977,7 +2007,7 @@ static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
 
        print_features(mvdev, features, true);
 
-       err = verify_min_features(mvdev, features);
+       err = verify_driver_features(mvdev, features);
        if (err)
                return err;
 
index 9846c9d..1ea5254 100644 (file)
@@ -393,7 +393,7 @@ static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
         * If it does happen we assume a legacy guest.
         */
        if (!vdev->features_valid)
-               vdpa_set_features(vdev, 0, true);
+               vdpa_set_features_unlocked(vdev, 0);
        ops->get_config(vdev, offset, buf, len);
 }
 
index 2b1143f..0a4d93e 100644 (file)
@@ -294,7 +294,7 @@ vduse_domain_alloc_iova(struct iova_domain *iovad,
 
        iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true);
 
-       return iova_pfn << shift;
+       return (dma_addr_t)iova_pfn << shift;
 }
 
 static void vduse_domain_free_iova(struct iova_domain *iovad,
index a57e381..cce101e 100644 (file)
@@ -533,8 +533,8 @@ static void vp_vdpa_remove(struct pci_dev *pdev)
 {
        struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev);
 
-       vdpa_unregister_device(&vp_vdpa->vdpa);
        vp_modern_remove(&vp_vdpa->mdev);
+       vdpa_unregister_device(&vp_vdpa->vdpa);
 }
 
 static struct pci_driver vp_vdpa_driver = {
index 670d56c..40b0983 100644 (file)
@@ -57,6 +57,17 @@ int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb,
        if (last < start)
                return -EFAULT;
 
+       /* If the range being mapped is [0, ULONG_MAX], split it into two entries
+        * otherwise its size would overflow u64.
+        */
+       if (start == 0 && last == ULONG_MAX) {
+               u64 mid = last / 2;
+
+               vhost_iotlb_add_range_ctx(iotlb, start, mid, addr, perm, opaque);
+               addr += mid + 1;
+               start = mid + 1;
+       }
+
        if (iotlb->limit &&
            iotlb->nmaps == iotlb->limit &&
            iotlb->flags & VHOST_IOTLB_FLAG_RETIRE) {
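
(Editor's note: the split exists because an inclusive range stores its size as last - start + 1; for [0, ULONG_MAX] that is 2^64, which wraps to 0 in a u64. Halving the range once keeps both sizes representable. A standalone sketch of the arithmetic, plain C, not the vhost code:)

    /* Sketch: splitting [0, ULONG_MAX] so "last - start + 1" cannot wrap. */
    #include <limits.h>
    #include <stdio.h>

    static void add_range(unsigned long start, unsigned long last)
    {
            if (start == 0 && last == ULONG_MAX) {
                    unsigned long mid = last / 2;

                    add_range(start, mid); /* first half */
                    start = mid + 1;       /* fall through with second half */
            }
            printf("map [%#lx, %#lx], size %#lx\n",
                   start, last, last - start + 1); /* never 0 now */
    }

    int main(void)
    {
            add_range(0, ULONG_MAX);
            return 0;
    }
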
index 8515398..ec5249e 100644 (file)
@@ -286,7 +286,7 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
        if (copy_from_user(&features, featurep, sizeof(features)))
                return -EFAULT;
 
-       if (vdpa_set_features(vdpa, features, false))
+       if (vdpa_set_features(vdpa, features))
                return -EINVAL;
 
        return 0;
index 59edb5a..1768362 100644 (file)
@@ -1170,6 +1170,13 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
                goto done;
        }
 
+       if ((msg.type == VHOST_IOTLB_UPDATE ||
+            msg.type == VHOST_IOTLB_INVALIDATE) &&
+            msg.size == 0) {
+               ret = -EINVAL;
+               goto done;
+       }
+
        if (dev->msg_handler)
                ret = dev->msg_handler(dev, &msg);
        else
@@ -1981,7 +1988,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
        return 0;
 }
 
-static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
+static int vhost_update_avail_event(struct vhost_virtqueue *vq)
 {
        if (vhost_put_avail_event(vq))
                return -EFAULT;
@@ -2527,7 +2534,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
                        return false;
                }
        } else {
-               r = vhost_update_avail_event(vq, vq->avail_idx);
+               r = vhost_update_avail_event(vq);
                if (r) {
                        vq_err(vq, "Failed to update avail event index at %p: %d\n",
                               vhost_avail_event(vq), r);
index 37f0b42..e6c9d41 100644 (file)
@@ -753,7 +753,8 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
 
        /* Iterating over all connections for all CIDs to find orphans is
         * inefficient.  Room for improvement here. */
-       vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
+       vsock_for_each_connected_socket(&vhost_transport.transport,
+                                       vhost_vsock_reset_orphans);
 
        /* Don't check the owner, because we are in the release path, so we
         * need to stop the vsock device in any case.
index 34f80b7..492fc26 100644 (file)
@@ -105,7 +105,6 @@ config VIRTIO_BALLOON
 
 config VIRTIO_MEM
        tristate "Virtio mem driver"
-       default m
        depends on X86_64
        depends on VIRTIO
        depends on MEMORY_HOTPLUG
index 00ac9db..22f15f4 100644 (file)
@@ -166,14 +166,13 @@ void virtio_add_status(struct virtio_device *dev, unsigned int status)
 }
 EXPORT_SYMBOL_GPL(virtio_add_status);
 
-int virtio_finalize_features(struct virtio_device *dev)
+/* Do some validation, then set FEATURES_OK */
+static int virtio_features_ok(struct virtio_device *dev)
 {
-       int ret = dev->config->finalize_features(dev);
        unsigned status;
+       int ret;
 
        might_sleep();
-       if (ret)
-               return ret;
 
        ret = arch_has_restricted_virtio_memory_access();
        if (ret) {
@@ -202,8 +201,23 @@ int virtio_finalize_features(struct virtio_device *dev)
        }
        return 0;
 }
-EXPORT_SYMBOL_GPL(virtio_finalize_features);
 
+/**
+ * virtio_reset_device - quiesce device for removal
+ * @dev: the device to reset
+ *
+ * Prevents device from sending interrupts and accessing memory.
+ *
+ * Generally used for cleanup during driver / device removal.
+ *
+ * Once this has been invoked, caller must ensure that
+ * virtqueue_notify / virtqueue_kick are not in progress.
+ *
+ * Note: this guarantees that vq callbacks are not in progress, however caller
+ * is responsible for preventing access from other contexts, such as a system
+ * call/workqueue/bh.  Invoking virtio_break_device then flushing any such
+ * contexts is one way to handle that.
+ */
 void virtio_reset_device(struct virtio_device *dev)
 {
        dev->config->reset(dev);
@@ -245,17 +259,6 @@ static int virtio_dev_probe(struct device *_d)
                driver_features_legacy = driver_features;
        }
 
-       /*
-        * Some devices detect legacy solely via F_VERSION_1. Write
-        * F_VERSION_1 to force LE config space accesses before FEATURES_OK for
-        * these when needed.
-        */
-       if (drv->validate && !virtio_legacy_is_little_endian()
-                         && device_features & BIT_ULL(VIRTIO_F_VERSION_1)) {
-               dev->features = BIT_ULL(VIRTIO_F_VERSION_1);
-               dev->config->finalize_features(dev);
-       }
-
        if (device_features & (1ULL << VIRTIO_F_VERSION_1))
                dev->features = driver_features & device_features;
        else
@@ -266,13 +269,26 @@ static int virtio_dev_probe(struct device *_d)
                if (device_features & (1ULL << i))
                        __virtio_set_bit(dev, i);
 
+       err = dev->config->finalize_features(dev);
+       if (err)
+               goto err;
+
        if (drv->validate) {
+               u64 features = dev->features;
+
                err = drv->validate(dev);
                if (err)
                        goto err;
+
+               /* Did validation change any features? Then write them again. */
+               if (features != dev->features) {
+                       err = dev->config->finalize_features(dev);
+                       if (err)
+                               goto err;
+               }
        }
 
-       err = virtio_finalize_features(dev);
+       err = virtio_features_ok(dev);
        if (err)
                goto err;
 
@@ -496,7 +512,11 @@ int virtio_device_restore(struct virtio_device *dev)
        /* We have a driver! */
        virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);
 
-       ret = virtio_finalize_features(dev);
+       ret = dev->config->finalize_features(dev);
+       if (ret)
+               goto err;
+
+       ret = virtio_features_ok(dev);
        if (ret)
                goto err;
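
(Editor's note: after this rework the probe path writes the feature bits before the driver's validate() hook runs, re-writes them only if validate() changed anything, and sets FEATURES_OK last, which is what lets validate() see and trim the final bits without the old F_VERSION_1 special case. A condensed, hypothetical model of that ordering, not the virtio core itself:)

    /* Sketch: finalize, validate, re-finalize if changed, then FEATURES_OK. */
    #include <stdio.h>

    typedef unsigned long long u64;

    struct dev {
            u64 features;
            int (*finalize_features)(struct dev *d); /* transport writes bits */
            int (*validate)(struct dev *d);          /* driver may clear bits */
    };

    static int probe(struct dev *d)
    {
            int err;

            err = d->finalize_features(d); /* write features once */
            if (err)
                    return err;

            if (d->validate) {
                    u64 before = d->features;

                    err = d->validate(d);  /* may drop an unusable bit */
                    if (err)
                            return err;
                    if (before != d->features) { /* changed? write again */
                            err = d->finalize_features(d);
                            if (err)
                                    return err;
                    }
            }
            /* only now would FEATURES_OK be set (virtio_features_ok()) */
            return 0;
    }

    static int fin(struct dev *d) { (void)d; return 0; }
    static int val(struct dev *d) { d->features &= ~1ull; return 0; }

    int main(void)
    {
            struct dev d = { .features = 3, .finalize_features = fin,
                             .validate = val };
            printf("%d features=%llu\n", probe(&d), d.features); /* 0, 2 */
            return 0;
    }
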
 
index 7767a7f..7650455 100644 (file)
@@ -317,7 +317,7 @@ static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
        /* Give virtio_ring a chance to accept features. */
        vring_transport_features(vdev);
 
-       return vdpa_set_features(vdpa, vdev->features, false);
+       return vdpa_set_features(vdpa, vdev->features);
 }
 
 static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
index 3fa40c7..edb0acd 100644 (file)
@@ -169,20 +169,14 @@ undo:
                __del_gref(gref);
        }
 
-       /* It's possible for the target domain to map the just-allocated grant
-        * references by blindly guessing their IDs; if this is done, then
-        * __del_gref will leave them in the queue_gref list. They need to be
-        * added to the global list so that we can free them when they are no
-        * longer referenced.
-        */
-       if (unlikely(!list_empty(&queue_gref)))
-               list_splice_tail(&queue_gref, &gref_list);
        mutex_unlock(&gref_mutex);
        return rc;
 }
 
 static void __del_gref(struct gntalloc_gref *gref)
 {
+       unsigned long addr;
+
        if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
                uint8_t *tmp = kmap(gref->page);
                tmp[gref->notify.pgoff] = 0;
@@ -196,21 +190,16 @@ static void __del_gref(struct gntalloc_gref *gref)
        gref->notify.flags = 0;
 
        if (gref->gref_id) {
-               if (gnttab_query_foreign_access(gref->gref_id))
-                       return;
-
-               if (!gnttab_end_foreign_access_ref(gref->gref_id, 0))
-                       return;
-
-               gnttab_free_grant_reference(gref->gref_id);
+               if (gref->page) {
+                       addr = (unsigned long)page_to_virt(gref->page);
+                       gnttab_end_foreign_access(gref->gref_id, 0, addr);
+               } else
+                       gnttab_free_grant_reference(gref->gref_id);
        }
 
        gref_size--;
        list_del(&gref->next_gref);
 
-       if (gref->page)
-               __free_page(gref->page);
-
        kfree(gref);
 }
 
index 3729bea..5c83d41 100644 (file)
@@ -134,12 +134,9 @@ struct gnttab_ops {
         */
        unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
        /*
-        * Query the status of a grant entry. Ref parameter is reference of
-        * queried grant entry, return value is the status of queried entry.
-        * Detailed status(writing/reading) can be gotten from the return value
-        * by bit operations.
+        * Read the frame number related to a given grant reference.
         */
-       int (*query_foreign_access)(grant_ref_t ref);
+       unsigned long (*read_frame)(grant_ref_t ref);
 };
 
 struct unmap_refs_callback_data {
@@ -284,22 +281,6 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
 }
 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
 
-static int gnttab_query_foreign_access_v1(grant_ref_t ref)
-{
-       return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
-}
-
-static int gnttab_query_foreign_access_v2(grant_ref_t ref)
-{
-       return grstatus[ref] & (GTF_reading|GTF_writing);
-}
-
-int gnttab_query_foreign_access(grant_ref_t ref)
-{
-       return gnttab_interface->query_foreign_access(ref);
-}
-EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
-
 static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
 {
        u16 flags, nflags;
@@ -353,6 +334,16 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
 }
 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
 
+static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
+{
+       return gnttab_shared.v1[ref].frame;
+}
+
+static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
+{
+       return gnttab_shared.v2[ref].full_page.frame;
+}
+
 struct deferred_entry {
        struct list_head list;
        grant_ref_t ref;
@@ -382,12 +373,9 @@ static void gnttab_handle_deferred(struct timer_list *unused)
                spin_unlock_irqrestore(&gnttab_list_lock, flags);
                if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
                        put_free_entry(entry->ref);
-                       if (entry->page) {
-                               pr_debug("freeing g.e. %#x (pfn %#lx)\n",
-                                        entry->ref, page_to_pfn(entry->page));
-                               put_page(entry->page);
-                       } else
-                               pr_info("freeing g.e. %#x\n", entry->ref);
+                       pr_debug("freeing g.e. %#x (pfn %#lx)\n",
+                                entry->ref, page_to_pfn(entry->page));
+                       put_page(entry->page);
                        kfree(entry);
                        entry = NULL;
                } else {
@@ -412,9 +400,18 @@ static void gnttab_handle_deferred(struct timer_list *unused)
 static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
                                struct page *page)
 {
-       struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+       struct deferred_entry *entry;
+       gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
        const char *what = KERN_WARNING "leaking";
 
+       entry = kmalloc(sizeof(*entry), gfp);
+       if (!page) {
+               unsigned long gfn = gnttab_interface->read_frame(ref);
+
+               page = pfn_to_page(gfn_to_pfn(gfn));
+               get_page(page);
+       }
+
        if (entry) {
                unsigned long flags;
 
@@ -435,11 +432,21 @@ static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
               what, ref, page ? page_to_pfn(page) : -1);
 }
 
+int gnttab_try_end_foreign_access(grant_ref_t ref)
+{
+       int ret = _gnttab_end_foreign_access_ref(ref, 0);
+
+       if (ret)
+               put_free_entry(ref);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);
+
 void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
                               unsigned long page)
 {
-       if (gnttab_end_foreign_access_ref(ref, readonly)) {
-               put_free_entry(ref);
+       if (gnttab_try_end_foreign_access(ref)) {
                if (page != 0)
                        put_page(virt_to_page(page));
        } else
@@ -1417,7 +1424,7 @@ static const struct gnttab_ops gnttab_v1_ops = {
        .update_entry                   = gnttab_update_entry_v1,
        .end_foreign_access_ref         = gnttab_end_foreign_access_ref_v1,
        .end_foreign_transfer_ref       = gnttab_end_foreign_transfer_ref_v1,
-       .query_foreign_access           = gnttab_query_foreign_access_v1,
+       .read_frame                     = gnttab_read_frame_v1,
 };
 
 static const struct gnttab_ops gnttab_v2_ops = {
@@ -1429,7 +1436,7 @@ static const struct gnttab_ops gnttab_v2_ops = {
        .update_entry                   = gnttab_update_entry_v2,
        .end_foreign_access_ref         = gnttab_end_foreign_access_ref_v2,
        .end_foreign_transfer_ref       = gnttab_end_foreign_transfer_ref_v2,
-       .query_foreign_access           = gnttab_query_foreign_access_v2,
+       .read_frame                     = gnttab_read_frame_v2,
 };
 
 static bool gnttab_need_v2(void)
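
(Editor's note: gnttab_try_end_foreign_access() changes the contract at call sites seen earlier in this series, e.g. xen-hcd: instead of unconditionally ending access, the caller must now handle the case where the backend still holds the grant. A toy analogue of that contract, not the Xen API itself:)

    /* Sketch: try-end semantics; the caller owns the "still in use" case. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool backend_done[2] = { true, false };

    static bool try_end_access(int ref)
    {
            if (!backend_done[ref])
                    return false; /* other side still holds the grant */
            /* put_free_entry(ref) would happen here */
            return true;
    }

    int main(void)
    {
            for (int ref = 0; ref < 2; ref++) {
                    if (try_end_access(ref))
                            printf("ref %d freed\n", ref);
                    else
                            printf("ref %d: backend didn't release grant\n", ref);
            }
            return 0;
    }
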
index 3c9ae15..0ca351f 100644 (file)
@@ -337,8 +337,8 @@ static void free_active_ring(struct sock_mapping *map)
        if (!map->active.ring)
                return;
 
-       free_pages((unsigned long)map->active.data.in,
-                       map->active.ring->ring_order);
+       free_pages_exact(map->active.data.in,
+                        PAGE_SIZE << map->active.ring->ring_order);
        free_page((unsigned long)map->active.ring);
 }
 
@@ -352,8 +352,8 @@ static int alloc_active_ring(struct sock_mapping *map)
                goto out;
 
        map->active.ring->ring_order = PVCALLS_RING_ORDER;
-       bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                       PVCALLS_RING_ORDER);
+       bytes = alloc_pages_exact(PAGE_SIZE << PVCALLS_RING_ORDER,
+                                 GFP_KERNEL | __GFP_ZERO);
        if (!bytes)
                goto out;
 
index e8bed1c..df68906 100644 (file)
@@ -379,7 +379,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
                      unsigned int nr_pages, grant_ref_t *grefs)
 {
        int err;
-       int i, j;
+       unsigned int i;
+       grant_ref_t gref_head;
+
+       err = gnttab_alloc_grant_references(nr_pages, &gref_head);
+       if (err) {
+               xenbus_dev_fatal(dev, err, "granting access to ring page");
+               return err;
+       }
 
        for (i = 0; i < nr_pages; i++) {
                unsigned long gfn;
@@ -389,23 +396,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
                else
                        gfn = virt_to_gfn(vaddr);
 
-               err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
-               if (err < 0) {
-                       xenbus_dev_fatal(dev, err,
-                                        "granting access to ring page");
-                       goto fail;
-               }
-               grefs[i] = err;
+               grefs[i] = gnttab_claim_grant_reference(&gref_head);
+               gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
+                                               gfn, 0);
 
                vaddr = vaddr + XEN_PAGE_SIZE;
        }
 
        return 0;
-
-fail:
-       for (j = 0; j < i; j++)
-               gnttab_end_foreign_access_ref(grefs[j], 0);
-       return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_grant_ring);
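
(Editor's note: the rewrite swaps N individually-failable gnttab_grant_foreign_access() calls for one up-front gnttab_alloc_grant_references() plus infallible claims, which is why the whole fail:/unwind path could be deleted. A small sketch of the reserve-then-claim shape, with a hypothetical pool rather than the grant-table API:)

    /* Sketch: reserve everything up front, so per-item failure vanishes. */
    #include <stdio.h>

    struct pool { int next, end; };

    static int reserve(struct pool *p, int n)
    {
            p->next = 0;
            p->end = n; /* pretend n refs were set aside */
            return 0;   /* a real pool could fail here, exactly once */
    }

    static int claim(struct pool *p)
    {
            return p->next < p->end ? p->next++ : -1; /* cannot fail after reserve */
    }

    int main(void)
    {
            struct pool p;
            int grefs[4];

            if (reserve(&p, 4)) /* single failure point, nothing to unwind */
                    return 1;
            for (int i = 0; i < 4; i++)
                    grefs[i] = claim(&p);
            printf("grefs: %d %d %d %d\n", grefs[0], grefs[1], grefs[2], grefs[3]);
            return 0;
    }
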
 
index 5e9157d..f447c90 100644 (file)
@@ -703,7 +703,7 @@ static int afs_writepages_region(struct address_space *mapping,
        struct folio *folio;
        struct page *head_page;
        ssize_t ret;
-       int n;
+       int n, skips = 0;
 
        _enter("%llx,%llx,", start, end);
 
@@ -754,8 +754,15 @@ static int afs_writepages_region(struct address_space *mapping,
 #ifdef CONFIG_AFS_FSCACHE
                                folio_wait_fscache(folio);
 #endif
+                       } else {
+                               start += folio_size(folio);
                        }
                        folio_put(folio);
+                       if (wbc->sync_mode == WB_SYNC_NONE) {
+                               if (skips >= 5 || need_resched())
+                                       break;
+                               skips++;
+                       }
                        continue;
                }
 
index 947f047..ebb2d10 100644 (file)
@@ -602,6 +602,9 @@ enum {
        /* Indicate that we want the transaction kthread to commit right now. */
        BTRFS_FS_COMMIT_TRANS,
 
+       /* Indicate we have half completed snapshot deletions pending. */
+       BTRFS_FS_UNFINISHED_DROPS,
+
 #if BITS_PER_LONG == 32
        /* Indicate if we have error/warn message printed on 32bit systems */
        BTRFS_FS_32BIT_ERROR,
@@ -1106,8 +1109,15 @@ enum {
        BTRFS_ROOT_QGROUP_FLUSHING,
        /* We started the orphan cleanup for this root. */
        BTRFS_ROOT_ORPHAN_CLEANUP,
+       /* This root has a drop operation that was started previously. */
+       BTRFS_ROOT_UNFINISHED_DROP,
 };
 
+static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
+{
+       clear_and_wake_up_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags);
+}
+
 /*
  * Record swapped tree blocks of a subvolume tree for delayed subtree trace
  * code. For detail check comment in fs/btrfs/qgroup.c.
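
(Editor's note: the new flag works as a test-bit/wait-bit handshake: mount sets BTRFS_FS_UNFINISHED_DROPS when a half-deleted snapshot is found, relocation sleeps on it, see the wait_on_bit() hunk further down, and the last finished drop clears it and wakes everyone. A pthread analogue of that pairing; the kernel uses clear_and_wake_up_bit()/wait_on_bit() instead:)

    /* Sketch: flag set at mount, waited on by relocation, cleared on last drop. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool unfinished_drops = true; /* like BTRFS_FS_UNFINISHED_DROPS */

    static void wake_unfinished_drop(void) /* clear_and_wake_up_bit() analogue */
    {
            pthread_mutex_lock(&lock);
            unfinished_drops = false;
            pthread_cond_broadcast(&cond);
            pthread_mutex_unlock(&lock);
    }

    static void *relocate(void *arg) /* wait_on_bit() analogue */
    {
            pthread_mutex_lock(&lock);
            while (unfinished_drops)
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);
            puts("relocation may start");
            return arg;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, relocate, NULL);
            wake_unfinished_drop(); /* last drop finished */
            pthread_join(t, NULL);
            return 0;
    }
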
index 87a5add..48590a3 100644 (file)
@@ -3813,6 +3813,10 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
 
        set_bit(BTRFS_FS_OPEN, &fs_info->flags);
 
+       /* Kick the cleaner thread so it'll start deleting snapshots. */
+       if (test_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags))
+               wake_up_process(fs_info->cleaner_kthread);
+
 clear_oneshot:
        btrfs_clear_oneshot_options(fs_info);
        return 0;
@@ -4538,6 +4542,12 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
         */
        kthread_park(fs_info->cleaner_kthread);
 
+       /*
+        * If we had UNFINISHED_DROPS we could still be processing them, so
+        * clear that bit and wake up relocation so it can stop.
+        */
+       btrfs_wake_unfinished_drop(fs_info);
+
        /* wait for the qgroup rescan worker to stop */
        btrfs_qgroup_wait_for_completion(fs_info, false);
 
index d89273c..96427b1 100644 (file)
@@ -5622,6 +5622,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
        int ret;
        int level;
        bool root_dropped = false;
+       bool unfinished_drop = false;
 
        btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid);
 
@@ -5664,6 +5665,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
         * already dropped.
         */
        set_bit(BTRFS_ROOT_DELETING, &root->state);
+       unfinished_drop = test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state);
+
        if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
                level = btrfs_header_level(root->node);
                path->nodes[level] = btrfs_lock_root_node(root);
@@ -5838,6 +5841,13 @@ out_free:
        kfree(wc);
        btrfs_free_path(path);
 out:
+       /*
+        * If we were an unfinished drop root, check whether any other drops are
+        * still pending, and if not clear the bit and wake up any waiters.
+        */
+       if (!err && unfinished_drop)
+               btrfs_maybe_wake_unfinished_drop(fs_info);
+
        /*
         * So if we need to stop dropping the snapshot for whatever reason we
         * need to make sure to add it back to the dead root list so that we
index 409bad3..4c91060 100644 (file)
@@ -6841,14 +6841,24 @@ static void assert_eb_page_uptodate(const struct extent_buffer *eb,
 {
        struct btrfs_fs_info *fs_info = eb->fs_info;
 
+       /*
+        * If we are using the commit root we could potentially clear a page
+        * Uptodate while we're using the extent buffer that we've previously
+        * looked up.  We don't want to complain in this case, as the page was
+        * valid before; we just didn't write it out.  Instead we want to catch
+        * the case where we didn't actually read the block properly, which
+        * would have !PageUptodate && !PageError, as we clear PageError before
+        * reading.
+        */
        if (fs_info->sectorsize < PAGE_SIZE) {
-               bool uptodate;
+               bool uptodate, error;
 
                uptodate = btrfs_subpage_test_uptodate(fs_info, page,
                                                       eb->start, eb->len);
-               WARN_ON(!uptodate);
+               error = btrfs_subpage_test_error(fs_info, page, eb->start, eb->len);
+               WARN_ON(!uptodate && !error);
        } else {
-               WARN_ON(!PageUptodate(page));
+               WARN_ON(!PageUptodate(page) && !PageError(page));
        }
 }
 
index 76e530f..5bbea5e 100644 (file)
@@ -7600,6 +7600,34 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
        }
 
        len = min(len, em->len - (start - em->start));
+
+       /*
+        * If we have a NOWAIT request and the range contains multiple extents
+        * (or a mix of extents and holes), then we return -EAGAIN to make the
+        * caller fall back to a context where it can do a blocking (without
+        * NOWAIT) request. This way we avoid doing partial IO and returning
+        * success to the caller, which is not optimal for writes and for reads
+        * it can result in unexpected behaviour for an application.
+        *
+        * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
+        * iomap_dio_rw(), we can end up returning less data than what the caller
+        * asked for, resulting in an unexpected, and incorrect, short read.
+        * That is, the caller asked to read N bytes and we return less than that,
+        * which is wrong unless we are crossing EOF. This happens if we get a
+        * page fault error when trying to fault in pages for the buffer that is
+        * associated to the struct iov_iter passed to iomap_dio_rw(), and we
+        * have previously submitted bios for other extents in the range, in
+        * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of
+        * those bios have completed by the time we get the page fault error,
+        * which we return back to our caller - we should only return EIOCBQUEUED
+        * after we have submitted bios for all the extents in the range.
+        */
+       if ((flags & IOMAP_NOWAIT) && len < length) {
+               free_extent_map(em);
+               ret = -EAGAIN;
+               goto unlock_err;
+       }
+
        if (write) {
                ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
                                                    start, len);
index f12dc68..30d42ea 100644 (file)
@@ -1196,6 +1196,14 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
        if (!fs_info->quota_root)
                goto out;
 
+       /*
+        * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to
+        * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs
+        * to lock that mutex while holding a transaction handle and the rescan
+        * worker needs to commit a transaction.
+        */
+       mutex_unlock(&fs_info->qgroup_ioctl_lock);
+
        /*
         * Request qgroup rescan worker to complete and wait for it. This wait
         * must be done before transaction start for quota disable since it may
@@ -1203,7 +1211,6 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
         */
        clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        btrfs_qgroup_wait_for_completion(fs_info, false);
-       mutex_unlock(&fs_info->qgroup_ioctl_lock);
 
        /*
         * 1 For the root item
index f546519..9d80548 100644 (file)
@@ -3960,6 +3960,19 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
        int rw = 0;
        int err = 0;
 
+       /*
+        * This only gets set if we had a half-deleted snapshot on mount.  We
+        * cannot allow relocation to start while we're still trying to clean up
+        * these pending deletions.
+        */
+       ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE);
+       if (ret)
+               return ret;
+
+       /* We may have been woken up by close_ctree, so bail if we're closing. */
+       if (btrfs_fs_closing(fs_info))
+               return -EINTR;
+
        bg = btrfs_lookup_block_group(fs_info, group_start);
        if (!bg)
                return -ENOENT;
index 3d68d2d..ca7426e 100644 (file)
@@ -278,6 +278,21 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
 
                WARN_ON(!test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state));
                if (btrfs_root_refs(&root->root_item) == 0) {
+                       struct btrfs_key drop_key;
+
+                       btrfs_disk_key_to_cpu(&drop_key, &root->root_item.drop_progress);
+                       /*
+                        * If we have a non-zero drop_progress then we know we
+                        * made it partly through deleting this snapshot, and
+                        * thus we need to make sure we block any balance from
+                        * happening until this snapshot is completely dropped.
+                        */
+                       if (drop_key.objectid != 0 || drop_key.type != 0 ||
+                           drop_key.offset != 0) {
+                               set_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags);
+                               set_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state);
+                       }
+
                        set_bit(BTRFS_ROOT_DEAD_TREE, &root->state);
                        btrfs_add_dead_root(root);
                }
index 29bd8c7..ef7ae20 100644 (file)
@@ -736,7 +736,7 @@ void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page,
         * Since we own the page lock, no one else could touch subpage::writers
         * and we are safe to do several atomic operations without spinlock.
         */
-       if (atomic_read(&subpage->writers))
+       if (atomic_read(&subpage->writers) == 0)
                /* No writers, locked by plain lock_page() */
                return unlock_page(page);
 
index c3cfdfd..1f1c25d 100644 (file)
@@ -854,7 +854,37 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root)
 static noinline void wait_for_commit(struct btrfs_transaction *commit,
                                     const enum btrfs_trans_state min_state)
 {
-       wait_event(commit->commit_wait, commit->state >= min_state);
+       struct btrfs_fs_info *fs_info = commit->fs_info;
+       u64 transid = commit->transid;
+       bool put = false;
+
+       while (1) {
+               wait_event(commit->commit_wait, commit->state >= min_state);
+               if (put)
+                       btrfs_put_transaction(commit);
+
+               if (min_state < TRANS_STATE_COMPLETED)
+                       break;
+
+               /*
+                * A transaction isn't really completed until all of the
+                * previous transactions are completed, but with fsync we can
+                * end up with SUPER_COMMITTED transactions before a COMPLETED
+                * transaction. Wait for those.
+                */
+
+               spin_lock(&fs_info->trans_lock);
+               commit = list_first_entry_or_null(&fs_info->trans_list,
+                                                 struct btrfs_transaction,
+                                                 list);
+               if (!commit || commit->transid > transid) {
+                       spin_unlock(&fs_info->trans_lock);
+                       break;
+               }
+               refcount_inc(&commit->use_count);
+               put = true;
+               spin_unlock(&fs_info->trans_lock);
+       }
 }
 
 int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
@@ -1319,6 +1349,32 @@ again:
        return 0;
 }
 
+/*
+ * If we had a pending drop we need to see if there are any others left in our
+ * dead roots list, and if not clear our bit and wake any waiters.
+ */
+void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
+{
+       /*
+        * We put the drop-in-progress roots at the front of the list, so if the
+        * first entry doesn't have UNFINISHED_DROP set we can wake everybody
+        * up.
+        */
+       spin_lock(&fs_info->trans_lock);
+       if (!list_empty(&fs_info->dead_roots)) {
+               struct btrfs_root *root = list_first_entry(&fs_info->dead_roots,
+                                                          struct btrfs_root,
+                                                          root_list);
+               if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state)) {
+                       spin_unlock(&fs_info->trans_lock);
+                       return;
+               }
+       }
+       spin_unlock(&fs_info->trans_lock);
+
+       btrfs_wake_unfinished_drop(fs_info);
+}
+
 /*
  * dead roots are old snapshots that need to be deleted.  This allocates
  * a dirty root struct and adds it into the list of dead roots that need to
@@ -1331,7 +1387,12 @@ void btrfs_add_dead_root(struct btrfs_root *root)
        spin_lock(&fs_info->trans_lock);
        if (list_empty(&root->root_list)) {
                btrfs_grab_root(root);
-               list_add_tail(&root->root_list, &fs_info->dead_roots);
+
+               /* We want to process the partially complete drops first. */
+               if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state))
+                       list_add(&root->root_list, &fs_info->dead_roots);
+               else
+                       list_add_tail(&root->root_list, &fs_info->dead_roots);
        }
        spin_unlock(&fs_info->trans_lock);
 }
index 9402d8d..ba8a982 100644 (file)
@@ -216,6 +216,7 @@ int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);
 
 void btrfs_add_dead_root(struct btrfs_root *root);
 int btrfs_defrag_root(struct btrfs_root *root);
+void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info);
 int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
 void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans);
index 9fd145f..aae5697 100644 (file)
@@ -1682,6 +1682,7 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
         */
        for (slot = 0; slot < nritems; slot++) {
                u32 item_end_expected;
+               u64 item_data_end;
                int ret;
 
                btrfs_item_key_to_cpu(leaf, &key, slot);
@@ -1696,6 +1697,8 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
                        return -EUCLEAN;
                }
 
+               item_data_end = (u64)btrfs_item_offset(leaf, slot) +
+                               btrfs_item_size(leaf, slot);
                /*
                 * Make sure the offset and ends are right, remember that the
                 * item data starts at the end of the leaf and grows towards the
@@ -1706,11 +1709,10 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
                else
                        item_end_expected = btrfs_item_offset(leaf,
                                                                 slot - 1);
-               if (unlikely(btrfs_item_data_end(leaf, slot) != item_end_expected)) {
+               if (unlikely(item_data_end != item_end_expected)) {
                        generic_err(leaf, slot,
-                               "unexpected item end, have %u expect %u",
-                               btrfs_item_data_end(leaf, slot),
-                               item_end_expected);
+                               "unexpected item end, have %llu expect %u",
+                               item_data_end, item_end_expected);
                        return -EUCLEAN;
                }
 
@@ -1719,12 +1721,10 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
                 * just in case all the items are consistent to each other, but
                 * all point outside of the leaf.
                 */
-               if (unlikely(btrfs_item_data_end(leaf, slot) >
-                            BTRFS_LEAF_DATA_SIZE(fs_info))) {
+               if (unlikely(item_data_end > BTRFS_LEAF_DATA_SIZE(fs_info))) {
                        generic_err(leaf, slot,
-                       "slot end outside of leaf, have %u expect range [0, %u]",
-                               btrfs_item_data_end(leaf, slot),
-                               BTRFS_LEAF_DATA_SIZE(fs_info));
+                       "slot end outside of leaf, have %llu expect range [0, %u]",
+                               item_data_end, BTRFS_LEAF_DATA_SIZE(fs_info));
                        return -EUCLEAN;
                }
 
index 3ee014c..6bc8834 100644 (file)
@@ -1362,6 +1362,15 @@ again:
                                                 inode, name, namelen);
                        kfree(name);
                        iput(dir);
+                       /*
+                        * Whenever we need to check if a name exists or not, we
+                        * check the subvolume tree. So after an unlink we must
+                        * run delayed items, so that future checks for a name
+                        * during log replay see that the name does not exists
+                        * anymore.
+                        */
+                       if (!ret)
+                               ret = btrfs_run_delayed_items(trans);
                        if (ret)
                                goto out;
                        goto again;
@@ -1614,6 +1623,15 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                                 */
                                if (!ret && inode->i_nlink == 0)
                                        inc_nlink(inode);
+                               /*
+                                * Whenever we need to check if a name exists or
+                                * not, we check the subvolume tree. So after an
+                                * unlink we must run delayed items, so that future
+                                * checks for a name during log replay see that the
+                                * name does not exists anymore.
+                                */
+                               if (!ret)
+                                       ret = btrfs_run_delayed_items(trans);
                        }
                        if (ret < 0)
                                goto out;
@@ -4635,7 +4653,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
 
 /*
  * Log all prealloc extents beyond the inode's i_size to make sure we do not
- * lose them after doing a fast fsync and replaying the log. We scan the
+ * lose them after doing a full/fast fsync and replaying the log. We scan the
  * subvolume's root instead of iterating the inode's extent map tree because
  * otherwise we can log incorrect extent items based on extent map conversion.
  * That can happen due to the fact that extent maps are merged when they
@@ -5414,6 +5432,7 @@ static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
                                   struct btrfs_log_ctx *ctx,
                                   bool *need_log_inode_item)
 {
+       const u64 i_size = i_size_read(&inode->vfs_inode);
        struct btrfs_root *root = inode->root;
        int ins_start_slot = 0;
        int ins_nr = 0;
@@ -5434,13 +5453,21 @@ again:
                if (min_key->type > max_key->type)
                        break;
 
-               if (min_key->type == BTRFS_INODE_ITEM_KEY)
+               if (min_key->type == BTRFS_INODE_ITEM_KEY) {
                        *need_log_inode_item = false;
-
-               if ((min_key->type == BTRFS_INODE_REF_KEY ||
-                    min_key->type == BTRFS_INODE_EXTREF_KEY) &&
-                   inode->generation == trans->transid &&
-                   !recursive_logging) {
+               } else if (min_key->type == BTRFS_EXTENT_DATA_KEY &&
+                          min_key->offset >= i_size) {
+                       /*
+                        * Extents at and beyond eof are logged with
+                        * btrfs_log_prealloc_extents().
+                        * Only regular files have BTRFS_EXTENT_DATA_KEY keys,
+                        * and no keys greater than that, so bail out.
+                        */
+                       break;
+               } else if ((min_key->type == BTRFS_INODE_REF_KEY ||
+                           min_key->type == BTRFS_INODE_EXTREF_KEY) &&
+                          inode->generation == trans->transid &&
+                          !recursive_logging) {
                        u64 other_ino = 0;
                        u64 other_parent = 0;
 
@@ -5471,10 +5498,8 @@ again:
                                btrfs_release_path(path);
                                goto next_key;
                        }
-               }
-
-               /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
-               if (min_key->type == BTRFS_XATTR_ITEM_KEY) {
+               } else if (min_key->type == BTRFS_XATTR_ITEM_KEY) {
+                       /* Skip xattrs, logged later with btrfs_log_all_xattrs() */
                        if (ins_nr == 0)
                                goto next_slot;
                        ret = copy_items(trans, inode, dst_path, path,
@@ -5527,9 +5552,21 @@ next_key:
                        break;
                }
        }
-       if (ins_nr)
+       if (ins_nr) {
                ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
                                 ins_nr, inode_only, logged_isize);
+               if (ret)
+                       return ret;
+       }
+
+       if (inode_only == LOG_INODE_ALL && S_ISREG(inode->vfs_inode.i_mode)) {
+               /*
+                * Release the path because otherwise we might attempt to double
+                * lock the same leaf with btrfs_log_prealloc_extents() below.
+                */
+               btrfs_release_path(path);
+               ret = btrfs_log_prealloc_extents(trans, inode, dst_path);
+       }
 
        return ret;
 }
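
For context on why the new early break is safe: btrfs sorts items by (objectid, type, offset), and for a regular file BTRFS_EXTENT_DATA_KEY is the highest key type present, so the first file extent item at an offset >= i_size means nothing relevant can follow. A minimal standalone sketch of that ordering (simplified types, mirroring what btrfs_comp_cpu_keys() does in the kernel):

	#include <stdint.h>

	struct key { uint64_t objectid; uint8_t type; uint64_t offset; };

	/* Lexicographic (objectid, type, offset) comparison. */
	static int key_cmp(const struct key *a, const struct key *b)
	{
		if (a->objectid != b->objectid)
			return a->objectid < b->objectid ? -1 : 1;
		if (a->type != b->type)
			return a->type < b->type ? -1 : 1;
		if (a->offset != b->offset)
			return a->offset < b->offset ? -1 : 1;
		return 0;
	}

Because offset is the least significant part of the ordering, iterating a single inode's items visits extent items in increasing offset order, which is what lets the loop bail out at the first one past i_size.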
index 51c968c..ae93cee 100644 (file)
@@ -254,7 +254,7 @@ static bool cachefiles_shorten_object(struct cachefiles_object *object,
                ret = cachefiles_inject_write_error();
                if (ret == 0)
                        ret = vfs_fallocate(file, FALLOC_FL_ZERO_RANGE,
-                                           new_size, dio_size);
+                                           new_size, dio_size - new_size);
                if (ret < 0) {
                        trace_cachefiles_io_error(object, file_inode(file), ret,
                                                  cachefiles_trace_fallocate_error);
index 83f41bd..3546510 100644 (file)
@@ -28,6 +28,11 @@ struct cachefiles_xattr {
 static const char cachefiles_xattr_cache[] =
        XATTR_USER_PREFIX "CacheFiles.cache";
 
+struct cachefiles_vol_xattr {
+       __be32  reserved;       /* Reserved, should be 0 */
+       __u8    data[];         /* netfs volume coherency data */
+} __packed;
+
 /*
  * set the state xattr on a cache file
  */
@@ -185,6 +190,7 @@ void cachefiles_prepare_to_write(struct fscache_cookie *cookie)
  */
 bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
 {
+       struct cachefiles_vol_xattr *buf;
        unsigned int len = volume->vcookie->coherency_len;
        const void *p = volume->vcookie->coherency;
        struct dentry *dentry = volume->dentry;
@@ -192,10 +198,17 @@ bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
 
        _enter("%x,#%d", volume->vcookie->debug_id, len);
 
+       len += sizeof(*buf);
+       buf = kmalloc(len, GFP_KERNEL);
+       if (!buf)
+               return false;
+       buf->reserved = cpu_to_be32(0);
+       memcpy(buf->data, p, len - sizeof(*buf));
+
        ret = cachefiles_inject_write_error();
        if (ret == 0)
                ret = vfs_setxattr(&init_user_ns, dentry, cachefiles_xattr_cache,
-                                  p, len, 0);
+                                  buf, len, 0);
        if (ret < 0) {
                trace_cachefiles_vfs_error(NULL, d_inode(dentry), ret,
                                           cachefiles_trace_setxattr_error);
@@ -209,6 +222,7 @@ bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
                                               cachefiles_coherency_vol_set_ok);
        }
 
+       kfree(buf);
        _leave(" = %d", ret);
        return ret == 0;
 }
@@ -218,7 +232,7 @@ bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
  */
 int cachefiles_check_volume_xattr(struct cachefiles_volume *volume)
 {
-       struct cachefiles_xattr *buf;
+       struct cachefiles_vol_xattr *buf;
        struct dentry *dentry = volume->dentry;
        unsigned int len = volume->vcookie->coherency_len;
        const void *p = volume->vcookie->coherency;
@@ -228,6 +242,7 @@ int cachefiles_check_volume_xattr(struct cachefiles_volume *volume)
 
        _enter("");
 
+       len += sizeof(*buf);
        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
@@ -245,7 +260,9 @@ int cachefiles_check_volume_xattr(struct cachefiles_volume *volume)
                                        "Failed to read xattr with error %zd", xlen);
                }
                why = cachefiles_coherency_vol_check_xattr;
-       } else if (memcmp(buf->data, p, len) != 0) {
+       } else if (buf->reserved != cpu_to_be32(0)) {
+               why = cachefiles_coherency_vol_check_resv;
+       } else if (memcmp(buf->data, p, len - sizeof(*buf)) != 0) {
                why = cachefiles_coherency_vol_check_cmp;
        } else {
                why = cachefiles_coherency_vol_check_ok;
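
To make the length arithmetic above concrete: the volume xattr now carries a 4-byte big-endian reserved header in front of the raw coherency data, so both the set and check paths size their buffer as coherency_len + sizeof(header) and compare only the data portion. A userspace sketch of the layout (stand-in types; htonl() standing in for cpu_to_be32()):

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>
	#include <arpa/inet.h>

	struct vol_xattr {
		uint32_t reserved;   /* big-endian, must be 0 */
		uint8_t  data[];     /* netfs coherency data follows */
	};

	/* Build the xattr value: 4-byte header + coherency payload. */
	static void *build_vol_xattr(const void *coherency, size_t coherency_len,
				     size_t *out_len)
	{
		size_t len = sizeof(struct vol_xattr) + coherency_len;
		struct vol_xattr *buf = malloc(len);

		if (!buf)
			return NULL;
		buf->reserved = htonl(0);
		memcpy(buf->data, coherency, coherency_len);
		*out_len = len;
		return buf;
	}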
index cd54a52..592730f 100644 (file)
@@ -941,7 +941,17 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
 
        while (count) {
                if (cs->write && cs->pipebufs && page) {
-                       return fuse_ref_page(cs, page, offset, count);
+                       /*
+                        * Can't control lifetime of pipe buffers, so always
+                        * copy user pages.
+                        */
+                       if (cs->req->args->user_pages) {
+                               err = fuse_copy_fill(cs);
+                               if (err)
+                                       return err;
+                       } else {
+                               return fuse_ref_page(cs, page, offset, count);
+                       }
                } else if (!cs->len) {
                        if (cs->move_pages && page &&
                            offset == 0 && count == PAGE_SIZE) {
index 8290944..0fc150c 100644 (file)
@@ -1413,6 +1413,7 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
                        (PAGE_SIZE - ret) & (PAGE_SIZE - 1);
        }
 
+       ap->args.user_pages = true;
        if (write)
                ap->args.in_pages = true;
        else
index e8e59fb..eac4984 100644 (file)
@@ -256,6 +256,7 @@ struct fuse_args {
        bool nocreds:1;
        bool in_pages:1;
        bool out_pages:1;
+       bool user_pages:1;
        bool out_argvar:1;
        bool page_zeroing:1;
        bool page_replace:1;
index ee846ce..9ee36aa 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/exportfs.h>
 #include <linux/posix_acl.h>
 #include <linux/pid_namespace.h>
+#include <uapi/linux/magic.h>
 
 MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
 MODULE_DESCRIPTION("Filesystem in Userspace");
@@ -50,8 +51,6 @@ MODULE_PARM_DESC(max_user_congthresh,
  "Global limit for the maximum congestion threshold an "
  "unprivileged user can set");
 
-#define FUSE_SUPER_MAGIC 0x65735546
-
 #define FUSE_DEFAULT_BLKSIZE 512
 
 /** Maximum number of outstanding background requests */
index fbc09da..df58966 100644 (file)
@@ -394,9 +394,12 @@ static int fuse_priv_ioctl(struct inode *inode, struct fuse_file *ff,
        args.out_args[1].value = ptr;
 
        err = fuse_simple_request(fm, &args);
-       if (!err && outarg.flags & FUSE_IOCTL_RETRY)
-               err = -EIO;
-
+       if (!err) {
+               if (outarg.result < 0)
+                       err = outarg.result;
+               else if (outarg.flags & FUSE_IOCTL_RETRY)
+                       err = -EIO;
+       }
        return err;
 }
 
index 2772dec..8bde30f 100644 (file)
@@ -1105,17 +1105,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
                goto read_super_error;
        }
 
-       root = d_make_root(inode);
-       if (!root) {
-               status = -ENOMEM;
-               mlog_errno(status);
-               goto read_super_error;
-       }
-
-       sb->s_root = root;
-
-       ocfs2_complete_mount_recovery(osb);
-
        osb->osb_dev_kset = kset_create_and_add(sb->s_id, NULL,
                                                &ocfs2_kset->kobj);
        if (!osb->osb_dev_kset) {
@@ -1133,6 +1122,17 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
                goto read_super_error;
        }
 
+       root = d_make_root(inode);
+       if (!root) {
+               status = -ENOMEM;
+               mlog_errno(status);
+               goto read_super_error;
+       }
+
+       sb->s_root = root;
+
+       ocfs2_complete_mount_recovery(osb);
+
        if (ocfs2_mount_local(osb))
                snprintf(nodestr, sizeof(nodestr), "local");
        else
index cc28623..2667db9 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -253,7 +253,8 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
         */
        was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
        for (;;) {
-               unsigned int head = pipe->head;
+               /* Read ->head with a barrier vs post_one_notification() */
+               unsigned int head = smp_load_acquire(&pipe->head);
                unsigned int tail = pipe->tail;
                unsigned int mask = pipe->ring_size - 1;
 
@@ -831,10 +832,8 @@ void free_pipe_info(struct pipe_inode_info *pipe)
        int i;
 
 #ifdef CONFIG_WATCH_QUEUE
-       if (pipe->watch_queue) {
+       if (pipe->watch_queue)
                watch_queue_clear(pipe->watch_queue);
-               put_watch_queue(pipe->watch_queue);
-       }
 #endif
 
        (void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
@@ -844,6 +843,10 @@ void free_pipe_info(struct pipe_inode_info *pipe)
                if (buf->ops)
                        pipe_buf_release(pipe, buf);
        }
+#ifdef CONFIG_WATCH_QUEUE
+       if (pipe->watch_queue)
+               put_watch_queue(pipe->watch_queue);
+#endif
        if (pipe->tmp_page)
                __free_page(pipe->tmp_page);
        kfree(pipe->bufs);
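
The smp_load_acquire() above pairs with a release store of pipe->head on the producer side (post_one_notification() in kernel/watch_queue.c), guaranteeing that a reader which sees the advanced head also sees the buffer slot filled before it. A standalone sketch of the same acquire/release pattern in C11 atomics:

	#include <stdatomic.h>

	struct ring {
		_Atomic unsigned int head;
		unsigned int tail;
		int buf[16];
	};

	/* Producer: fill the slot first, then publish with a release store. */
	static void produce(struct ring *r, int v)
	{
		unsigned int head = atomic_load_explicit(&r->head,
							 memory_order_relaxed);
		r->buf[head & 15] = v;
		atomic_store_explicit(&r->head, head + 1, memory_order_release);
	}

	/* Consumer: acquire-load head so the slot write is visible. */
	static int consume(struct ring *r)
	{
		unsigned int head = atomic_load_explicit(&r->head,
							 memory_order_acquire);
		if (r->tail == head)
			return -1;			/* empty */
		return r->buf[r->tail++ & 15];
	}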
index 6e97ed7..f46060e 100644 (file)
@@ -309,7 +309,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 
        name = arch_vma_name(vma);
        if (!name) {
-               const char *anon_name;
+               struct anon_vma_name *anon_name;
 
                if (!mm) {
                        name = "[vdso]";
@@ -327,10 +327,10 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
                        goto done;
                }
 
-               anon_name = vma_anon_name(vma);
+               anon_name = anon_vma_name(vma);
                if (anon_name) {
                        seq_pad(m, ' ');
-                       seq_printf(m, "[anon:%s]", anon_name);
+                       seq_printf(m, "[anon:%s]", anon_name->name);
                }
        }
 
@@ -1597,7 +1597,8 @@ static const struct mm_walk_ops pagemap_ops = {
  * Bits 5-54  swap offset if swapped
  * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
  * Bit  56    page exclusively mapped
- * Bits 57-60 zero
+ * Bit  57    pte is uffd-wp write-protected
+ * Bits 58-60 zero
  * Bit  61    page is file-page or shared-anon
  * Bit  62    page swapped
  * Bit  63    page present
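
A small userspace decoder for the /proc/<pid>/pagemap layout documented above, including the new uffd-wp bit 57 (illustrative sketch):

	#include <stdint.h>
	#include <stdio.h>

	static void decode_pagemap_entry(uint64_t e)
	{
		printf("present     : %d\n", (int)((e >> 63) & 1));
		printf("swapped     : %d\n", (int)((e >> 62) & 1));
		printf("file/shared : %d\n", (int)((e >> 61) & 1));
		printf("uffd-wp     : %d\n", (int)((e >> 57) & 1)); /* new */
		printf("exclusive   : %d\n", (int)((e >> 56) & 1));
		printf("soft-dirty  : %d\n", (int)((e >> 55) & 1));
		if ((e >> 63) & 1)	/* bits 0-54 are the PFN if present */
			printf("pfn         : 0x%llx\n",
			       (unsigned long long)(e & ((1ULL << 55) - 1)));
	}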
index e26b101..8e03b3d 100644 (file)
@@ -878,7 +878,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
                                 new_flags, vma->anon_vma,
                                 vma->vm_file, vma->vm_pgoff,
                                 vma_policy(vma),
-                                NULL_VM_UFFD_CTX, vma_anon_name(vma));
+                                NULL_VM_UFFD_CTX, anon_vma_name(vma));
                if (prev)
                        vma = prev;
                else
@@ -1438,7 +1438,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
                                 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
                                 vma_policy(vma),
                                 ((struct vm_userfaultfd_ctx){ ctx }),
-                                vma_anon_name(vma));
+                                anon_vma_name(vma));
                if (prev) {
                        vma = prev;
                        goto next;
@@ -1615,7 +1615,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
                prev = vma_merge(mm, prev, start, vma_end, new_flags,
                                 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
                                 vma_policy(vma),
-                                NULL_VM_UFFD_CTX, vma_anon_name(vma));
+                                NULL_VM_UFFD_CTX, anon_vma_name(vma));
                if (prev) {
                        vma = prev;
                        goto next;
index 63ccb52..220c8c6 100644 (file)
                           ARM_SMCCC_SMC_32,                            \
                           0, 0x7fff)
 
+#define ARM_SMCCC_ARCH_WORKAROUND_3                                    \
+       ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
+                          ARM_SMCCC_SMC_32,                            \
+                          0, 0x3fff)
+
 #define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID                          \
        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
                           ARM_SMCCC_SMC_32,                            \
index 88449fb..bdb5298 100644 (file)
@@ -1829,6 +1829,11 @@ struct bpf_core_ctx {
 int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
                   int relo_idx, void *insn);
 
+static inline bool unprivileged_ebpf_enabled(void)
+{
+       return !sysctl_unprivileged_bpf_disabled;
+}
+
 #else /* !CONFIG_BPF_SYSCALL */
 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 {
@@ -2032,6 +2037,12 @@ bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
 {
        return NULL;
 }
+
+static inline bool unprivileged_ebpf_enabled(void)
+{
+       return false;
+}
+
 #endif /* CONFIG_BPF_SYSCALL */
 
 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
index 6150d11..dca2b13 100644 (file)
  */
 #define DMA_ATTR_PRIVILEGED            (1UL << 9)
 
-/*
- * This is a hint to the DMA-mapping subsystem that the device is expected
- * to overwrite the entire mapped size, thus the caller does not require any
- * of the previous buffer contents to be preserved. This allows
- * bounce-buffering implementations to optimise DMA_FROM_DEVICE transfers.
- */
-#define DMA_ATTR_OVERWRITE             (1UL << 10)
-
 /*
  * A dma_addr_t can hold any valid DMA or bus address for the platform.  It can
  * be given to a device to use as a DMA source or target.  It is specific to a
index 60ee7b3..75d40ac 100644 (file)
@@ -9,7 +9,7 @@
  * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
  * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (c) 2018 - 2021 Intel Corporation
+ * Copyright (c) 2018 - 2022 Intel Corporation
  */
 
 #ifndef LINUX_IEEE80211_H
@@ -18,6 +18,7 @@
 #include <linux/types.h>
 #include <linux/if_ether.h>
 #include <linux/etherdevice.h>
+#include <linux/bitfield.h>
 #include <asm/byteorder.h>
 #include <asm/unaligned.h>
 
@@ -1023,6 +1024,8 @@ struct ieee80211_tpc_report_ie {
 #define IEEE80211_ADDBA_EXT_FRAG_LEVEL_MASK    GENMASK(2, 1)
 #define IEEE80211_ADDBA_EXT_FRAG_LEVEL_SHIFT   1
 #define IEEE80211_ADDBA_EXT_NO_FRAG            BIT(0)
+#define IEEE80211_ADDBA_EXT_BUF_SIZE_MASK      GENMASK(7, 5)
+#define IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT     10
 
 struct ieee80211_addba_ext_ie {
        u8 data;
@@ -1697,10 +1700,12 @@ struct ieee80211_ht_operation {
  * A-MPDU buffer sizes
  * According to HT, size varies from 8 to 64 frames
  * HE adds the ability to have up to 256 frames.
+ * EHT adds the ability to have up to 1K frames.
  */
 #define IEEE80211_MIN_AMPDU_BUF                0x8
 #define IEEE80211_MAX_AMPDU_BUF_HT     0x40
-#define IEEE80211_MAX_AMPDU_BUF                0x100
+#define IEEE80211_MAX_AMPDU_BUF_HE     0x100
+#define IEEE80211_MAX_AMPDU_BUF_EHT    0x400
 
 
 /* Spatial Multiplexing Power Save Modes (for capability) */
@@ -1925,6 +1930,111 @@ struct ieee80211_mu_edca_param_set {
        struct ieee80211_he_mu_edca_param_ac_rec ac_vo;
 } __packed;
 
+#define IEEE80211_EHT_MCS_NSS_RX 0x0f
+#define IEEE80211_EHT_MCS_NSS_TX 0xf0
+
+/**
+ * struct ieee80211_eht_mcs_nss_supp_20mhz_only - EHT 20MHz only station max
+ * supported NSS per MCS.
+ *
+ * For each field below, bits 0 - 3 indicate the maximal number of spatial
+ * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams
+ * for Tx.
+ *
+ * @rx_tx_mcs7_max_nss: indicates the maximum number of spatial streams
+ *     supported for reception and the maximum number of spatial streams
+ *     supported for transmission for MCS 0 - 7.
+ * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams
+ *     supported for reception and the maximum number of spatial streams
+ *     supported for transmission for MCS 8 - 9.
+ * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams
+ *     supported for reception and the maximum number of spatial streams
+ *     supported for transmission for MCS 10 - 11.
+ * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
+ *     supported for reception and the maximum number of spatial streams
+ *     supported for transmission for MCS 12 - 13.
+ */
+struct ieee80211_eht_mcs_nss_supp_20mhz_only {
+       u8 rx_tx_mcs7_max_nss;
+       u8 rx_tx_mcs9_max_nss;
+       u8 rx_tx_mcs11_max_nss;
+       u8 rx_tx_mcs13_max_nss;
+};
+
+/**
+ * struct ieee80211_eht_mcs_nss_supp_bw - EHT max supported NSS per MCS (except
+ * 20MHz only stations).
+ *
+ * For each field below, bits 0 - 3 indicate the maximal number of spatial
+ * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams
+ * for Tx.
+ *
+ * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams
+ *     supported for reception and the maximum number of spatial streams
+ *     supported for transmission for MCS 0 - 9.
+ * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams
+ *     supported for reception and the maximum number of spatial streams
+ *     supported for transmission for MCS 10 - 11.
+ * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
+ *     supported for reception and the maximum number of spatial streams
+ *     supported for transmission for MCS 12 - 13.
+ */
+struct ieee80211_eht_mcs_nss_supp_bw {
+       u8 rx_tx_mcs9_max_nss;
+       u8 rx_tx_mcs11_max_nss;
+       u8 rx_tx_mcs13_max_nss;
+};
+
+/**
+ * struct ieee80211_eht_cap_elem_fixed - EHT capabilities fixed data
+ *
+ * This structure is the "EHT Capabilities element" fixed fields as
+ * described in P802.11be_D1.4 section 9.4.2.313.
+ *
+ * @mac_cap_info: MAC capabilities, see IEEE80211_EHT_MAC_CAP*
+ * @phy_cap_info: PHY capabilities, see IEEE80211_EHT_PHY_CAP*
+ */
+struct ieee80211_eht_cap_elem_fixed {
+       u8 mac_cap_info[2];
+       u8 phy_cap_info[9];
+} __packed;
+
+/**
+ * struct ieee80211_eht_cap_elem - EHT capabilities element
+ * @fixed: fixed parts, see &ieee80211_eht_cap_elem_fixed
+ * @optional: optional parts
+ */
+struct ieee80211_eht_cap_elem {
+       struct ieee80211_eht_cap_elem_fixed fixed;
+
+       /*
+        * Followed by:
+        * Supported EHT-MCS And NSS Set field: 4, 3, 6 or 9 octets.
+        * EHT PPE Thresholds field: variable length.
+        */
+       u8 optional[];
+} __packed;
+
+/**
+ * struct ieee80211_eht_operation - EHT operation element
+ *
+ * This structure is the "EHT Operation Element" fields as
+ * described in P802.11be_D1.4 section 9.4.2.311
+ *
+ * FIXME: The spec is unclear how big the fields are, and doesn't
+ *       indicate the "Disabled Subchannel Bitmap Present" in the
+ *       structure (Figure 9-1002a) at all ...
+ */
+struct ieee80211_eht_operation {
+       u8 chan_width;
+       u8 ccfs;
+       u8 present_bm;
+
+       u8 disable_subchannel_bitmap[];
+} __packed;
+
+#define IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT  0x1
+
 /* 802.11ac VHT Capabilities */
 #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895                 0x00000000
 #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991                 0x00000001
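
Each rx_tx_*_max_nss octet above packs the Rx NSS in bits 0-3 and the Tx NSS in bits 4-7 (the IEEE80211_EHT_MCS_NSS_RX/TX masks), so decoding is a mask and a shift; a minimal sketch:

	#include <stdint.h>

	#define EHT_MCS_NSS_RX 0x0f
	#define EHT_MCS_NSS_TX 0xf0

	/* Split one rx_tx_mcsN_max_nss octet into Rx and Tx NSS counts. */
	static void eht_max_nss(uint8_t rx_tx, unsigned int *rx, unsigned int *tx)
	{
		*rx = rx_tx & EHT_MCS_NSS_RX;
		*tx = (rx_tx & EHT_MCS_NSS_TX) >> 4;
	}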
@@ -2129,6 +2239,8 @@ enum ieee80211_client_reg_power {
 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G      0x04
 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G           0x08
 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G     0x10
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL               0x1e
+
 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G       0x20
 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G       0x40
 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK                   0xfe
@@ -2309,6 +2421,7 @@ ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap)
 #define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK              0x78
 #define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS               (3)
 #define IEEE80211_PPE_THRES_INFO_PPET_SIZE                     (3)
+#define IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE                        (7)
 
 /*
  * Calculate 802.11ax HE capabilities IE PPE field size
@@ -2338,6 +2451,29 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
        return n;
 }
 
+static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len)
+{
+       const struct ieee80211_he_cap_elem *he_cap_ie_elem = (const void *)data;
+       u8 needed = sizeof(*he_cap_ie_elem);
+
+       if (len < needed)
+               return false;
+
+       needed += ieee80211_he_mcs_nss_size(he_cap_ie_elem);
+       if (len < needed)
+               return false;
+
+       if (he_cap_ie_elem->phy_cap_info[6] &
+                       IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+               if (len < needed + 1)
+                       return false;
+               needed += ieee80211_he_ppe_size(data[needed],
+                                               he_cap_ie_elem->phy_cap_info);
+       }
+
+       return len >= needed;
+}
+
 /* HE Operation defines */
 #define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK           0x00000007
 #define IEEE80211_HE_OPERATION_TWT_REQUIRED                    0x00000008
@@ -2599,6 +2735,194 @@ ieee80211_he_spr_size(const u8 *he_spr_ie)
 #define S1G_OPER_CH_WIDTH_PRIMARY_1MHZ BIT(0)
 #define S1G_OPER_CH_WIDTH_OPER         GENMASK(4, 1)
 
+/* EHT MAC capabilities as defined in P802.11be_D1.4 section 9.4.2.313.2 */
+#define IEEE80211_EHT_MAC_CAP0_NSEP_PRIO_ACCESS                        0x01
+#define IEEE80211_EHT_MAC_CAP0_OM_CONTROL                      0x02
+#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1         0x04
+#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2         0x08
+#define IEEE80211_EHT_MAC_CAP0_RESTRICTED_TWT                  0x10
+#define IEEE80211_EHT_MAC_CAP0_SCS_TRAFFIC_DESC                        0x20
+#define IEEE80211_EHT_MAC_CAP0_MAX_AMPDU_LEN_MASK              0xc0
+#define                IEEE80211_EHT_MAC_CAP0_MAX_AMPDU_LEN_3895       0
+#define                IEEE80211_EHT_MAC_CAP0_MAX_AMPDU_LEN_7991       1
+#define                IEEE80211_EHT_MAC_CAP0_MAX_AMPDU_LEN_11454      2
+
+/* EHT PHY capabilities as defined in P802.11be_D1.4 section 9.4.2.313.3 */
+#define IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ                  0x02
+#define IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ             0x04
+#define IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI             0x08
+#define IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO           0x10
+#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER                   0x20
+#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE                   0x40
+
+/* EHT beamformee number of spatial streams <= 80MHz is split */
+#define IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK                0x80
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK                0x03
+
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK       0x1c
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK       0xe0
+
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK         0x07
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK                0x38
+
+/* EHT number of sounding dimensions for 320MHz is split */
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK                0xc0
+#define IEEE80211_EHT_PHY_CAP3_SOUNDING_DIM_320MHZ_MASK                0x01
+#define IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK               0x02
+#define IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK               0x04
+#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK            0x08
+#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK            0x10
+#define IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK                 0x20
+#define IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK         0x40
+#define IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK                   0x80
+
+#define IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO              0x01
+#define IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP                     0x02
+#define IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP           0x04
+#define IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI     0x08
+#define IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK                     0xf0
+
+#define IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK           0x01
+#define IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP                0x02
+#define IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP                0x04
+#define IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT           0x08
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK     0x30
+#define   IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US    0
+#define   IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US    1
+#define   IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US   2
+#define   IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US   3
+
+/* Maximum number of supported EHT LTF is split */
+#define IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK       0xc0
+#define IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK       0x07
+
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK                 0x78
+#define IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP               0x80
+
+#define IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW       0x01
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ      0x02
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ     0x04
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ     0x08
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ             0x10
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ            0x20
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ            0x40
+#define IEEE80211_EHT_PHY_CAP7_TB_SOUNDING_FDBK_RATE_LIMIT     0x80
+
+#define IEEE80211_EHT_PHY_CAP8_RX_1024QAM_WIDER_BW_DL_OFDMA    0x01
+#define IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA    0x02
+
+/*
+ * EHT operation channel width as defined in P802.11be_D1.4 section 9.4.2.311
+ */
+#define IEEE80211_EHT_OPER_CHAN_WIDTH          0x7
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_20MHZ    0
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_40MHZ    1
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_80MHZ    2
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_160MHZ   3
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ   4
+
+/* Calculate 802.11be EHT capabilities IE Tx/Rx EHT MCS NSS Support Field size */
+static inline u8
+ieee80211_eht_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap,
+                          const struct ieee80211_eht_cap_elem_fixed *eht_cap)
+{
+       u8 count = 0;
+
+       /* on 2.4 GHz, if it supports 40 MHz, the result is 3 */
+       if (he_cap->phy_cap_info[0] &
+           IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
+               return 3;
+
+       /* on 2.4 GHz, these three bits are reserved, so should be 0 */
+       if (he_cap->phy_cap_info[0] &
+           IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)
+               count += 3;
+
+       if (he_cap->phy_cap_info[0] &
+           IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+               count += 3;
+
+       if (eht_cap->phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
+               count += 3;
+
+       return count ? count : 4;
+}
+
+/* 802.11be EHT PPE Thresholds */
+#define IEEE80211_EHT_PPE_THRES_NSS_POS                        0
+#define IEEE80211_EHT_PPE_THRES_NSS_MASK               0xf
+#define IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK  0x1f0
+#define IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE         3
+#define IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE       9
+
+/*
+ * Calculate 802.11be EHT capabilities IE PPE field size
+ */
+static inline u8
+ieee80211_eht_ppe_size(u16 ppe_thres_hdr, const u8 *phy_cap_info)
+{
+       u32 n;
+
+       if (!(phy_cap_info[5] &
+             IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT))
+               return 0;
+
+       n = hweight16(ppe_thres_hdr &
+                     IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+       n *= 1 + u16_get_bits(ppe_thres_hdr, IEEE80211_EHT_PPE_THRES_NSS_MASK);
+
+       /*
+        * Each pair is 6 bits, and we need to add the 9 "header" bits to the
+        * total size.
+        */
+       n = n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2 +
+           IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
+       return DIV_ROUND_UP(n, 8);
+}
+
+static inline bool
+ieee80211_eht_capa_size_ok(const u8 *he_capa, const u8 *data, u8 len)
+{
+       const struct ieee80211_eht_cap_elem_fixed *elem = (const void *)data;
+       u8 needed = sizeof(struct ieee80211_eht_cap_elem_fixed);
+
+       if (len < needed || !he_capa)
+               return false;
+
+       needed += ieee80211_eht_mcs_nss_size((const void *)he_capa,
+                                            (const void *)data);
+       if (len < needed)
+               return false;
+
+       if (elem->phy_cap_info[5] &
+                       IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
+               u16 ppe_thres_hdr;
+
+               if (len < needed + sizeof(ppe_thres_hdr))
+                       return false;
+
+               ppe_thres_hdr = get_unaligned_le16(data + needed);
+               needed += ieee80211_eht_ppe_size(ppe_thres_hdr,
+                                                elem->phy_cap_info);
+       }
+
+       return len >= needed;
+}
+
+static inline bool
+ieee80211_eht_oper_size_ok(const u8 *data, u8 len)
+{
+       const struct ieee80211_eht_operation *elem = (const void *)data;
+       u8 needed = sizeof(*elem);
+
+       if (len < needed)
+               return false;
+
+       if (elem->present_bm & IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)
+               needed += 2;
+
+       return len >= needed;
+}
 
 #define LISTEN_INT_USF GENMASK(15, 14)
 #define LISTEN_INT_UI  GENMASK(13, 0)
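
A worked example of the ieee80211_eht_ppe_size() arithmetic above: for a PPE threshold header with NSS field 1 and two bits set in the RU index bitmask, n = 2 * (1 + 1) = 4 (NSS, RU) pairs, each pair contributes 2 * 3 = 6 threshold bits, and adding the 9 header bits gives 33 bits, i.e. DIV_ROUND_UP(33, 8) = 5 octets. The same computation restated standalone (popcount and the bit-field helpers replaced by portable equivalents; the presence check is omitted):

	#include <stdint.h>

	#define PPE_NSS_MASK        0x000f	/* bits 0-3 */
	#define PPE_RU_BITMASK_MASK 0x01f0	/* bits 4-8 */
	#define PPET_SIZE           3
	#define PPE_HDR_BITS        9

	static uint8_t eht_ppe_size(uint16_t hdr)
	{
		uint32_t n = (uint32_t)__builtin_popcount(hdr & PPE_RU_BITMASK_MASK);

		n *= 1 + (hdr & PPE_NSS_MASK);		/* pairs = n_RU * (NSS + 1) */
		n = n * PPET_SIZE * 2 + PPE_HDR_BITS;	/* 6 bits/pair + header */
		return (n + 7) / 8;			/* DIV_ROUND_UP(n, 8) */
	}

	/* eht_ppe_size(0x0031) == 5: NSS field 1, RU bits 4 and 5 set. */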
@@ -3054,6 +3378,9 @@ enum ieee80211_eid_ext {
        WLAN_EID_EXT_SHORT_SSID_LIST = 58,
        WLAN_EID_EXT_HE_6GHZ_CAPA = 59,
        WLAN_EID_EXT_UL_MU_POWER_CAPA = 60,
+       WLAN_EID_EXT_EHT_OPERATION = 106,
+       WLAN_EID_EXT_EHT_MULTI_LINK = 107,
+       WLAN_EID_EXT_EHT_CAPABILITY = 108,
 };
 
 /* Action category code */
@@ -4005,10 +4332,10 @@ static inline bool for_each_element_completed(const struct element *element,
 #define IEEE80211_RNR_TBTT_PARAMS_COLOC_AP                     0x40
 
 struct ieee80211_neighbor_ap_info {
-       u8 tbtt_info_hdr;
-       u8 tbtt_info_len;
-       u8 op_class;
-       u8 channel;
+       u8 tbtt_info_hdr;
+       u8 tbtt_info_len;
+       u8 op_class;
+       u8 channel;
 } __packed;
 
 enum ieee80211_range_params_max_total_ltf {
index b712217..1ed5244 100644 (file)
@@ -52,6 +52,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
        case ARPHRD_VOID:
        case ARPHRD_NONE:
        case ARPHRD_RAWIP:
+       case ARPHRD_PIMREG:
                return false;
        default:
                return true;
index 3aae023..d62ef42 100644 (file)
@@ -119,6 +119,9 @@ int br_vlan_get_info(const struct net_device *dev, u16 vid,
                     struct bridge_vlan_info *p_vinfo);
 int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
                         struct bridge_vlan_info *p_vinfo);
+bool br_mst_enabled(const struct net_device *dev);
+int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids);
+int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state);
 #else
 static inline bool br_vlan_enabled(const struct net_device *dev)
 {
@@ -151,6 +154,22 @@ static inline int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
 {
        return -EINVAL;
 }
+
+static inline bool br_mst_enabled(const struct net_device *dev)
+{
+       return false;
+}
+
+static inline int br_mst_get_info(const struct net_device *dev, u16 msti,
+                                 unsigned long *vids)
+{
+       return -EINVAL;
+}
+static inline int br_mst_get_state(const struct net_device *dev, u16 msti,
+                                  u8 *state)
+{
+       return -EINVAL;
+}
 #endif
 
 #if IS_ENABLED(CONFIG_BRIDGE)
index 10c94a3..b422947 100644 (file)
@@ -21,6 +21,7 @@ struct macvlan_dev {
        struct hlist_node       hlist;
        struct macvlan_port     *port;
        struct net_device       *lowerdev;
+       netdevice_tracker       dev_tracker;
        void                    *accel_priv;
        struct vlan_pcpu_stats __percpu *pcpu_stats;
 
index 129a6c0..1db532f 100644 (file)
@@ -7,6 +7,8 @@
 #ifndef HAVE_IDT82P33_REG
 #define HAVE_IDT82P33_REG
 
+#define REG_ADDR(page, offset) (((page) << 0x7) | ((offset) & 0x7f))
+
 /* Register address */
 #define DPLL1_TOD_CNFG 0x134
 #define DPLL2_TOD_CNFG 0x1B4
@@ -41,6 +43,7 @@
 #define REG_SOFT_RESET 0X381
 
 #define OUT_MUX_CNFG(outn) REG_ADDR(0x6, (0xC * (outn)))
+#define TOD_TRIGGER(wr_trig, rd_trig) ((((wr_trig) & 0xf) << 4) | ((rd_trig) & 0xf))
 
 /* Register bit definitions */
 #define SYNC_TOD BIT(1)
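
The REG_ADDR() macro above packs the page number into bits 7 and up and keeps a 7-bit in-page offset, so e.g. OUT_MUX_CNFG(1) = REG_ADDR(0x6, 0xC) = (0x6 << 7) | 0xC = 0x30c. A quick standalone check:

	#include <assert.h>

	#define REG_ADDR(page, offset) (((page) << 0x7) | ((offset) & 0x7f))
	#define OUT_MUX_CNFG(outn)     REG_ADDR(0x6, (0xC * (outn)))

	int main(void)
	{
		assert(OUT_MUX_CNFG(0) == 0x300);
		assert(OUT_MUX_CNFG(1) == 0x30c);
		return 0;
	}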
index d3b1a6a..96cd740 100644 (file)
@@ -264,6 +264,14 @@ enum {
 struct mlx5_cmd_stats {
        u64             sum;
        u64             n;
+       /* number of times command failed */
+       u64             failed;
+       /* number of times command failed on bad status returned by FW */
+       u64             failed_mbox_status;
+       /* last command failed returned errno */
+       u32             last_failed_errno;
+       /* last bad status returned by FW */
+       u8              last_failed_mbox_status;
        struct dentry  *root;
        /* protect command average calculations */
        spinlock_t      lock;
@@ -543,6 +551,15 @@ struct mlx5_adev {
        int idx;
 };
 
+struct mlx5_debugfs_entries {
+       struct dentry *dbg_root;
+       struct dentry *qp_debugfs;
+       struct dentry *eq_debugfs;
+       struct dentry *cq_debugfs;
+       struct dentry *cmdif_debugfs;
+       struct dentry *pages_debugfs;
+};
+
 struct mlx5_ft_pool;
 struct mlx5_priv {
        /* IRQ table valid only for real pci devices PF or VF */
@@ -553,21 +570,19 @@ struct mlx5_priv {
        struct mlx5_nb          pg_nb;
        struct workqueue_struct *pg_wq;
        struct xarray           page_root_xa;
-       int                     fw_pages;
+       u32                     fw_pages;
        atomic_t                reg_pages;
        struct list_head        free_list;
-       int                     vfs_pages;
-       int                     host_pf_pages;
+       u32                     vfs_pages;
+       u32                     host_pf_pages;
+       u32                     fw_pages_alloc_failed;
+       u32                     give_pages_dropped;
+       u32                     reclaim_pages_discard;
 
        struct mlx5_core_health health;
        struct list_head        traps;
 
-       /* start: qp staff */
-       struct dentry          *qp_debugfs;
-       struct dentry          *eq_debugfs;
-       struct dentry          *cq_debugfs;
-       struct dentry          *cmdif_debugfs;
-       /* end: qp staff */
+       struct mlx5_debugfs_entries dbg;
 
        /* start: alloc staff */
        /* protect buffer allocation according to numa node */
@@ -577,7 +592,6 @@ struct mlx5_priv {
        struct mutex            pgdir_mutex;
        struct list_head        pgdir_list;
        /* end: alloc staff */
-       struct dentry          *dbg_root;
 
        struct list_head        ctx_list;
        spinlock_t              ctx_lock;
@@ -955,6 +969,7 @@ typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);
 struct mlx5_async_work {
        struct mlx5_async_ctx *ctx;
        mlx5_async_cbk_t user_callback;
+       u16 opcode; /* cmd opcode */
        void *out; /* pointer to the cmd output buffer */
 };
 
@@ -994,9 +1009,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
 void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
 void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
-int mlx5_buf_alloc(struct mlx5_core_dev *dev,
-                  int size, struct mlx5_frag_buf *buf);
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
 int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                             struct mlx5_frag_buf *buf, int node);
 void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
@@ -1015,6 +1027,8 @@ int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
+void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev);
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
                                 s32 npages, bool ec_function);
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
@@ -1022,13 +1036,13 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
 void mlx5_register_debugfs(void);
 void mlx5_unregister_debugfs(void);
 
-void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
 void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
 void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn);
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
 
+struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev);
 void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in,
index ea65131..91b7f73 100644 (file)
@@ -493,7 +493,10 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
        u8         tcp_sport[0x10];
        u8         tcp_dport[0x10];
 
-       u8         reserved_at_c0[0x18];
+       u8         reserved_at_c0[0x10];
+       u8         ipv4_ihl[0x4];
+       u8         reserved_at_c4[0x4];
+
        u8         ttl_hoplimit[0x8];
 
        u8         udp_sport[0x10];
@@ -1343,6 +1346,7 @@ enum mlx5_fc_bulk_alloc_bitmask {
 enum {
        MLX5_STEERING_FORMAT_CONNECTX_5   = 0,
        MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
+       MLX5_STEERING_FORMAT_CONNECTX_7   = 2,
 };
 
 struct mlx5_ifc_cmd_hca_cap_bits {
@@ -1419,8 +1423,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         reserved_at_130[0xa];
        u8         log_max_ra_res_dc[0x6];
 
-       u8         reserved_at_140[0x6];
+       u8         reserved_at_140[0x5];
        u8         release_all_pages[0x1];
+       u8         must_not_use[0x1];
        u8         reserved_at_147[0x2];
        u8         roce_accl[0x1];
        u8         log_max_ra_req_qp[0x6];
@@ -3427,7 +3432,6 @@ enum {
 enum {
        MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO  = BIT(0),
        MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO  = BIT(1),
-       MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO    = BIT(2),
 };
 
 enum {
@@ -9687,7 +9691,9 @@ struct mlx5_ifc_pcam_reg_bits {
 };
 
 struct mlx5_ifc_mcam_enhanced_features_bits {
-       u8         reserved_at_0[0x6a];
+       u8         reserved_at_0[0x5d];
+       u8         mcia_32dwords[0x1];
+       u8         reserved_at_5e[0xc];
        u8         reset_state[0x1];
        u8         ptpcyc2realtime_modify[0x1];
        u8         reserved_at_6c[0x2];
@@ -9882,10 +9888,10 @@ struct mlx5_ifc_pcmr_reg_bits {
 };
 
 struct mlx5_ifc_lane_2_module_mapping_bits {
-       u8         reserved_at_0[0x6];
-       u8         rx_lane[0x2];
-       u8         reserved_at_8[0x6];
-       u8         tx_lane[0x2];
+       u8         reserved_at_0[0x4];
+       u8         rx_lane[0x4];
+       u8         reserved_at_8[0x4];
+       u8         tx_lane[0x4];
        u8         reserved_at_10[0x8];
        u8         module[0x8];
 };
@@ -9894,8 +9900,8 @@ struct mlx5_ifc_bufferx_reg_bits {
        u8         reserved_at_0[0x6];
        u8         lossy[0x1];
        u8         epsb[0x1];
-       u8         reserved_at_8[0xc];
-       u8         size[0xc];
+       u8         reserved_at_8[0x8];
+       u8         size[0x10];
 
        u8         xoff_threshold[0x10];
        u8         xon_threshold[0x10];
index 77ea4f9..28a928b 100644 (file)
@@ -56,8 +56,6 @@ enum mlx5_an_status {
        MLX5_AN_LINK_DOWN   = 4,
 };
 
-#define MLX5_EEPROM_MAX_BYTES                  32
-#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK       0x000000ff
 #define MLX5_I2C_ADDR_LOW              0x50
 #define MLX5_I2C_ADDR_HIGH             0x51
 #define MLX5_EEPROM_PAGE_LENGTH                256
index 213cc56..5744a3f 100644 (file)
@@ -2626,7 +2626,7 @@ static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
        struct vm_area_struct *prev, unsigned long addr, unsigned long end,
        unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
-       struct mempolicy *, struct vm_userfaultfd_ctx, const char *);
+       struct mempolicy *, struct vm_userfaultfd_ctx, struct anon_vma_name *);
 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
 extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
        unsigned long addr, int new_below);
@@ -3372,11 +3372,12 @@ static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
 
 #ifdef CONFIG_ANON_VMA_NAME
 int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
-                         unsigned long len_in, const char *name);
+                         unsigned long len_in,
+                         struct anon_vma_name *anon_name);
 #else
 static inline int
 madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
-                     unsigned long len_in, const char *name) {
+                     unsigned long len_in, struct anon_vma_name *anon_name) {
        return 0;
 }
 #endif
index b725839..cf90b1f 100644 (file)
@@ -140,50 +140,91 @@ static __always_inline void del_page_from_lru_list(struct page *page,
 
 #ifdef CONFIG_ANON_VMA_NAME
 /*
- * mmap_lock should be read-locked when calling vma_anon_name() and while using
- * the returned pointer.
+ * mmap_lock should be read-locked when calling anon_vma_name(). Caller should
+ * either keep holding the lock while using the returned pointer or it should
+ * raise anon_vma_name refcount before releasing the lock.
  */
-extern const char *vma_anon_name(struct vm_area_struct *vma);
+extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
+extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
+extern void anon_vma_name_free(struct kref *kref);
 
-/*
- * mmap_lock should be read-locked for orig_vma->vm_mm.
- * mmap_lock should be write-locked for new_vma->vm_mm or new_vma should be
- * isolated.
- */
-extern void dup_vma_anon_name(struct vm_area_struct *orig_vma,
-                             struct vm_area_struct *new_vma);
+/* mmap_lock should be read-locked */
+static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
+{
+       if (anon_name)
+               kref_get(&anon_name->kref);
+}
 
-/*
- * mmap_lock should be write-locked or vma should have been isolated under
- * write-locked mmap_lock protection.
- */
-extern void free_vma_anon_name(struct vm_area_struct *vma);
+static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
+{
+       if (anon_name)
+               kref_put(&anon_name->kref, anon_vma_name_free);
+}
 
-/* mmap_lock should be read-locked */
-static inline bool is_same_vma_anon_name(struct vm_area_struct *vma,
-                                        const char *name)
+static inline
+struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
+{
+       /* Prevent anon_name refcount saturation early on */
+       if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
+               anon_vma_name_get(anon_name);
+               return anon_name;
+       }
+       return anon_vma_name_alloc(anon_name->name);
+}
+
+static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
+                                    struct vm_area_struct *new_vma)
+{
+       struct anon_vma_name *anon_name = anon_vma_name(orig_vma);
+
+       if (anon_name)
+               new_vma->anon_name = anon_vma_name_reuse(anon_name);
+}
+
+static inline void free_anon_vma_name(struct vm_area_struct *vma)
 {
-       const char *vma_name = vma_anon_name(vma);
+       /*
+        * Not using anon_vma_name because it generates a warning if mmap_lock
+        * is not held, which might be the case here.
+        */
+       if (!vma->vm_file)
+               anon_vma_name_put(vma->anon_name);
+}
 
-       /* either both NULL, or pointers to same string */
-       if (vma_name == name)
+static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
+                                   struct anon_vma_name *anon_name2)
+{
+       if (anon_name1 == anon_name2)
                return true;
 
-       return name && vma_name && !strcmp(name, vma_name);
+       return anon_name1 && anon_name2 &&
+               !strcmp(anon_name1->name, anon_name2->name);
 }
+
 #else /* CONFIG_ANON_VMA_NAME */
-static inline const char *vma_anon_name(struct vm_area_struct *vma)
+static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
 {
        return NULL;
 }
-static inline void dup_vma_anon_name(struct vm_area_struct *orig_vma,
-                             struct vm_area_struct *new_vma) {}
-static inline void free_vma_anon_name(struct vm_area_struct *vma) {}
-static inline bool is_same_vma_anon_name(struct vm_area_struct *vma,
-                                        const char *name)
+
+static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
+{
+       return NULL;
+}
+
+static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
+static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
+static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
+                                    struct vm_area_struct *new_vma) {}
+static inline void free_anon_vma_name(struct vm_area_struct *vma) {}
+
+static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
+                                   struct anon_vma_name *anon_name2)
 {
        return true;
 }
+
 #endif  /* CONFIG_ANON_VMA_NAME */
 
 static inline void init_tlb_flush_pending(struct mm_struct *mm)
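
The comments above boil down to a refcounted-string discipline: a pointer obtained via anon_vma_name() is only valid under mmap_lock, so a caller that wants to keep it must take a reference before unlocking, and anon_vma_name_reuse() falls back to a fresh allocation once the kref approaches saturation. A sketch of the caller-side pattern (locking reduced to the two lock calls):

	/* Keep a VMA's name past the locked section; caller must put it. */
	static struct anon_vma_name *grab_vma_name(struct vm_area_struct *vma)
	{
		struct anon_vma_name *anon_name;

		mmap_read_lock(vma->vm_mm);
		anon_name = anon_vma_name(vma);	/* valid only while locked */
		anon_vma_name_get(anon_name);	/* take a ref before unlocking */
		mmap_read_unlock(vma->vm_mm);

		return anon_name;		/* anon_vma_name_put() when done */
	}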
index 5140e5f..0f54987 100644 (file)
@@ -416,7 +416,10 @@ struct vm_area_struct {
                        struct rb_node rb;
                        unsigned long rb_subtree_last;
                } shared;
-               /* Serialized by mmap_sem. */
+               /*
+                * Serialized by mmap_sem. Never use directly because it is
+                * valid only when vm_file is NULL. Use anon_vma_name instead.
+                */
                struct anon_vma_name *anon_name;
        };
 
index 19a27ac..cd7a597 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/prefetch.h>
 #include <asm/cache.h>
 #include <asm/byteorder.h>
+#include <asm/local.h>
 
 #include <linux/percpu.h>
 #include <linux/rculist.h>
@@ -194,6 +195,14 @@ struct net_device_stats {
        unsigned long   tx_compressed;
 };
 
+/* per-cpu stats, allocated on demand.
+ * Try to fit them in a single cache line, for dev_get_stats()'s sake.
+ */
+struct net_device_core_stats {
+       local_t         rx_dropped;
+       local_t         tx_dropped;
+       local_t         rx_nohandler;
+} __aligned(4 * sizeof(local_t));
 
 #include <linux/cache.h>
 #include <linux/skbuff.h>
@@ -1735,12 +1744,8 @@ enum netdev_ml_priv_type {
  *     @stats:         Statistics struct, which was left as a legacy, use
  *                     rtnl_link_stats64 instead
  *
- *     @rx_dropped:    Dropped packets by core network,
- *                     do not use this in drivers
- *     @tx_dropped:    Dropped packets by core network,
+ *     @core_stats:    core networking counters,
  *                     do not use this in drivers
- *     @rx_nohandler:  nohandler dropped packets by core network on
- *                     inactive devices, do not use this in drivers
  *     @carrier_up_count:      Number of times the carrier has been up
  *     @carrier_down_count:    Number of times the carrier has been down
  *
@@ -1894,6 +1899,8 @@ enum netdev_ml_priv_type {
  *     @garp_port:     GARP
  *     @mrp_port:      MRP
  *
+ *     @dm_private:    Drop monitor private
+ *
  *     @dev:           Class/net/name entry
  *     @sysfs_groups:  Space for optional device, statistics and wireless
  *                     sysfs groups
@@ -2023,9 +2030,7 @@ struct net_device {
 
        struct net_device_stats stats; /* not used by modern drivers */
 
-       atomic_long_t           rx_dropped;
-       atomic_long_t           tx_dropped;
-       atomic_long_t           rx_nohandler;
+       struct net_device_core_stats __percpu *core_stats;
 
        /* Stats to monitor link on/off, flapping */
        atomic_t                carrier_up_count;
@@ -3661,7 +3666,6 @@ static inline unsigned int get_netdev_rx_queue_index(
 }
 #endif
 
-#define DEFAULT_MAX_NUM_RSS_QUEUES     (8)
 int netif_get_num_default_rss_queues(void);
 
 enum skb_free_reason {
@@ -3718,16 +3722,6 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
 int netif_rx(struct sk_buff *skb);
 int __netif_rx(struct sk_buff *skb);
 
-static inline int netif_rx_ni(struct sk_buff *skb)
-{
-       return netif_rx(skb);
-}
-
-static inline int netif_rx_any_context(struct sk_buff *skb)
-{
-       return netif_rx(skb);
-}
-
 int netif_receive_skb(struct sk_buff *skb);
 int netif_receive_skb_core(struct sk_buff *skb);
 void netif_receive_skb_list_internal(struct list_head *head);
@@ -3849,13 +3843,42 @@ static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
        return false;
 }
 
+struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev);
+
+static inline struct net_device_core_stats *dev_core_stats(struct net_device *dev)
+{
+       /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
+       struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);
+
+       if (likely(p))
+               return this_cpu_ptr(p);
+
+       return netdev_core_stats_alloc(dev);
+}
+
+#define DEV_CORE_STATS_INC(FIELD)                                              \
+static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev)                \
+{                                                                              \
+       struct net_device_core_stats *p;                                        \
+                                                                               \
+       preempt_disable();                                                      \
+       p = dev_core_stats(dev);                                                \
+                                                                               \
+       if (p)                                                                  \
+               local_inc(&p->FIELD);                                           \
+       preempt_enable();                                                       \
+}
+DEV_CORE_STATS_INC(rx_dropped)
+DEV_CORE_STATS_INC(tx_dropped)
+DEV_CORE_STATS_INC(rx_nohandler)
+
 static __always_inline int ____dev_forward_skb(struct net_device *dev,
                                               struct sk_buff *skb,
                                               const bool check_mtu)
 {
        if (skb_orphan_frags(skb, GFP_ATOMIC) ||
            unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
-               atomic_long_inc(&dev->rx_dropped);
+               dev_core_stats_rx_dropped_inc(dev);
                kfree_skb(skb);
                return NET_RX_DROP;
        }
@@ -4682,6 +4705,8 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
 
 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                                  netdev_features_t features, bool tx_path);
+struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
+                                   netdev_features_t features, __be16 type);
 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
                                    netdev_features_t features);
 
index c7e6f20..5462c29 100644 (file)
 #define PCI_VENDOR_ID_HUAWEI           0x19e5
 
 #define PCI_VENDOR_ID_NETRONOME                0x19ee
+#define PCI_DEVICE_ID_NETRONOME_NFP3800        0x3800
 #define PCI_DEVICE_ID_NETRONOME_NFP4000        0x4000
 #define PCI_DEVICE_ID_NETRONOME_NFP5000        0x5000
 #define PCI_DEVICE_ID_NETRONOME_NFP6000        0x6000
+#define PCI_DEVICE_ID_NETRONOME_NFP3800_VF     0x3803
 #define PCI_DEVICE_ID_NETRONOME_NFP6000_VF     0x6003
 
 #define PCI_VENDOR_ID_QMI              0x1a32
index cd08cf1..36ca2b5 100644 (file)
@@ -87,8 +87,8 @@ extern const int phy_10gbit_features_array[1];
  *
  * @PHY_INTERFACE_MODE_NA: Not Applicable - don't touch
  * @PHY_INTERFACE_MODE_INTERNAL: No interface, MAC and PHY combined
- * @PHY_INTERFACE_MODE_MII: Median-independent interface
- * @PHY_INTERFACE_MODE_GMII: Gigabit median-independent interface
+ * @PHY_INTERFACE_MODE_MII: Media-independent interface
+ * @PHY_INTERFACE_MODE_GMII: Gigabit media-independent interface
  * @PHY_INTERFACE_MODE_SGMII: Serial gigabit media-independent interface
  * @PHY_INTERFACE_MODE_TBI: Ten Bit Interface
  * @PHY_INTERFACE_MODE_REVMII: Reverse Media Independent Interface
@@ -1578,6 +1578,7 @@ int genphy_update_link(struct phy_device *phydev);
 int genphy_read_lpa(struct phy_device *phydev);
 int genphy_read_status_fixed(struct phy_device *phydev);
 int genphy_read_status(struct phy_device *phydev);
+int genphy_read_master_slave(struct phy_device *phydev);
 int genphy_suspend(struct phy_device *phydev);
 int genphy_resume(struct phy_device *phydev);
 int genphy_loopback(struct phy_device *phydev, bool enable);
index 9afd34a..fefa779 100644 (file)
@@ -126,6 +126,17 @@ static inline u8 ptp_get_msgtype(const struct ptp_header *hdr,
        return msgtype;
 }
 
+/**
+ * ptp_msg_is_sync - Evaluates whether the given skb is a PTP Sync message
+ * @skb: packet buffer
+ * @type: type of the packet (see ptp_classify_raw())
+ *
+ * This function evaluates whether the given skb is a PTP Sync message.
+ *
+ * Return: true if sync message, false otherwise
+ */
+bool ptp_msg_is_sync(struct sk_buff *skb, unsigned int type);
+
 void __init ptp_classifier_init(void);
 #else
 static inline void ptp_classifier_init(void)
@@ -148,5 +159,9 @@ static inline u8 ptp_get_msgtype(const struct ptp_header *hdr,
         */
        return PTP_MSGTYPE_SYNC;
 }
+static inline bool ptp_msg_is_sync(struct sk_buff *skb, unsigned int type)
+{
+       return false;
+}
 #endif
 #endif /* _PTP_CLASSIFY_H_ */
index 7b0cb10..3a30cae 100644 (file)
@@ -314,6 +314,7 @@ struct sk_buff;
  * used to translate the reason to string.
  */
 enum skb_drop_reason {
+       SKB_NOT_DROPPED_YET = 0,
        SKB_DROP_REASON_NOT_SPECIFIED,  /* drop reason is not specified */
        SKB_DROP_REASON_NO_SOCKET,      /* socket not found */
        SKB_DROP_REASON_PKT_TOO_SMALL,  /* packet size is too small */
@@ -412,6 +413,37 @@ enum skb_drop_reason {
                                         * this means that L3 protocol is
                                         * not supported
                                         */
+       SKB_DROP_REASON_SKB_CSUM,       /* sk_buff checksum computation
+                                        * error
+                                        */
+       SKB_DROP_REASON_SKB_GSO_SEG,    /* gso segmentation error */
+       SKB_DROP_REASON_SKB_UCOPY_FAULT,        /* failed to copy data from
+                                                * user space, e.g., via
+                                                * zerocopy_sg_from_iter()
+                                                * or skb_orphan_frags_rx()
+                                                */
+       SKB_DROP_REASON_DEV_HDR,        /* device driver specific
+                                        * header/metadata is invalid
+                                        */
+       /* the device is not ready to xmit/recv due to any of its data
+        * structure that is not up/ready/initialized, e.g., the IFF_UP is
+        * not set, or driver specific tun->tfiles[txq] is not initialized
+        */
+       SKB_DROP_REASON_DEV_READY,
+       SKB_DROP_REASON_FULL_RING,      /* ring buffer is full */
+       SKB_DROP_REASON_NOMEM,          /* error due to OOM */
+       SKB_DROP_REASON_HDR_TRUNC,      /* failed to trunc/extract the header
+                                        * from networking data, e.g., failed
+                                        * to pull the protocol header from
+                                        * frags via pskb_may_pull()
+                                        */
+       SKB_DROP_REASON_TAP_FILTER,     /* dropped by (ebpf) filter directly
+                                        * attached to tun/tap, e.g., via
+                                        * TUNSETFILTEREBPF
+                                        */
+       SKB_DROP_REASON_TAP_TXFILTER,   /* dropped by tx filter implemented
+                                        * at tun/tap, e.g., check_filter()
+                                        */
        SKB_DROP_REASON_MAX,
 };
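
These new reasons are meant to be passed to kfree_skb_reason() so the kfree_skb tracepoint reports something more specific than NOT_SPECIFIED. A minimal sketch, with hypothetical foo_* driver helpers:

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (foo_tx_ring_full(priv)) {		/* hypothetical helper */
		kfree_skb_reason(skb, SKB_DROP_REASON_FULL_RING);
		return NETDEV_TX_OK;
	}
	/* ... enqueue skb to the hardware ring ... */
	return NETDEV_TX_OK;
}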
 
index 2de442e..721089b 100644 (file)
@@ -401,18 +401,24 @@ static inline int vdpa_reset(struct vdpa_device *vdev)
        return ret;
 }
 
-static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features, bool locked)
+static inline int vdpa_set_features_unlocked(struct vdpa_device *vdev, u64 features)
 {
        const struct vdpa_config_ops *ops = vdev->config;
        int ret;
 
-       if (!locked)
-               mutex_lock(&vdev->cf_mutex);
-
        vdev->features_valid = true;
        ret = ops->set_driver_features(vdev, features);
-       if (!locked)
-               mutex_unlock(&vdev->cf_mutex);
+
+       return ret;
+}
+
+static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
+{
+       int ret;
+
+       mutex_lock(&vdev->cf_mutex);
+       ret = vdpa_set_features_unlocked(vdev, features);
+       mutex_unlock(&vdev->cf_mutex);
 
        return ret;
 }
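
The split follows the usual _unlocked naming convention: vdpa_set_features() takes cf_mutex itself, while a caller that already holds the lock uses the _unlocked variant. A sketch of the second case (foo_* is hypothetical):

static int foo_config_update(struct vdpa_device *vdev, u64 features)
{
	int ret;

	mutex_lock(&vdev->cf_mutex);
	/* ... other config-space work under the same lock ... */
	ret = vdpa_set_features_unlocked(vdev, features);
	mutex_unlock(&vdev->cf_mutex);

	return ret;
}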
index 72292a6..5464f39 100644 (file)
@@ -133,7 +133,6 @@ bool is_virtio_device(struct device *dev);
 void virtio_break_device(struct virtio_device *dev);
 
 void virtio_config_changed(struct virtio_device *dev);
-int virtio_finalize_features(struct virtio_device *dev);
 #ifdef CONFIG_PM_SLEEP
 int virtio_device_freeze(struct virtio_device *dev);
 int virtio_device_restore(struct virtio_device *dev);
index 4d107ad..dafdc7f 100644 (file)
@@ -64,8 +64,9 @@ struct virtio_shm_region {
  *     Returns the first 64 feature bits (all we currently need).
  * @finalize_features: confirm what device features we'll be using.
  *     vdev: the virtio_device
- *     This gives the final feature bits for the device: it can change
+ *     This sends the driver feature bits to the device: it can change
  *     the dev->feature bits if it wants.
+ *     Note: despite the name, this can be called any number of times.
  *     Returns 0 on success or error status
  * @bus_name: return the bus name associated with the device (optional)
  *     vdev: the virtio_device
index c994d1b..3b9a40a 100644 (file)
@@ -28,7 +28,8 @@ struct watch_type_filter {
 struct watch_filter {
        union {
                struct rcu_head rcu;
-               unsigned long   type_filter[2]; /* Bitmask of accepted types */
+               /* Bitmask of accepted types */
+               DECLARE_BITMAP(type_filter, WATCH_TYPE__NR);
        };
        u32                     nr_filters;     /* Number of filters */
        struct watch_type_filter filters[];
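
DECLARE_BITMAP(name, bits) expands to unsigned long name[BITS_TO_LONGS(bits)], so the array length now tracks WATCH_TYPE__NR instead of being hard-coded to 2, and the regular bitmap API applies. A small usage sketch (the consumer function is made up):

DECLARE_BITMAP(accepted, WATCH_TYPE__NR);

bitmap_zero(accepted, WATCH_TYPE__NR);
set_bit(WATCH_TYPE_META, accepted);	/* accept meta notifications */

if (test_bit(type, accepted))
	deliver_notification();		/* hypothetical consumer */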
index ab20767..f742e50 100644 (file)
@@ -205,7 +205,8 @@ struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr);
 struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
                                         struct sockaddr_vm *dst);
 void vsock_remove_sock(struct vsock_sock *vsk);
-void vsock_for_each_connected_socket(void (*fn)(struct sock *sk));
+void vsock_for_each_connected_socket(struct vsock_transport *transport,
+                                    void (*fn)(struct sock *sk));
 int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk);
 bool vsock_find_cid(unsigned int cid);
 
index 2aa5e95..6b48d9e 100644 (file)
@@ -345,7 +345,7 @@ int  bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
 __poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
 int  bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int  bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
-int  bt_sock_wait_ready(struct sock *sk, unsigned long flags);
+int  bt_sock_wait_ready(struct sock *sk, unsigned int msg_flags);
 
 void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh);
 void bt_accept_unlink(struct sock *sk);
index 35c073d..5cb095b 100644 (file)
@@ -255,6 +255,16 @@ enum {
         * during the hdev->setup vendor callback.
         */
        HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER,
+
+       /* When this quirk is set, HCI_OP_SET_EVENT_FLT requests with
+        * HCI_FLT_CLEAR_ALL are ignored and event filtering is
+        * completely avoided. A subset of the CSR controller
+        * clones struggle with this command and instantly lock up.
+        *
+        * Note that devices using this must (separately) disable
+        * runtime suspend, because event filtering takes place there.
+        */
+       HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL,
 };
 
 /* HCI device flags */
index d0dfe72..b14f4c0 100644 (file)
@@ -770,7 +770,7 @@ extern const struct sysfs_ops slave_sysfs_ops;
 
 static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
 {
-       atomic_long_inc(&dev->tx_dropped);
+       dev_core_stats_tx_dropped_inc(dev);
        dev_kfree_skb_any(skb);
        return NET_XMIT_DROP;
 }
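
The bonding helper above now bumps the per-CPU core stats instead of the old atomic_long_t device counter; the same helper family covers the receive side. A hedged sketch of a receive-path drop, assuming the per-CPU core-stats helpers from this cycle:

static void foo_rx_drop(struct net_device *dev, struct sk_buff *skb)
{
	dev_core_stats_rx_dropped_inc(dev);	/* per-CPU, no shared atomic */
	kfree_skb(skb);
}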
index f6db085..6871338 100644 (file)
@@ -109,7 +109,12 @@ struct wiphy;
  *     on this channel.
  * @IEEE80211_CHAN_16MHZ: 16 MHz bandwidth is permitted
  *     on this channel.
- *
+ * @IEEE80211_CHAN_NO_320MHZ: If the driver supports 320 MHz on the band,
+ *     this flag indicates that a 320 MHz channel cannot use this
+ *     channel as the control or any of the secondary channels.
+ *     This may be due to the driver or due to regulatory bandwidth
+ *     restrictions.
+ * @IEEE80211_CHAN_NO_EHT: EHT operation is not permitted on this channel.
  */
 enum ieee80211_channel_flags {
        IEEE80211_CHAN_DISABLED         = 1<<0,
@@ -131,6 +136,8 @@ enum ieee80211_channel_flags {
        IEEE80211_CHAN_4MHZ             = 1<<16,
        IEEE80211_CHAN_8MHZ             = 1<<17,
        IEEE80211_CHAN_16MHZ            = 1<<18,
+       IEEE80211_CHAN_NO_320MHZ        = 1<<19,
+       IEEE80211_CHAN_NO_EHT           = 1<<20,
 };
 
 #define IEEE80211_CHAN_NO_HT40 \
@@ -360,6 +367,48 @@ struct ieee80211_sta_he_cap {
        u8 ppe_thres[IEEE80211_HE_PPE_THRES_MAX_LEN];
 };
 
+/**
+ * struct ieee80211_eht_mcs_nss_supp - EHT max supported NSS per MCS
+ *
+ * See P802.11be_D1.3 Table 9-401k - "Subfields of the Supported EHT-MCS
+ * and NSS Set field"
+ *
+ * @only_20mhz: MCS/NSS support for 20 MHz-only STA.
+ * @bw._80: MCS/NSS support for BW <= 80 MHz
+ * @bw._160: MCS/NSS support for BW = 160 MHz
+ * @bw._320: MCS/NSS support for BW = 320 MHz
+ */
+struct ieee80211_eht_mcs_nss_supp {
+       union {
+               struct ieee80211_eht_mcs_nss_supp_20mhz_only only_20mhz;
+               struct {
+                       struct ieee80211_eht_mcs_nss_supp_bw _80;
+                       struct ieee80211_eht_mcs_nss_supp_bw _160;
+                       struct ieee80211_eht_mcs_nss_supp_bw _320;
+               } __packed bw;
+       } __packed;
+} __packed;
+
+#define IEEE80211_EHT_PPE_THRES_MAX_LEN                32
+
+/**
+ * struct ieee80211_sta_eht_cap - STA's EHT capabilities
+ *
+ * This structure describes most essential parameters needed
+ * to describe 802.11be EHT capabilities for a STA.
+ *
+ * @has_eht: true iff EHT data is valid.
+ * @eht_cap_elem: Fixed portion of the eht capabilities element.
+ * @eht_mcs_nss_supp: The supported NSS/MCS combinations.
+ * @eht_ppe_thres: Holds the PPE Thresholds data.
+ */
+struct ieee80211_sta_eht_cap {
+       bool has_eht;
+       struct ieee80211_eht_cap_elem_fixed eht_cap_elem;
+       struct ieee80211_eht_mcs_nss_supp eht_mcs_nss_supp;
+       u8 eht_ppe_thres[IEEE80211_EHT_PPE_THRES_MAX_LEN];
+};
+
 /**
  * struct ieee80211_sband_iftype_data - sband data per interface type
  *
@@ -379,6 +428,7 @@ struct ieee80211_sband_iftype_data {
        u16 types_mask;
        struct ieee80211_sta_he_cap he_cap;
        struct ieee80211_he_6ghz_capa he_6ghz_capa;
+       struct ieee80211_sta_eht_cap eht_cap;
        struct {
                const u8 *data;
                unsigned int len;
@@ -561,6 +611,26 @@ ieee80211_get_he_6ghz_capa(const struct ieee80211_supported_band *sband,
        return data->he_6ghz_capa.capa;
 }
 
+/**
+ * ieee80211_get_eht_iftype_cap - return EHT capabilities for an sband's iftype
+ * @sband: the sband to search for the iftype on
+ * @iftype: enum nl80211_iftype
+ *
+ * Return: pointer to the struct ieee80211_sta_eht_cap, or NULL if none found
+ */
+static inline const struct ieee80211_sta_eht_cap *
+ieee80211_get_eht_iftype_cap(const struct ieee80211_supported_band *sband,
+                            enum nl80211_iftype iftype)
+{
+       const struct ieee80211_sband_iftype_data *data =
+               ieee80211_get_sband_iftype_data(sband, iftype);
+
+       if (data && data->eht_cap.has_eht)
+               return &data->eht_cap;
+
+       return NULL;
+}
+
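
A usage sketch: a hypothetical foo_* driver checking whether it may advertise EHT for station interfaces on a given band:

static bool foo_band_supports_eht(const struct ieee80211_supported_band *sband)
{
	const struct ieee80211_sta_eht_cap *eht_cap;

	eht_cap = ieee80211_get_eht_iftype_cap(sband, NL80211_IFTYPE_STATION);

	/* NULL when there is no iftype data or has_eht is false */
	return eht_cap != NULL;
}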
 /**
  * wiphy_read_of_freq_limits - read frequency limits from device tree
  *
@@ -1417,6 +1487,8 @@ struct sta_txpwr {
  * @airtime_weight: airtime scheduler weight for this station
  * @txpwr: transmit power for an associated station
  * @he_6ghz_capa: HE 6 GHz Band capabilities of station
+ * @eht_capa: EHT capabilities of station
+ * @eht_capa_len: the length of the EHT capabilities
  */
 struct station_parameters {
        const u8 *supported_rates;
@@ -1450,6 +1522,8 @@ struct station_parameters {
        u16 airtime_weight;
        struct sta_txpwr txpwr;
        const struct ieee80211_he_6ghz_capa *he_6ghz_capa;
+       const struct ieee80211_eht_cap_elem *eht_capa;
+       u8 eht_capa_len;
 };
 
 /**
@@ -1527,6 +1601,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
  * @RATE_INFO_FLAGS_HE_MCS: HE MCS information
  * @RATE_INFO_FLAGS_EDMG: 60GHz MCS in EDMG mode
  * @RATE_INFO_FLAGS_EXTENDED_SC_DMG: 60GHz extended SC MCS
+ * @RATE_INFO_FLAGS_EHT_MCS: EHT MCS information
  */
 enum rate_info_flags {
        RATE_INFO_FLAGS_MCS                     = BIT(0),
@@ -1536,6 +1611,7 @@ enum rate_info_flags {
        RATE_INFO_FLAGS_HE_MCS                  = BIT(4),
        RATE_INFO_FLAGS_EDMG                    = BIT(5),
        RATE_INFO_FLAGS_EXTENDED_SC_DMG         = BIT(6),
+       RATE_INFO_FLAGS_EHT_MCS                 = BIT(7),
 };
 
 /**
@@ -1550,6 +1626,8 @@ enum rate_info_flags {
  * @RATE_INFO_BW_80: 80 MHz bandwidth
  * @RATE_INFO_BW_160: 160 MHz bandwidth
  * @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation
+ * @RATE_INFO_BW_320: 320 MHz bandwidth
+ * @RATE_INFO_BW_EHT_RU: bandwidth determined by EHT RU allocation
  */
 enum rate_info_bw {
        RATE_INFO_BW_20 = 0,
@@ -1559,6 +1637,8 @@ enum rate_info_bw {
        RATE_INFO_BW_80,
        RATE_INFO_BW_160,
        RATE_INFO_BW_HE_RU,
+       RATE_INFO_BW_320,
+       RATE_INFO_BW_EHT_RU,
 };
 
 /**
@@ -1576,6 +1656,9 @@ enum rate_info_bw {
  * @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc,
  *     only valid if bw is %RATE_INFO_BW_HE_RU)
  * @n_bonded_ch: In case of EDMG the number of bonded channels (1-4)
+ * @eht_gi: EHT guard interval (from &enum nl80211_eht_gi)
+ * @eht_ru_alloc: EHT RU allocation (from &enum nl80211_eht_ru_alloc,
+ *     only valid if bw is %RATE_INFO_BW_EHT_RU)
  */
 struct rate_info {
        u8 flags;
@@ -1587,6 +1670,8 @@ struct rate_info {
        u8 he_dcm;
        u8 he_ru_alloc;
        u8 n_bonded_ch;
+       u8 eht_gi;
+       u8 eht_ru_alloc;
 };
 
 /**
index 79c67f1..6bc783b 100644 (file)
@@ -80,6 +80,7 @@ static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
        return csum16_add(csum, ~addend);
 }
 
+#ifndef HAVE_ARCH_CSUM_SHIFT
 static __always_inline __wsum csum_shift(__wsum sum, int offset)
 {
        /* rotate sum to align it with a 16b boundary */
@@ -87,6 +88,7 @@ static __always_inline __wsum csum_shift(__wsum sum, int offset)
                return (__force __wsum)ror32((__force u32)sum, 8);
        return sum;
 }
+#endif
 
 static __always_inline __wsum
 csum_block_add(__wsum csum, __wsum csum2, int offset)
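
With the new guard, an architecture can define HAVE_ARCH_CSUM_SHIFT in its asm/checksum.h and supply a specialised csum_shift(); the generic rotate above stays as the fallback. A hedged sketch of what such an override could look like, a branch-free variant of the same rotate (rol32() is from <linux/bitops.h>):

#define HAVE_ARCH_CSUM_SHIFT
static __always_inline __wsum csum_shift(__wsum sum, int offset)
{
	/* rotate by 8 bits only when offset is odd, without a branch */
	return (__force __wsum)rol32((__force u32)sum, (offset & 1) << 3);
}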
index 8d5349d..a30180c 100644 (file)
@@ -1197,9 +1197,9 @@ struct devlink_ops {
                         struct netlink_ext_ack *extack);
        int (*port_type_set)(struct devlink_port *devlink_port,
                             enum devlink_port_type port_type);
-       int (*port_split)(struct devlink *devlink, unsigned int port_index,
+       int (*port_split)(struct devlink *devlink, struct devlink_port *port,
                          unsigned int count, struct netlink_ext_ack *extack);
-       int (*port_unsplit)(struct devlink *devlink, unsigned int port_index,
+       int (*port_unsplit)(struct devlink *devlink, struct devlink_port *port,
                            struct netlink_ext_ack *extack);
        int (*sb_pool_get)(struct devlink *devlink, unsigned int sb_index,
                           u16 pool_index,
@@ -1479,6 +1479,21 @@ void *devlink_priv(struct devlink *devlink);
 struct devlink *priv_to_devlink(void *priv);
 struct device *devlink_to_dev(const struct devlink *devlink);
 
+/* Devlink instance explicit locking */
+void devl_lock(struct devlink *devlink);
+void devl_unlock(struct devlink *devlink);
+void devl_assert_locked(struct devlink *devlink);
+bool devl_lock_is_held(struct devlink *devlink);
+
+int devl_port_register(struct devlink *devlink,
+                      struct devlink_port *devlink_port,
+                      unsigned int port_index);
+void devl_port_unregister(struct devlink_port *devlink_port);
+
+int devl_rate_leaf_create(struct devlink_port *port, void *priv);
+void devl_rate_leaf_destroy(struct devlink_port *devlink_port);
+void devl_rate_nodes_destroy(struct devlink *devlink);
+
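
A sketch of a driver opting into the explicit-locking API (foo_* is hypothetical): the driver takes the instance lock itself and registers ports under it, rather than relying on devlink locking internally:

static int foo_add_port(struct devlink *devlink, struct devlink_port *port,
			unsigned int index)
{
	int err;

	devl_lock(devlink);
	err = devl_port_register(devlink, port, index);
	devl_unlock(devlink);

	return err;
}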
 struct ib_device;
 
 struct net *devlink_net(const struct devlink *devlink);
index 759479f..934958f 100644 (file)
@@ -892,6 +892,18 @@ struct dsa_switch_ops {
        int     (*get_ts_info)(struct dsa_switch *ds, int port,
                               struct ethtool_ts_info *ts);
 
+       /*
+        * DCB ops
+        */
+       int     (*port_get_default_prio)(struct dsa_switch *ds, int port);
+       int     (*port_set_default_prio)(struct dsa_switch *ds, int port,
+                                        u8 prio);
+       int     (*port_get_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp);
+       int     (*port_add_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp,
+                                     u8 prio);
+       int     (*port_del_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp,
+                                     u8 prio);
+
        /*
         * Suspend and resume
         */
@@ -945,7 +957,10 @@ struct dsa_switch_ops {
                                     struct dsa_bridge bridge);
        void    (*port_stp_state_set)(struct dsa_switch *ds, int port,
                                      u8 state);
+       int     (*port_mst_state_set)(struct dsa_switch *ds, int port,
+                                     const struct switchdev_mst_state *state);
        void    (*port_fast_age)(struct dsa_switch *ds, int port);
+       int     (*port_vlan_fast_age)(struct dsa_switch *ds, int port, u16 vid);
        int     (*port_pre_bridge_flags)(struct dsa_switch *ds, int port,
                                         struct switchdev_brport_flags flags,
                                         struct netlink_ext_ack *extack);
@@ -964,6 +979,9 @@ struct dsa_switch_ops {
                                 struct netlink_ext_ack *extack);
        int     (*port_vlan_del)(struct dsa_switch *ds, int port,
                                 const struct switchdev_obj_port_vlan *vlan);
+       int     (*vlan_msti_set)(struct dsa_switch *ds, struct dsa_bridge bridge,
+                                const struct switchdev_vlan_msti *msti);
+
        /*
         * Forwarding database
         */
@@ -1010,7 +1028,7 @@ struct dsa_switch_ops {
                                    struct flow_cls_offload *cls, bool ingress);
        int     (*port_mirror_add)(struct dsa_switch *ds, int port,
                                   struct dsa_mall_mirror_tc_entry *mirror,
-                                  bool ingress);
+                                  bool ingress, struct netlink_ext_ack *extack);
        void    (*port_mirror_del)(struct dsa_switch *ds, int port,
                                   struct dsa_mall_mirror_tc_entry *mirror);
        int     (*port_policer_add)(struct dsa_switch *ds, int port,
@@ -1227,6 +1245,12 @@ typedef int dsa_fdb_walk_cb_t(struct dsa_switch *ds, int port,
 
 int dsa_port_walk_fdbs(struct dsa_switch *ds, int port, dsa_fdb_walk_cb_t cb);
 int dsa_port_walk_mdbs(struct dsa_switch *ds, int port, dsa_fdb_walk_cb_t cb);
+bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
+                                const unsigned char *addr, u16 vid,
+                                struct dsa_db db);
+bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
+                                const struct switchdev_obj_port_mdb *mdb,
+                                struct dsa_db db);
 
 /* Keep inline for faster access in hot path */
 static inline bool netdev_uses_dsa(const struct net_device *dev)
index 9c5637d..90cd02f 100644 (file)
@@ -4,6 +4,8 @@
 
 #include <linux/skbuff.h>
 
+#define ESP_SKB_FRAG_MAXSIZE (PAGE_SIZE << SKB_FRAG_PAGE_ORDER)
+
 struct ip_esp_hdr;
 
 static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
index 58beb16..987bd51 100644 (file)
@@ -29,6 +29,7 @@ struct flowi_tunnel {
 struct flowi_common {
        int     flowic_oif;
        int     flowic_iif;
+       int     flowic_l3mdev;
        __u32   flowic_mark;
        __u8    flowic_tos;
        __u8    flowic_scope;
@@ -36,7 +37,6 @@ struct flowi_common {
        __u8    flowic_flags;
 #define FLOWI_FLAG_ANYSRC              0x01
 #define FLOWI_FLAG_KNOWN_NH            0x02
-#define FLOWI_FLAG_SKIP_NH_OIF         0x04
        __u32   flowic_secid;
        kuid_t  flowic_uid;
        struct flowi_tunnel flowic_tun_key;
@@ -70,6 +70,7 @@ struct flowi4 {
        struct flowi_common     __fl_common;
 #define flowi4_oif             __fl_common.flowic_oif
 #define flowi4_iif             __fl_common.flowic_iif
+#define flowi4_l3mdev          __fl_common.flowic_l3mdev
 #define flowi4_mark            __fl_common.flowic_mark
 #define flowi4_tos             __fl_common.flowic_tos
 #define flowi4_scope           __fl_common.flowic_scope
@@ -102,6 +103,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
 {
        fl4->flowi4_oif = oif;
        fl4->flowi4_iif = LOOPBACK_IFINDEX;
+       fl4->flowi4_l3mdev = 0;
        fl4->flowi4_mark = mark;
        fl4->flowi4_tos = tos;
        fl4->flowi4_scope = scope;
@@ -132,6 +134,7 @@ struct flowi6 {
        struct flowi_common     __fl_common;
 #define flowi6_oif             __fl_common.flowic_oif
 #define flowi6_iif             __fl_common.flowic_iif
+#define flowi6_l3mdev          __fl_common.flowic_l3mdev
 #define flowi6_mark            __fl_common.flowic_mark
 #define flowi6_scope           __fl_common.flowic_scope
 #define flowi6_proto           __fl_common.flowic_proto
@@ -177,6 +180,7 @@ struct flowi {
        } u;
 #define flowi_oif      u.__fl_common.flowic_oif
 #define flowi_iif      u.__fl_common.flowic_iif
+#define flowi_l3mdev   u.__fl_common.flowic_l3mdev
 #define flowi_mark     u.__fl_common.flowic_mark
 #define flowi_tos      u.__fl_common.flowic_tos
 #define flowi_scope    u.__fl_common.flowic_scope
index 92267d2..021778a 100644 (file)
@@ -150,6 +150,8 @@ enum flow_action_id {
        FLOW_ACTION_PPPOE_PUSH,
        FLOW_ACTION_JUMP,
        FLOW_ACTION_PIPE,
+       FLOW_ACTION_VLAN_PUSH_ETH,
+       FLOW_ACTION_VLAN_POP_ETH,
        NUM_FLOW_ACTIONS,
 };
 
@@ -211,6 +213,10 @@ struct flow_action_entry {
                        __be16          proto;
                        u8              prio;
                } vlan;
+               struct {                                /* FLOW_ACTION_VLAN_PUSH_ETH */
+                       unsigned char dst[ETH_ALEN];
+                       unsigned char src[ETH_ALEN];
+               } vlan_push_eth;
                struct {                                /* FLOW_ACTION_MANGLE */
                                                        /* FLOW_ACTION_ADD */
                        enum flow_action_mangle_base htype;
index 0e16ebb..c1d6169 100644 (file)
@@ -7,8 +7,13 @@
 #define GTP0_PORT      3386
 #define GTP1U_PORT     2152
 
+/* GTP messages types */
+#define GTP_ECHO_REQ   1       /* Echo Request */
+#define GTP_ECHO_RSP   2       /* Echo Response */
 #define GTP_TPDU       255
 
+#define GTPIE_RECOVERY 14
+
 struct gtp0_header {   /* According to GSM TS 09.60. */
        __u8    flags;
        __u8    type;
@@ -27,6 +32,43 @@ struct gtp1_header { /* According to 3GPP TS 29.060. */
        __be32  tid;
 } __attribute__ ((packed));
 
+struct gtp1_header_long {      /* According to 3GPP TS 29.060. */
+       __u8    flags;
+       __u8    type;
+       __be16  length;
+       __be32  tid;
+       __be16  seq;
+       __u8    npdu;
+       __u8    next;
+} __packed;
+
+/* GTP Information Element */
+struct gtp_ie {
+       __u8    tag;
+       __u8    val;
+} __packed;
+
+struct gtp0_packet {
+       struct gtp0_header gtp0_h;
+       struct gtp_ie ie;
+} __packed;
+
+struct gtp1u_packet {
+       struct gtp1_header_long gtp1u_h;
+       struct gtp_ie ie;
+} __packed;
+
+struct gtp_pdu_session_info {  /* According to 3GPP TS 38.415. */
+       u8      pdu_type;
+       u8      qfi;
+};
+
+static inline bool netif_is_gtp(const struct net_device *dev)
+{
+       return dev->rtnl_link_ops &&
+               !strcmp(dev->rtnl_link_ops->kind, "gtp");
+}
+
 #define GTP1_F_NPDU    0x01
 #define GTP1_F_SEQ     0x02
 #define GTP1_F_EXTHDR  0x04
index bd6912d..382ebb8 100644 (file)
@@ -636,6 +636,7 @@ struct ieee80211_fils_discovery {
  * @tx_pwr_env: transmit power envelope array of BSS.
  * @tx_pwr_env_num: number of @tx_pwr_env.
  * @pwr_reduction: power constraint of BSS.
+ * @eht_support: does this BSS support EHT
  */
 struct ieee80211_bss_conf {
        const u8 *bssid;
@@ -710,6 +711,7 @@ struct ieee80211_bss_conf {
        struct ieee80211_tx_pwr_env tx_pwr_env[IEEE80211_TPE_MAX_IE_COUNT];
        u8 tx_pwr_env_num;
        u8 pwr_reduction;
+       bool eht_support;
 };
 
 /**
@@ -2005,6 +2007,7 @@ enum ieee80211_sta_state {
  * @IEEE80211_STA_RX_BW_80: station can receive up to 80 MHz
  * @IEEE80211_STA_RX_BW_160: station can receive up to 160 MHz
  *     (including 80+80 MHz)
+ * @IEEE80211_STA_RX_BW_320: station can receive up to 320 MHz
  *
  * Implementation note: 20 must be zero to be initialized
  *     correctly; the values must be sorted.
@@ -2014,6 +2017,7 @@ enum ieee80211_sta_rx_bandwidth {
        IEEE80211_STA_RX_BW_40,
        IEEE80211_STA_RX_BW_80,
        IEEE80211_STA_RX_BW_160,
+       IEEE80211_STA_RX_BW_320,
 };
 
 /**
@@ -2069,6 +2073,7 @@ struct ieee80211_sta_txpwr {
  * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
  * @he_cap: HE capabilities of this STA
  * @he_6ghz_capa: on 6 GHz, holds the HE 6 GHz band capabilities
+ * @eht_cap: EHT capabilities of this STA
  * @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU
  *     that this station is allowed to transmit to us.
  *     Can be modified by driver.
@@ -2109,6 +2114,7 @@ struct ieee80211_sta {
        struct ieee80211_sta_vht_cap vht_cap;
        struct ieee80211_sta_he_cap he_cap;
        struct ieee80211_he_6ghz_capa he_6ghz_capa;
+       struct ieee80211_sta_eht_cap eht_cap;
        u16 max_rx_aggregation_subframes;
        bool wme;
        u8 uapsd_queues;
@@ -4942,12 +4948,14 @@ void ieee80211_report_low_ack(struct ieee80211_sta *sta, u32 num_packets);
  * @cntdwn_counter_offs: array of IEEE80211_MAX_CNTDWN_COUNTERS_NUM offsets
  *     to countdown counters.  This array can contain zero values which
  *     should be ignored.
+ * @mbssid_off: position of the multiple bssid element
  */
 struct ieee80211_mutable_offsets {
        u16 tim_offset;
        u16 tim_length;
 
        u16 cntdwn_counter_offs[IEEE80211_MAX_CNTDWN_COUNTERS_NUM];
+       u16 mbssid_off;
 };
 
 /**
@@ -6064,6 +6072,16 @@ void ieee80211_disconnect(struct ieee80211_vif *vif, bool reconnect);
  */
 void ieee80211_resume_disconnect(struct ieee80211_vif *vif);
 
+/**
+ * ieee80211_hw_restart_disconnect - disconnect from AP after
+ * hardware restart
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * Instructs mac80211 to disconnect from the AP after
+ * hardware restart.
+ */
+void ieee80211_hw_restart_disconnect(struct ieee80211_vif *vif);
+
 /**
  * ieee80211_cqm_rssi_notify - inform a configured connection quality monitoring
  *     rssi threshold triggered
index 8731d5b..b08b709 100644 (file)
@@ -97,7 +97,6 @@ struct nf_conn {
        unsigned long status;
 
        u16             cpu;
-       u16             local_origin:1;
        possible_net_t ct_net;
 
 #if IS_ENABLED(CONFIG_NF_NAT)
index 37f0fbe..9939c36 100644 (file)
@@ -177,4 +177,5 @@ void nf_nat_helper_unregister(struct nf_conntrack_nat_helper *nat);
 int nf_nat_helper_try_module_get(const char *name, u16 l3num,
                                 u8 protonum);
 void nf_nat_helper_put(struct nf_conntrack_helper *helper);
+void nf_ct_set_auto_assign_helper_warned(struct net *net);
 #endif /*_NF_CONNTRACK_HELPER_H*/
index c4c0861..20af9d3 100644 (file)
@@ -126,6 +126,7 @@ struct nft_regs_track {
        struct {
                const struct nft_expr           *selector;
                const struct nft_expr           *bitwise;
+               u8                              num_reg;
        } regs[NFT_REG32_NUM];
 
        const struct nft_expr                   *cur;
@@ -1633,4 +1634,25 @@ static inline struct nftables_pernet *nft_pernet(const struct net *net)
        return net_generic(net, nf_tables_net_id);
 }
 
+#define __NFT_REDUCE_READONLY  1UL
+#define NFT_REDUCE_READONLY    (void *)__NFT_REDUCE_READONLY
+
+static inline bool nft_reduce_is_readonly(const struct nft_expr *expr)
+{
+       return expr->ops->reduce == NFT_REDUCE_READONLY;
+}
+
+void nft_reg_track_update(struct nft_regs_track *track,
+                         const struct nft_expr *expr, u8 dreg, u8 len);
+void nft_reg_track_cancel(struct nft_regs_track *track, u8 dreg, u8 len);
+void __nft_reg_track_cancel(struct nft_regs_track *track, u8 dreg);
+
+static inline bool nft_reg_track_cmp(struct nft_regs_track *track,
+                                    const struct nft_expr *expr, u8 dreg)
+{
+       return track->regs[dreg].selector &&
+              track->regs[dreg].selector->ops == expr->ops &&
+              track->regs[dreg].num_reg == 0;
+}
+
 #endif /* _NET_NF_TABLES_H */
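
These helpers back the register-tracking ("reduce") pass: a ->reduce() callback records what it stored in its destination register, and returns true when the tracked state shows the same selector already loaded it, so the repeated expression can be elided. A simplified sketch of the callback shape (foo_expr is hypothetical; real callbacks also compare expression-private data and the tracked bitwise state):

static bool foo_expr_reduce(struct nft_regs_track *track,
			    const struct nft_expr *expr)
{
	const struct foo_expr *priv = nft_expr_priv(expr);

	if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
		/* register does not hold our result yet: record it */
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	/* same selector already filled this register: elide the expr */
	return true;
}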
index 237f375..eed099e 100644 (file)
@@ -37,4 +37,7 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
 
 void nft_fib_store_result(void *reg, const struct nft_fib *priv,
                          const struct net_device *dev);
+
+bool nft_fib_reduce(struct nft_regs_track *track,
+                   const struct nft_expr *expr);
 #endif
index 2dce55c..9b51cc6 100644 (file)
@@ -6,6 +6,7 @@
 
 struct nft_meta {
        enum nft_meta_keys      key:8;
+       u8                      len;
        union {
                u8              dreg;
                u8              sreg;
@@ -43,4 +44,6 @@ int nft_meta_set_validate(const struct nft_ctx *ctx,
                          const struct nft_expr *expr,
                          const struct nft_data **data);
 
+bool nft_meta_get_reduce(struct nft_regs_track *track,
+                        const struct nft_expr *expr);
 #endif
index f068786..ce0cc4e 100644 (file)
@@ -127,6 +127,7 @@ struct netns_ipv4 {
        u8 sysctl_tcp_synack_retries;
        u8 sysctl_tcp_syncookies;
        u8 sysctl_tcp_migrate_req;
+       u8 sysctl_tcp_comp_sack_nr;
        int sysctl_tcp_reordering;
        u8 sysctl_tcp_retries1;
        u8 sysctl_tcp_retries2;
@@ -160,9 +161,9 @@ struct netns_ipv4 {
        int sysctl_tcp_challenge_ack_limit;
        int sysctl_tcp_min_rtt_wlen;
        u8 sysctl_tcp_min_tso_segs;
+       u8 sysctl_tcp_tso_rtt_log;
        u8 sysctl_tcp_autocorking;
        u8 sysctl_tcp_reflect_tos;
-       u8 sysctl_tcp_comp_sack_nr;
        int sysctl_tcp_invalid_ratelimit;
        int sysctl_tcp_pacing_ss_ratio;
        int sysctl_tcp_pacing_ca_ratio;
index 947733a..bd7c3be 100644 (file)
@@ -66,11 +66,7 @@ struct netns_xfrm {
        int                     sysctl_larval_drop;
        u32                     sysctl_acq_expires;
 
-       u8                      policy_default;
-#define XFRM_POL_DEFAULT_IN    1
-#define XFRM_POL_DEFAULT_OUT   2
-#define XFRM_POL_DEFAULT_FWD   4
-#define XFRM_POL_DEFAULT_MASK  7
+       u8                      policy_default[XFRM_POLICY_MAX];
 
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *sysctl_hdr;
index 3e424d4..aa0171d 100644 (file)
@@ -19,6 +19,7 @@
 enum switchdev_attr_id {
        SWITCHDEV_ATTR_ID_UNDEFINED,
        SWITCHDEV_ATTR_ID_PORT_STP_STATE,
+       SWITCHDEV_ATTR_ID_PORT_MST_STATE,
        SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
        SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS,
        SWITCHDEV_ATTR_ID_PORT_MROUTER,
@@ -27,7 +28,14 @@ enum switchdev_attr_id {
        SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,
        SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
        SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
+       SWITCHDEV_ATTR_ID_BRIDGE_MST,
        SWITCHDEV_ATTR_ID_MRP_PORT_ROLE,
+       SWITCHDEV_ATTR_ID_VLAN_MSTI,
+};
+
+struct switchdev_mst_state {
+       u16 msti;
+       u8 state;
 };
 
 struct switchdev_brport_flags {
@@ -35,6 +43,11 @@ struct switchdev_brport_flags {
        unsigned long mask;
 };
 
+struct switchdev_vlan_msti {
+       u16 vid;
+       u16 msti;
+};
+
 struct switchdev_attr {
        struct net_device *orig_dev;
        enum switchdev_attr_id id;
@@ -43,13 +56,16 @@ struct switchdev_attr {
        void (*complete)(struct net_device *dev, int err, void *priv);
        union {
                u8 stp_state;                           /* PORT_STP_STATE */
+               struct switchdev_mst_state mst_state;   /* PORT_MST_STATE */
                struct switchdev_brport_flags brport_flags; /* PORT_BRIDGE_FLAGS */
                bool mrouter;                           /* PORT_MROUTER */
                clock_t ageing_time;                    /* BRIDGE_AGEING_TIME */
                bool vlan_filtering;                    /* BRIDGE_VLAN_FILTERING */
                u16 vlan_protocol;                      /* BRIDGE_VLAN_PROTOCOL */
+               bool mst;                               /* BRIDGE_MST */
                bool mc_disabled;                       /* MC_DISABLED */
                u8 mrp_port_role;                       /* MRP_PORT_ROLE */
+               struct switchdev_vlan_msti vlan_msti;   /* VLAN_MSTI */
        } u;
 };
 
index f94b8bc..a97600f 100644 (file)
@@ -78,4 +78,14 @@ static inline u8 tcf_vlan_push_prio(const struct tc_action *a)
 
        return tcfv_push_prio;
 }
+
+static inline void tcf_vlan_push_eth(unsigned char *src, unsigned char *dest,
+                                    const struct tc_action *a)
+{
+       rcu_read_lock();
+       memcpy(dest, rcu_dereference(to_vlan(a)->vlan_p)->tcfv_push_dst, ETH_ALEN);
+       memcpy(src, rcu_dereference(to_vlan(a)->vlan_p)->tcfv_push_src, ETH_ALEN);
+       rcu_read_unlock();
+}
+
 #endif /* __NET_TC_VLAN_H */
index d486d7b..70ca4a5 100644 (file)
@@ -1674,10 +1674,11 @@ tcp_md5_do_lookup(const struct sock *sk, int l3index,
                return NULL;
        return __tcp_md5_do_lookup(sk, l3index, addr, family);
 }
-bool tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
-                         enum skb_drop_reason *reason,
-                         const void *saddr, const void *daddr,
-                         int family, int dif, int sdif);
+
+enum skb_drop_reason
+tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
+                    const void *saddr, const void *daddr,
+                    int family, int dif, int sdif);
 
 
 #define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
@@ -1688,13 +1689,13 @@ tcp_md5_do_lookup(const struct sock *sk, int l3index,
 {
        return NULL;
 }
-static inline bool tcp_inbound_md5_hash(const struct sock *sk,
-                                       const struct sk_buff *skb,
-                                       enum skb_drop_reason *reason,
-                                       const void *saddr, const void *daddr,
-                                       int family, int dif, int sdif)
+
+static inline enum skb_drop_reason
+tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
+                    const void *saddr, const void *daddr,
+                    int family, int dif, int sdif)
 {
-       return false;
+       return SKB_NOT_DROPPED_YET;
 }
 #define tcp_twsk_md5_key(twsk) NULL
 #endif
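
The caller pattern this enables, sketched for an IPv4 receive path (foo_* is hypothetical; since SKB_NOT_DROPPED_YET is 0, a plain truth test works):

static int foo_check_md5(struct sock *sk, struct sk_buff *skb,
			 const struct iphdr *iph, int dif, int sdif)
{
	enum skb_drop_reason reason;

	reason = tcp_inbound_md5_hash(sk, skb, &iph->saddr, &iph->daddr,
				      AF_INET, dif, sdif);
	if (reason) {
		kfree_skb_reason(skb, reason);
		return -1;
	}
	return 0;
}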
index 526cb2c..b6968a5 100644 (file)
@@ -626,7 +626,6 @@ tls_offload_ctx_rx(const struct tls_context *tls_ctx)
        return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
 }
 
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
 static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
                                     enum tls_offload_ctx_dir direction)
 {
@@ -641,7 +640,6 @@ tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
 {
        return __tls_driver_ctx(tls_get_ctx(sk), direction);
 }
-#endif
 
 #define RESYNC_REQ BIT(0)
 #define RESYNC_REQ_ASYNC BIT(1)
index 76aa6f1..6fb899f 100644 (file)
@@ -1081,25 +1081,18 @@ xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, un
 }
 
 #ifdef CONFIG_XFRM
-static inline bool
-xfrm_default_allow(struct net *net, int dir)
-{
-       u8 def = net->xfrm.policy_default;
-
-       switch (dir) {
-       case XFRM_POLICY_IN:
-               return def & XFRM_POL_DEFAULT_IN ? false : true;
-       case XFRM_POLICY_OUT:
-               return def & XFRM_POL_DEFAULT_OUT ? false : true;
-       case XFRM_POLICY_FWD:
-               return def & XFRM_POL_DEFAULT_FWD ? false : true;
-       }
-       return false;
-}
-
 int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
                        unsigned short family);
 
+static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb,
+                                        int dir)
+{
+       if (!net->xfrm.policy_count[dir] && !secpath_exists(skb))
+               return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT;
+
+       return false;
+}
+
 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
                                       struct sk_buff *skb,
                                       unsigned int family, int reverse)
@@ -1110,13 +1103,9 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
        if (sk && sk->sk_policy[XFRM_POLICY_IN])
                return __xfrm_policy_check(sk, ndir, skb, family);
 
-       if (xfrm_default_allow(net, dir))
-               return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
-                      (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
-                      __xfrm_policy_check(sk, ndir, skb, family);
-       else
-               return (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
-                      __xfrm_policy_check(sk, ndir, skb, family);
+       return __xfrm_check_nopolicy(net, skb, dir) ||
+              (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
+              __xfrm_policy_check(sk, ndir, skb, family);
 }
 
 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
@@ -1168,13 +1157,12 @@ static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
 {
        struct net *net = dev_net(skb->dev);
 
-       if (xfrm_default_allow(net, XFRM_POLICY_OUT))
-               return !net->xfrm.policy_count[XFRM_POLICY_OUT] ||
-                       (skb_dst(skb)->flags & DST_NOXFRM) ||
-                       __xfrm_route_forward(skb, family);
-       else
-               return (skb_dst(skb)->flags & DST_NOXFRM) ||
-                       __xfrm_route_forward(skb, family);
+       if (!net->xfrm.policy_count[XFRM_POLICY_OUT] &&
+           net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT)
+               return true;
+
+       return (skb_dst(skb)->flags & DST_NOXFRM) ||
+              __xfrm_route_forward(skb, family);
 }
 
 static inline int xfrm4_route_forward(struct sk_buff *skb)
index ee3c596..9b4e6c7 100644 (file)
@@ -642,6 +642,11 @@ struct ocelot_lag_fdb {
        struct list_head list;
 };
 
+struct ocelot_mirror {
+       refcount_t refcount;
+       int to;
+};
+
 struct ocelot_port {
        struct ocelot                   *ocelot;
 
@@ -723,6 +728,7 @@ struct ocelot {
        struct ocelot_vcap_block        block[3];
        struct ocelot_vcap_policer      vcap_pol;
        struct vcap_props               *vcap;
+       struct ocelot_mirror            *mirror;
 
        struct ocelot_psfp_list         psfp;
 
@@ -869,6 +875,11 @@ int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port,
                                 struct switchdev_brport_flags val);
 void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
                              struct switchdev_brport_flags val);
+int ocelot_port_get_default_prio(struct ocelot *ocelot, int port);
+int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio);
+int ocelot_port_get_dscp_prio(struct ocelot *ocelot, int port, u8 dscp);
+int ocelot_port_add_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio);
+int ocelot_port_del_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio);
 int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
                            struct net_device *bridge, int bridge_num,
                            struct netlink_ext_ack *extack);
@@ -903,6 +914,9 @@ int ocelot_get_max_mtu(struct ocelot *ocelot, int port);
 int ocelot_port_policer_add(struct ocelot *ocelot, int port,
                            struct ocelot_policer *pol);
 int ocelot_port_policer_del(struct ocelot *ocelot, int port);
+int ocelot_port_mirror_add(struct ocelot *ocelot, int from, int to,
+                          bool ingress, struct netlink_ext_ack *extack);
+void ocelot_port_mirror_del(struct ocelot *ocelot, int from, bool ingress);
 int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
                              struct flow_cls_offload *f, bool ingress);
 int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
index deb2ad9..7b2bf9b 100644 (file)
@@ -654,6 +654,7 @@ struct ocelot_vcap_action {
                        enum ocelot_mask_mode mask_mode;
                        unsigned long port_mask;
                        bool police_ena;
+                       bool mirror_ena;
                        struct ocelot_policer pol;
                        u32 pol_ix;
                };
@@ -697,6 +698,7 @@ struct ocelot_vcap_filter {
        unsigned long ingress_port_mask;
        /* For VCAP ES0 */
        struct ocelot_vcap_port ingress_port;
+       /* For VCAP IS2 mirrors and ES0 */
        struct ocelot_vcap_port egress_port;
 
        enum ocelot_vcap_bit dmac_mc;
index c6f5aa7..2c53063 100644 (file)
@@ -56,6 +56,7 @@ enum cachefiles_coherency_trace {
        cachefiles_coherency_set_ok,
        cachefiles_coherency_vol_check_cmp,
        cachefiles_coherency_vol_check_ok,
+       cachefiles_coherency_vol_check_resv,
        cachefiles_coherency_vol_check_xattr,
        cachefiles_coherency_vol_set_fail,
        cachefiles_coherency_vol_set_ok,
@@ -139,6 +140,7 @@ enum cachefiles_error_trace {
        EM(cachefiles_coherency_set_ok,         "SET ok  ")             \
        EM(cachefiles_coherency_vol_check_cmp,  "VOL BAD cmp ")         \
        EM(cachefiles_coherency_vol_check_ok,   "VOL OK      ")         \
+       EM(cachefiles_coherency_vol_check_resv, "VOL BAD resv") \
        EM(cachefiles_coherency_vol_check_xattr,"VOL BAD xatt")         \
        EM(cachefiles_coherency_vol_set_fail,   "VOL SET fail")         \
        E_(cachefiles_coherency_vol_set_ok,     "VOL SET ok  ")
index 6bf4317..f8e28e6 100644 (file)
@@ -115,6 +115,10 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext,
                  __entry->csum_reqd)
 );
 
+DEFINE_EVENT(mptcp_dump_mpext, mptcp_sendmsg_frag,
+       TP_PROTO(struct mptcp_ext *mpext),
+       TP_ARGS(mpext));
+
 DEFINE_EVENT(mptcp_dump_mpext, get_mapping_status,
        TP_PROTO(struct mptcp_ext *mpext),
        TP_ARGS(mpext));
index c0769d9..e1670e1 100644 (file)
        EM(SKB_DROP_REASON_XDP, XDP)                            \
        EM(SKB_DROP_REASON_TC_INGRESS, TC_INGRESS)              \
        EM(SKB_DROP_REASON_PTYPE_ABSENT, PTYPE_ABSENT)          \
+       EM(SKB_DROP_REASON_SKB_CSUM, SKB_CSUM)                  \
+       EM(SKB_DROP_REASON_SKB_GSO_SEG, SKB_GSO_SEG)            \
+       EM(SKB_DROP_REASON_SKB_UCOPY_FAULT, SKB_UCOPY_FAULT)    \
+       EM(SKB_DROP_REASON_DEV_HDR, DEV_HDR)                    \
+       EM(SKB_DROP_REASON_DEV_READY, DEV_READY)                \
+       EM(SKB_DROP_REASON_FULL_RING, FULL_RING)                \
+       EM(SKB_DROP_REASON_NOMEM, NOMEM)                        \
+       EM(SKB_DROP_REASON_HDR_TRUNC, HDR_TRUNC)                \
+       EM(SKB_DROP_REASON_TAP_FILTER, TAP_FILTER)              \
+       EM(SKB_DROP_REASON_TAP_TXFILTER, TAP_TXFILTER)          \
        EMe(SKB_DROP_REASON_MAX, MAX)
 
 #undef EM
index c55935b..590f8ae 100644 (file)
@@ -137,20 +137,16 @@ struct can_isotp_ll_options {
 #define CAN_ISOTP_WAIT_TX_DONE 0x400   /* wait for tx completion */
 #define CAN_ISOTP_SF_BROADCAST 0x800   /* 1-to-N functional addressing */
 
-/* default values */
+/* protocol machine default values */
 
 #define CAN_ISOTP_DEFAULT_FLAGS                0
 #define CAN_ISOTP_DEFAULT_EXT_ADDRESS  0x00
 #define CAN_ISOTP_DEFAULT_PAD_CONTENT  0xCC /* prevent bit-stuffing */
-#define CAN_ISOTP_DEFAULT_FRAME_TXTIME 0
+#define CAN_ISOTP_DEFAULT_FRAME_TXTIME 50000 /* 50 microseconds */
 #define CAN_ISOTP_DEFAULT_RECV_BS      0
 #define CAN_ISOTP_DEFAULT_RECV_STMIN   0x00
 #define CAN_ISOTP_DEFAULT_RECV_WFTMAX  0
 
-#define CAN_ISOTP_DEFAULT_LL_MTU       CAN_MTU
-#define CAN_ISOTP_DEFAULT_LL_TX_DL     CAN_MAX_DLEN
-#define CAN_ISOTP_DEFAULT_LL_TX_FLAGS  0
-
 /*
  * Remark on CAN_ISOTP_DEFAULT_RECV_* values:
  *
@@ -162,4 +158,24 @@ struct can_isotp_ll_options {
  * consistency and copied directly into the flow control (FC) frame.
  */
 
+/* link layer default values => make use of Classical CAN frames */
+
+#define CAN_ISOTP_DEFAULT_LL_MTU       CAN_MTU
+#define CAN_ISOTP_DEFAULT_LL_TX_DL     CAN_MAX_DLEN
+#define CAN_ISOTP_DEFAULT_LL_TX_FLAGS  0
+
+/*
+ * CAN_ISOTP_DEFAULT_FRAME_TXTIME has become a non-zero value because
+ * running without an N_As value only makes sense for isotp
+ * implementation tests. As user space applications usually do not set
+ * the frame_txtime element of struct can_isotp_options, the new
+ * in-kernel default would very likely be overwritten with zero when
+ * the sockopt() CAN_ISOTP_OPTS is invoked.
+ * To make sure that an N_As value of zero is only set intentionally,
+ * the value '0' is now interpreted as 'do not change the current
+ * value'. When a frame_txtime of zero is required for testing
+ * purposes, the u32 value CAN_ISOTP_FRAME_TXTIME_ZERO has to be set
+ * in frame_txtime.
+ */
+#define CAN_ISOTP_FRAME_TXTIME_ZERO    0xFFFFFFFF
+
 #endif /* !_UAPI_CAN_ISOTP_H */
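
A userspace sketch of the one case where the magic value is needed, forcing a true N_As of zero (e.g. for implementation tests; the remaining option fields are left at 0 for brevity):

#include <sys/socket.h>
#include <linux/can/isotp.h>

static int isotp_force_zero_txtime(int sock)
{
	struct can_isotp_options opts = {
		/* plain 0 would now mean "keep the current value" */
		.frame_txtime = CAN_ISOTP_FRAME_TXTIME_ZERO,
	};

	return setsockopt(sock, SOL_CAN_ISOTP, CAN_ISOTP_OPTS,
			  &opts, sizeof(opts));
}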
index 79f9191..2f61298 100644 (file)
@@ -8,6 +8,7 @@ enum gtp_genl_cmds {
        GTP_CMD_NEWPDP,
        GTP_CMD_DELPDP,
        GTP_CMD_GETPDP,
+       GTP_CMD_ECHOREQ,
 
        GTP_CMD_MAX,
 };
index 2711c35..a86a7e7 100644 (file)
@@ -122,6 +122,7 @@ enum {
        IFLA_BRIDGE_VLAN_TUNNEL_INFO,
        IFLA_BRIDGE_MRP,
        IFLA_BRIDGE_CFM,
+       IFLA_BRIDGE_MST,
        __IFLA_BRIDGE_MAX,
 };
 #define IFLA_BRIDGE_MAX (__IFLA_BRIDGE_MAX - 1)
@@ -453,6 +454,21 @@ enum {
 
 #define IFLA_BRIDGE_CFM_CC_PEER_STATUS_MAX (__IFLA_BRIDGE_CFM_CC_PEER_STATUS_MAX - 1)
 
+enum {
+       IFLA_BRIDGE_MST_UNSPEC,
+       IFLA_BRIDGE_MST_ENTRY,
+       __IFLA_BRIDGE_MST_MAX,
+};
+#define IFLA_BRIDGE_MST_MAX (__IFLA_BRIDGE_MST_MAX - 1)
+
+enum {
+       IFLA_BRIDGE_MST_ENTRY_UNSPEC,
+       IFLA_BRIDGE_MST_ENTRY_MSTI,
+       IFLA_BRIDGE_MST_ENTRY_STATE,
+       __IFLA_BRIDGE_MST_ENTRY_MAX,
+};
+#define IFLA_BRIDGE_MST_ENTRY_MAX (__IFLA_BRIDGE_MST_ENTRY_MAX - 1)
+
 struct bridge_stp_xstats {
        __u64 transition_blk;
        __u64 transition_fwd;
@@ -564,6 +580,7 @@ enum {
        BRIDGE_VLANDB_GOPTS_MCAST_QUERIER,
        BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS,
        BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_STATE,
+       BRIDGE_VLANDB_GOPTS_MSTI,
        __BRIDGE_VLANDB_GOPTS_MAX
 };
 #define BRIDGE_VLANDB_GOPTS_MAX (__BRIDGE_VLANDB_GOPTS_MAX - 1)
@@ -759,6 +776,7 @@ struct br_mcast_stats {
 enum br_boolopt_id {
        BR_BOOLOPT_NO_LL_LEARN,
        BR_BOOLOPT_MCAST_VLAN_SNOOPING,
+       BR_BOOLOPT_MST_ENABLE,
        BR_BOOLOPT_MAX
 };
 
index ddca203..cc284c0 100644 (file)
@@ -842,6 +842,7 @@ enum {
        IFLA_GENEVE_LABEL,
        IFLA_GENEVE_TTL_INHERIT,
        IFLA_GENEVE_DF,
+       IFLA_GENEVE_INNER_PROTO_INHERIT,
        __IFLA_GENEVE_MAX
 };
 #define IFLA_GENEVE_MAX        (__IFLA_GENEVE_MAX - 1)
@@ -887,6 +888,8 @@ enum {
        IFLA_GTP_FD1,
        IFLA_GTP_PDP_HASHSIZE,
        IFLA_GTP_ROLE,
+       IFLA_GTP_CREATE_SOCKETS,
+       IFLA_GTP_RESTART_COUNT,
        __IFLA_GTP_MAX,
 };
 #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)
index 7d91055..1021196 100644 (file)
@@ -176,8 +176,10 @@ enum {
 #define TUNNEL_VXLAN_OPT       __cpu_to_be16(0x1000)
 #define TUNNEL_NOCACHE         __cpu_to_be16(0x2000)
 #define TUNNEL_ERSPAN_OPT      __cpu_to_be16(0x4000)
+#define TUNNEL_GTP_OPT         __cpu_to_be16(0x8000)
 
 #define TUNNEL_OPTIONS_PRESENT \
-               (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
+               (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT | \
+               TUNNEL_GTP_OPT)
 
 #endif /* _UAPI_IF_TUNNEL_H_ */
index 225ec87..7989d94 100644 (file)
 #define KEY_PAUSECD            201
 #define KEY_PROG3              202
 #define KEY_PROG4              203
-#define KEY_DASHBOARD          204     /* AL Dashboard */
+#define KEY_ALL_APPLICATIONS   204     /* AC Desktop Show All Applications */
+#define KEY_DASHBOARD          KEY_ALL_APPLICATIONS
 #define KEY_SUSPEND            205
 #define KEY_CLOSE              206     /* AC Close */
 #define KEY_PLAY               207
 #define KEY_ASSISTANT          0x247   /* AL Context-aware desktop assistant */
 #define KEY_KBD_LAYOUT_NEXT    0x248   /* AC Next Keyboard Layout Select */
 #define KEY_EMOJI_PICKER       0x249   /* Show/hide emoji picker (HUTRR101) */
+#define KEY_DICTATE            0x24a   /* Start or Stop Voice Dictation Session (HUTRR99) */
 
 #define KEY_BRIGHTNESS_MIN             0x250   /* Set Brightness to Minimum */
 #define KEY_BRIGHTNESS_MAX             0x251   /* Set Brightness to Maximum */
index 0425cd7..f724129 100644 (file)
@@ -36,6 +36,7 @@
 #define EFIVARFS_MAGIC         0xde5e81e4
 #define HOSTFS_SUPER_MAGIC     0x00c0ffee
 #define OVERLAYFS_SUPER_MAGIC  0x794c7630
+#define FUSE_SUPER_MAGIC       0x65735546
 
 #define MINIX_SUPER_MAGIC      0x137F          /* minix v1 fs, 14 char names */
 #define MINIX_SUPER_MAGIC2     0x138F          /* minix v1 fs, 30 char names */
index f106a39..9690efe 100644 (file)
@@ -81,6 +81,7 @@ enum {
 #define MPTCP_PM_ADDR_FLAG_SUBFLOW                     (1 << 1)
 #define MPTCP_PM_ADDR_FLAG_BACKUP                      (1 << 2)
 #define MPTCP_PM_ADDR_FLAG_FULLMESH                    (1 << 3)
+#define MPTCP_PM_ADDR_FLAG_IMPLICIT                    (1 << 4)
 
 enum {
        MPTCP_PM_CMD_UNSPEC,
index 195a238..0568a79 100644 (file)
@@ -11,7 +11,7 @@
  * Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
  * Copyright 2008 Colin McCabe <colin@cozybit.com>
  * Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -2659,6 +2659,10 @@ enum nl80211_commands {
  *     enumerated in &enum nl80211_ap_settings_flags. This attribute shall be
  *     used with %NL80211_CMD_START_AP request.
  *
+ * @NL80211_ATTR_EHT_CAPABILITY: EHT Capability information element (from
+ *     association request when used with NL80211_CMD_NEW_STATION). Can be set
+ *     only if %NL80211_STA_FLAG_WME is set.
+ *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3169,6 +3173,8 @@ enum nl80211_attrs {
 
        NL80211_ATTR_AP_SETTINGS_FLAGS,
 
+       NL80211_ATTR_EHT_CAPABILITY,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
@@ -3224,6 +3230,8 @@ enum nl80211_attrs {
 #define NL80211_HE_MAX_CAPABILITY_LEN           54
 #define NL80211_MAX_NR_CIPHER_SUITES           5
 #define NL80211_MAX_NR_AKM_SUITES              2
+#define NL80211_EHT_MIN_CAPABILITY_LEN          13
+#define NL80211_EHT_MAX_CAPABILITY_LEN          51
 
 #define NL80211_MIN_REMAIN_ON_CHANNEL_TIME     10
 
@@ -3251,7 +3259,7 @@ enum nl80211_attrs {
  *     and therefore can't be created in the normal ways, use the
  *     %NL80211_CMD_START_P2P_DEVICE and %NL80211_CMD_STOP_P2P_DEVICE
  *     commands to create and destroy one
- * @NL80211_IF_TYPE_OCB: Outside Context of a BSS
+ * @NL80211_IFTYPE_OCB: Outside Context of a BSS
  *     This mode corresponds to the MIB variable dot11OCBActivated=true
  * @NL80211_IFTYPE_NAN: NAN device interface type (not a netdev)
  * @NL80211_IFTYPE_MAX: highest interface type number currently defined
@@ -3392,6 +3400,56 @@ enum nl80211_he_ru_alloc {
        NL80211_RATE_INFO_HE_RU_ALLOC_2x996,
 };
 
+/**
+ * enum nl80211_eht_gi - EHT guard interval
+ * @NL80211_RATE_INFO_EHT_GI_0_8: 0.8 usec
+ * @NL80211_RATE_INFO_EHT_GI_1_6: 1.6 usec
+ * @NL80211_RATE_INFO_EHT_GI_3_2: 3.2 usec
+ */
+enum nl80211_eht_gi {
+       NL80211_RATE_INFO_EHT_GI_0_8,
+       NL80211_RATE_INFO_EHT_GI_1_6,
+       NL80211_RATE_INFO_EHT_GI_3_2,
+};
+
+/**
+ * enum nl80211_eht_ru_alloc - EHT RU allocation values
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_26: 26-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_52: 52-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_52P26: 52+26-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_106: 106-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_106P26: 106+26 tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_242: 242-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_484: 484-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_484P242: 484+242 tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_996: 996-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_996P484: 996+484 tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242: 996+484+242 tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_2x996: 2x996-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484: 2x996+484 tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_3x996: 3x996-tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484: 3x996+484 tone RU allocation
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC_4x996: 4x996-tone RU allocation
+ */
+enum nl80211_eht_ru_alloc {
+       NL80211_RATE_INFO_EHT_RU_ALLOC_26,
+       NL80211_RATE_INFO_EHT_RU_ALLOC_52,
+       NL80211_RATE_INFO_EHT_RU_ALLOC_52P26,
+       NL80211_RATE_INFO_EHT_RU_ALLOC_106,
+       NL80211_RATE_INFO_EHT_RU_ALLOC_106P26,
+       NL80211_RATE_INFO_EHT_RU_ALLOC_242,
+       NL80211_RATE_INFO_EHT_RU_ALLOC_484,
+       NL80211_RATE_INFO_EHT_RU_ALLOC_484P242,
+       NL80211_RATE_INFO_EHT_RU_ALLOC_996,
+       NL80211_RATE_INFO_EHT_RU_ALLOC_996P484,
+       NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242,
+       NL80211_RATE_INFO_EHT_RU_ALLOC_2x996,
+       NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484,
+       NL80211_RATE_INFO_EHT_RU_ALLOC_3x996,
+       NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484,
+       NL80211_RATE_INFO_EHT_RU_ALLOC_4x996,
+};
+
 /**
  * enum nl80211_rate_info - bitrate information
  *
@@ -3431,6 +3489,13 @@ enum nl80211_he_ru_alloc {
  * @NL80211_RATE_INFO_HE_DCM: HE DCM value (u8, 0/1)
  * @NL80211_RATE_INFO_RU_ALLOC: HE RU allocation, if not present then
  *     non-OFDMA was used (u8, see &enum nl80211_he_ru_alloc)
+ * @NL80211_RATE_INFO_320_MHZ_WIDTH: 320 MHz bitrate
+ * @NL80211_RATE_INFO_EHT_MCS: EHT MCS index (u8, 0-15)
+ * @NL80211_RATE_INFO_EHT_NSS: EHT NSS value (u8, 1-8)
+ * @NL80211_RATE_INFO_EHT_GI: EHT guard interval identifier
+ *     (u8, see &enum nl80211_eht_gi)
+ * @NL80211_RATE_INFO_EHT_RU_ALLOC: EHT RU allocation, if not present then
+ *     non-OFDMA was used (u8, see &enum nl80211_eht_ru_alloc)
  * @__NL80211_RATE_INFO_AFTER_LAST: internal use
  */
 enum nl80211_rate_info {
@@ -3452,6 +3517,11 @@ enum nl80211_rate_info {
        NL80211_RATE_INFO_HE_GI,
        NL80211_RATE_INFO_HE_DCM,
        NL80211_RATE_INFO_HE_RU_ALLOC,
+       NL80211_RATE_INFO_320_MHZ_WIDTH,
+       NL80211_RATE_INFO_EHT_MCS,
+       NL80211_RATE_INFO_EHT_NSS,
+       NL80211_RATE_INFO_EHT_GI,
+       NL80211_RATE_INFO_EHT_RU_ALLOC,
 
        /* keep last */
        __NL80211_RATE_INFO_AFTER_LAST,
@@ -3766,6 +3836,14 @@ enum nl80211_mpath_info {
  *     given for all 6 GHz band channels
  * @NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS: vendor element capabilities that are
  *     advertised on this band/for this iftype (binary)
+ * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC: EHT MAC capabilities as in EHT
+ *     capabilities element
+ * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY: EHT PHY capabilities as in EHT
+ *     capabilities element
+ * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET: EHT supported NSS/MCS as in EHT
+ *     capabilities element
+ * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE: EHT PPE thresholds information as
+ *     defined in EHT capabilities element
  * @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use
  * @NL80211_BAND_IFTYPE_ATTR_MAX: highest band attribute currently defined
  */
@@ -3779,6 +3857,10 @@ enum nl80211_band_iftype_attr {
        NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE,
        NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA,
        NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS,
+       NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC,
+       NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY,
+       NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET,
+       NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE,
 
        /* keep last */
        __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST,
@@ -3923,6 +4005,10 @@ enum nl80211_wmm_rule {
  *     on this channel in current regulatory domain.
  * @NL80211_FREQUENCY_ATTR_16MHZ: 16 MHz operation is allowed
  *     on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_NO_320MHZ: a 320 MHz channel may not use this
+ *     channel as the primary or any of the secondary channels
+ * @NL80211_FREQUENCY_ATTR_NO_EHT: EHT operation is not allowed on this channel
+ *     in current regulatory domain.
  * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
  *     currently defined
  * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@ -3959,6 +4045,8 @@ enum nl80211_frequency_attr {
        NL80211_FREQUENCY_ATTR_4MHZ,
        NL80211_FREQUENCY_ATTR_8MHZ,
        NL80211_FREQUENCY_ATTR_16MHZ,
+       NL80211_FREQUENCY_ATTR_NO_320MHZ,
+       NL80211_FREQUENCY_ATTR_NO_EHT,
 
        /* keep last */
        __NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -4157,6 +4245,7 @@ enum nl80211_sched_scan_match_attr {
  * @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed
  * @NL80211_RRF_NO_160MHZ: 160MHz operation not allowed
  * @NL80211_RRF_NO_HE: HE operation not allowed
+ * @NL80211_RRF_NO_320MHZ: 320MHz operation not allowed
  */
 enum nl80211_reg_rule_flags {
        NL80211_RRF_NO_OFDM             = 1<<0,
@@ -4175,6 +4264,7 @@ enum nl80211_reg_rule_flags {
        NL80211_RRF_NO_80MHZ            = 1<<15,
        NL80211_RRF_NO_160MHZ           = 1<<16,
        NL80211_RRF_NO_HE               = 1<<17,
+       NL80211_RRF_NO_320MHZ           = 1<<18,
 };
 
 #define NL80211_RRF_PASSIVE_SCAN       NL80211_RRF_NO_IR
@@ -4672,6 +4762,8 @@ enum nl80211_key_mode {
  * @NL80211_CHAN_WIDTH_4: 4 MHz OFDM channel
  * @NL80211_CHAN_WIDTH_8: 8 MHz OFDM channel
  * @NL80211_CHAN_WIDTH_16: 16 MHz OFDM channel
+ * @NL80211_CHAN_WIDTH_320: 320 MHz channel; the %NL80211_ATTR_CENTER_FREQ1
+ *     attribute must be provided as well
  */
 enum nl80211_chan_width {
        NL80211_CHAN_WIDTH_20_NOHT,
@@ -4687,6 +4779,7 @@ enum nl80211_chan_width {
        NL80211_CHAN_WIDTH_4,
        NL80211_CHAN_WIDTH_8,
        NL80211_CHAN_WIDTH_16,
+       NL80211_CHAN_WIDTH_320,
 };
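
Kernel-side, a sketch of a 320 MHz channel definition; per the comment above, center_freq1 must be set (the channel pointer and frequency value here are arbitrary placeholders):

    /* Sketch: a cfg80211_chan_def describing a 320 MHz 6 GHz channel. */
    struct cfg80211_chan_def def = {
            .chan         = primary_chan,   /* hypothetical primary channel */
            .width        = NL80211_CHAN_WIDTH_320,
            .center_freq1 = 6105,           /* arbitrary example value */
    };
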
 
 /**
index 9d1710f..ce3e173 100644 (file)
@@ -351,11 +351,21 @@ enum ovs_key_attr {
        OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,   /* struct ovs_key_ct_tuple_ipv4 */
        OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,   /* struct ovs_key_ct_tuple_ipv6 */
        OVS_KEY_ATTR_NSH,       /* Nested set of ovs_nsh_key_* */
-       OVS_KEY_ATTR_IPV6_EXTHDRS,  /* struct ovs_key_ipv6_exthdr */
 
-#ifdef __KERNEL__
-       OVS_KEY_ATTR_TUNNEL_INFO,  /* struct ip_tunnel_info */
-#endif
+       /* User space decided to squat on types 29 and 30.  They are defined
+        * below, but should not be sent to the kernel.
+        *
+        * WARNING: No new types should be added unless they are defined
+        *          for both kernel and user space (no 'ifdef's).  It's hard
+        *          to keep compatibility otherwise.
+        */
+       OVS_KEY_ATTR_PACKET_TYPE,   /* be32 packet type */
+       OVS_KEY_ATTR_ND_EXTENSIONS, /* IPv6 Neighbor Discovery extensions */
+
+       OVS_KEY_ATTR_TUNNEL_INFO,   /* struct ip_tunnel_info.
+                                    * For in-kernel use only.
+                                    */
+       OVS_KEY_ATTR_IPV6_EXTHDRS,  /* struct ovs_key_ipv6_exthdr */
        __OVS_KEY_ATTR_MAX
 };
 
index ee38b35..404f97f 100644 (file)
@@ -616,6 +616,10 @@ enum {
                                         * TCA_FLOWER_KEY_ENC_OPT_ERSPAN_
                                         * attributes
                                         */
+       TCA_FLOWER_KEY_ENC_OPTS_GTP,    /* Nested
+                                        * TCA_FLOWER_KEY_ENC_OPT_GTP_
+                                        * attributes
+                                        */
        __TCA_FLOWER_KEY_ENC_OPTS_MAX,
 };
 
@@ -654,6 +658,17 @@ enum {
 #define TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX \
                (__TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX - 1)
 
+enum {
+       TCA_FLOWER_KEY_ENC_OPT_GTP_UNSPEC,
+       TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,            /* u8 */
+       TCA_FLOWER_KEY_ENC_OPT_GTP_QFI,                 /* u8 */
+
+       __TCA_FLOWER_KEY_ENC_OPT_GTP_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPT_GTP_MAX \
+               (__TCA_FLOWER_KEY_ENC_OPT_GTP_MAX - 1)
+
 enum {
        TCA_FLOWER_KEY_MPLS_OPTS_UNSPEC,
        TCA_FLOWER_KEY_MPLS_OPTS_LSE,
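
For illustration, a minimal sketch of walking the new GTP option nest from userspace with libnl helpers, assuming a validated TCA_FLOWER_KEY_ENC_OPTS_GTP attribute (the caller's setup is hypothetical):

    /* Sketch: both GTP options are u8 values nested under ..._ENC_OPTS_GTP. */
    static void dump_gtp_opts(struct nlattr *gtp_nest)
    {
            struct nlattr *attr;
            int rem;

            nla_for_each_nested(attr, gtp_nest, rem) {
                    switch (nla_type(attr)) {
                    case TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE:
                            printf("pdu_type %u\n", nla_get_u8(attr));
                            break;
                    case TCA_FLOWER_KEY_ENC_OPT_GTP_QFI:
                            printf("qfi %u\n", nla_get_u8(attr));
                            break;
                    }
            }
    }
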
index 9b77cfc..283c5a7 100644 (file)
@@ -159,8 +159,16 @@ struct rfkill_event_ext {
  * old behaviour for all userspace, unless it explicitly opts in to the
  * rules outlined here by using the new &struct rfkill_event_ext.
  *
- * Userspace using &struct rfkill_event_ext must adhere to the following
- * rules
+ * Additionally, some other userspace (bluez, g-s-d) was reading with a
+ * large size but as streaming reads rather than message-based, or with
+ * overly strict checks on the returned size. So eventually, we completely
+ * reverted this, and extended messages need to be opted in to by using
+ * an ioctl:
+ *
+ *  ioctl(fd, RFKILL_IOCTL_MAX_SIZE, sizeof(struct rfkill_event_ext));
+ *
+ * Userspace using &struct rfkill_event_ext and the ioctl must adhere to
+ * the following rules:
  *
  * 1. accept short writes, optionally using them to detect that it's
  *    running on an older kernel;
@@ -175,6 +183,8 @@ struct rfkill_event_ext {
 #define RFKILL_IOC_MAGIC       'R'
 #define RFKILL_IOC_NOINPUT     1
 #define RFKILL_IOCTL_NOINPUT   _IO(RFKILL_IOC_MAGIC, RFKILL_IOC_NOINPUT)
+#define RFKILL_IOC_MAX_SIZE    2
+#define RFKILL_IOCTL_MAX_SIZE  _IOW(RFKILL_IOC_MAGIC, RFKILL_IOC_MAX_SIZE, __u32)
 
 /* and that's all userspace gets */
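
As a usage sketch of the opt-in described above (error handling trimmed; this follows the ioctl form shown in the comment):

    /* Sketch: opt in to extended events, fall back to v1 on older kernels. */
    int fd = open("/dev/rfkill", O_RDWR);
    size_t evsize = sizeof(struct rfkill_event_ext);

    if (ioctl(fd, RFKILL_IOCTL_MAX_SIZE, sizeof(struct rfkill_event_ext)) < 0)
            evsize = RFKILL_EVENT_SIZE_V1;  /* old kernel: 8-byte events only */
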
 
index 51530aa..83849a3 100644 (file)
@@ -817,6 +817,7 @@ enum {
 #define RTEXT_FILTER_MRP       (1 << 4)
 #define RTEXT_FILTER_CFM_CONFIG        (1 << 5)
 #define RTEXT_FILTER_CFM_STATUS        (1 << 6)
+#define RTEXT_FILTER_MST       (1 << 7)
 
 /* End of information exported to user level */
 
index cb854df..c9fea93 100644 (file)
@@ -104,17 +104,32 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
  * access has been ended, free the given page too.  Access will be ended
  * immediately iff the grant entry is not in use, otherwise it will happen
  * some time later.  page may be 0, in which case no freeing will occur.
+ * Note that the granted page might still be accessed (read or write) by the
+ * other side after gnttab_end_foreign_access() returns, so even if page was
+ * specified as 0, the page must not be reused for other
+ * purposes immediately. gnttab_end_foreign_access() will take an additional
+ * reference to the granted page in this case, which is dropped only after
+ * the grant is no longer in use.
+ * This requires that multi page allocations for areas subject to
+ * gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing
+ * via free_pages_exact()) in order to avoid high order pages.
  */
 void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
                               unsigned long page);
 
+/*
+ * End access through the given grant reference, iff the grant entry is
+ * no longer in use.  On successfully ending foreign access, the grant
+ * reference is deallocated.
+ * Return 1 if the grant entry was freed, 0 if it is still in use.
+ */
+int gnttab_try_end_foreign_access(grant_ref_t ref);
+
 int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
 
 unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
 unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
 
-int gnttab_query_foreign_access(grant_ref_t ref);
-
 /*
  * operations on reserved batches of grant references
  */
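
A sketch of the allocation pattern the comment above mandates for multi-page granted areas (the size variable is a hypothetical placeholder):

    /* Sketch: alloc_pages_exact() yields individually refcounted 0-order
     * pages, which gnttab_end_foreign_access() relies on to delay freeing;
     * always pair with free_pages_exact(), never free_pages().
     */
    void *area = alloc_pages_exact(area_size, GFP_KERNEL | __GFP_ZERO);
    if (!area)
            return -ENOMEM;
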
index e9ffb0c..e8db8d9 100644 (file)
@@ -17,6 +17,7 @@ CONFIG_SYMBOLIC_ERRNAME=y
 # Compile-time checks and compiler options
 #
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_FRAME_WARN=2048
 CONFIG_SECTION_MISMATCH_WARN_ONLY=y
index bfc56cb..6db1c47 100644 (file)
@@ -627,10 +627,14 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
        for (i = 0; i < nr_slots(alloc_size + offset); i++)
                mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
        tlb_addr = slot_addr(mem->start, index) + offset;
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-           (!(attrs & DMA_ATTR_OVERWRITE) || dir == DMA_TO_DEVICE ||
-           dir == DMA_BIDIRECTIONAL))
-               swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
+       /*
+        * When dir == DMA_FROM_DEVICE we could omit the copy from the
+        * original buffer to the tlb buffer, if we knew for sure the device
+        * would overwrite the entire current content. But we don't. Thus
+        * unconditional bounce may prevent leaking swiotlb content (i.e.
+        * kernel memory) to user-space.
+        */
+       swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
        return tlb_addr;
 }
 
@@ -697,10 +701,13 @@ void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
 void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
                size_t size, enum dma_data_direction dir)
 {
-       if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-               swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
-       else
-               BUG_ON(dir != DMA_FROM_DEVICE);
+       /*
+        * Unconditional bounce is necessary to avoid corruption on
+        * sync_*_for_cpu or dma_unmap_* when the device didn't overwrite
+        * the whole length of the bounce buffer.
+        */
+       swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
+       BUG_ON(!valid_dma_direction(dir));
 }
 
 void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
index 3db1a41..d94f002 100644 (file)
@@ -366,14 +366,14 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
                *new = data_race(*orig);
                INIT_LIST_HEAD(&new->anon_vma_chain);
                new->vm_next = new->vm_prev = NULL;
-               dup_vma_anon_name(orig, new);
+               dup_anon_vma_name(orig, new);
        }
        return new;
 }
 
 void vm_area_free(struct vm_area_struct *vma)
 {
-       free_vma_anon_name(vma);
+       free_anon_vma_name(vma);
        kmem_cache_free(vm_area_cachep, vma);
 }
 
index 97dc9e5..5b0e172 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/export.h>
 #include <linux/mm.h>
+#include <linux/mm_inline.h>
 #include <linux/utsname.h>
 #include <linux/mman.h>
 #include <linux/reboot.h>
@@ -2286,15 +2287,16 @@ static int prctl_set_vma(unsigned long opt, unsigned long addr,
 {
        struct mm_struct *mm = current->mm;
        const char __user *uname;
-       char *name, *pch;
+       struct anon_vma_name *anon_name = NULL;
        int error;
 
        switch (opt) {
        case PR_SET_VMA_ANON_NAME:
                uname = (const char __user *)arg;
                if (uname) {
-                       name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN);
+                       char *name, *pch;
 
+                       name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN);
                        if (IS_ERR(name))
                                return PTR_ERR(name);
 
@@ -2304,15 +2306,18 @@ static int prctl_set_vma(unsigned long opt, unsigned long addr,
                                        return -EINVAL;
                                }
                        }
-               } else {
-                       /* Reset the name */
-                       name = NULL;
+                       /* anon_vma has its own copy */
+                       anon_name = anon_vma_name_alloc(name);
+                       kfree(name);
+                       if (!anon_name)
+                               return -ENOMEM;
+
                }
 
                mmap_write_lock(mm);
-               error = madvise_set_anon_name(mm, addr, size, name);
+               error = madvise_set_anon_name(mm, addr, size, anon_name);
                mmap_write_unlock(mm);
-               kfree(name);
+               anon_vma_name_put(anon_name);
                break;
        default:
                error = -EINVAL;
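
For reference, the userspace call this path serves; a sketch of naming an anonymous mapping (constants from <linux/prctl.h>; the name then shows up as "[anon:my heap]" in /proc/<pid>/maps):

    /* Sketch: label a fresh anonymous mapping via prctl(). */
    void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
          (unsigned long)p, 4096, (unsigned long)"my heap");
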
index 5ae443b..730ab56 100644 (file)
@@ -180,6 +180,10 @@ static int bpf_stats_handler(struct ctl_table *table, int write,
        return ret;
 }
 
+void __weak unpriv_ebpf_notify(int new_state)
+{
+}
+
 static int bpf_unpriv_handler(struct ctl_table *table, int write,
                              void *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -197,6 +201,9 @@ static int bpf_unpriv_handler(struct ctl_table *table, int write,
                        return -EPERM;
                *(int *)table->data = unpriv_enable;
        }
+
+       unpriv_ebpf_notify(unpriv_enable);
+
        return ret;
 }
 #endif /* CONFIG_BPF_SYSCALL && CONFIG_SYSCTL */
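
The empty __weak stub exists so another compilation unit can override it; a sketch of what such an override could look like (the body here is purely illustrative, not the actual consumer of the hook):

    /* Sketch: a strong definition elsewhere replaces the weak stub at link
     * time and is called on every sysctl flip. A zero value written to the
     * sysctl means unprivileged bpf() is being enabled.
     */
    void unpriv_ebpf_notify(int new_state)
    {
            if (!new_state)
                    pr_info("unprivileged eBPF is now enabled\n");
    }
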
index af68a67..21dea90 100644 (file)
@@ -310,10 +310,20 @@ record_it:
        local_irq_restore(flags);
 }
 
-static void blk_trace_free(struct blk_trace *bt)
+static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)
 {
        relay_close(bt->rchan);
-       debugfs_remove(bt->dir);
+
+       /*
+        * If 'bt->dir' is not set, then both 'dropped' and 'msg' are created
+        * under 'q->debugfs_dir'; look them up there and remove them.
+        */
+       if (!bt->dir) {
+               debugfs_remove(debugfs_lookup("dropped", q->debugfs_dir));
+               debugfs_remove(debugfs_lookup("msg", q->debugfs_dir));
+       } else {
+               debugfs_remove(bt->dir);
+       }
        free_percpu(bt->sequence);
        free_percpu(bt->msg_data);
        kfree(bt);
@@ -335,10 +345,10 @@ static void put_probe_ref(void)
        mutex_unlock(&blk_probe_mutex);
 }
 
-static void blk_trace_cleanup(struct blk_trace *bt)
+static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)
 {
        synchronize_rcu();
-       blk_trace_free(bt);
+       blk_trace_free(q, bt);
        put_probe_ref();
 }
 
@@ -352,7 +362,7 @@ static int __blk_trace_remove(struct request_queue *q)
                return -EINVAL;
 
        if (bt->trace_state != Blktrace_running)
-               blk_trace_cleanup(bt);
+               blk_trace_cleanup(q, bt);
 
        return 0;
 }
@@ -572,7 +582,7 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
        ret = 0;
 err:
        if (ret)
-               blk_trace_free(bt);
+               blk_trace_free(q, bt);
        return ret;
 }
 
@@ -1616,7 +1626,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
 
        put_probe_ref();
        synchronize_rcu();
-       blk_trace_free(bt);
+       blk_trace_free(q, bt);
        return 0;
 }
 
@@ -1647,7 +1657,7 @@ static int blk_trace_setup_queue(struct request_queue *q,
        return 0;
 
 free_bt:
-       blk_trace_free(bt);
+       blk_trace_free(q, bt);
        return ret;
 }
 
index 93e9929..4214d6a 100644 (file)
@@ -7830,7 +7830,7 @@ int ftrace_is_dead(void)
 
 /**
  * register_ftrace_function - register a function for profiling
- * @ops - ops structure that holds the function for profiling.
+ * @ops:       ops structure that holds the function for profiling.
  *
  * Register a function to be called by all functions in the
  * kernel.
@@ -7857,7 +7857,7 @@ EXPORT_SYMBOL_GPL(register_ftrace_function);
 
 /**
  * unregister_ftrace_function - unregister a function for profiling.
- * @ops - ops structure that holds the function to unregister
+ * @ops:       ops structure that holds the function to unregister
  *
  * Unregister a function that was added to be called by ftrace profiling.
  */
index 3050892..eb44418 100644 (file)
@@ -235,7 +235,7 @@ static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
 static int __init set_trace_boot_options(char *str)
 {
        strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
-       return 0;
+       return 1;
 }
 __setup("trace_options=", set_trace_boot_options);
 
@@ -246,7 +246,7 @@ static int __init set_trace_boot_clock(char *str)
 {
        strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
        trace_boot_clock = trace_boot_clock_buf;
-       return 0;
+       return 1;
 }
 __setup("trace_clock=", set_trace_boot_clock);
 
index ada87bf..dc7f733 100644 (file)
@@ -2289,9 +2289,9 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
                        /*
                         * For backward compatibility, if field_name
                         * was "cpu", then we treat this the same as
-                        * common_cpu.
+                        * common_cpu. This also works for "CPU".
                         */
-                       if (strcmp(field_name, "cpu") == 0) {
+                       if (field && field->filter_type == FILTER_CPU) {
                                *flags |= HIST_FIELD_FL_CPU;
                        } else {
                                hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
@@ -4832,7 +4832,7 @@ static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
 
                        if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
                                cmp_fn = tracing_map_cmp_none;
-                       else if (!field)
+                       else if (!field || hist_field->flags & HIST_FIELD_FL_CPU)
                                cmp_fn = tracing_map_cmp_num(hist_field->size,
                                                             hist_field->is_signed);
                        else if (is_string_field(field))
index 508f14a..b62fd78 100644 (file)
@@ -32,7 +32,7 @@ static int __init set_kprobe_boot_events(char *str)
        strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
        disable_tracing_selftest("running kprobe events");
 
-       return 0;
+       return 1;
 }
 __setup("kprobe_event=", set_kprobe_boot_events);
 
index cfddb30..5e3c62a 100644 (file)
@@ -1386,6 +1386,26 @@ static int run_osnoise(void)
                                        osnoise_stop_tracing();
                }
 
+               /*
+                * In some cases, notably when running on a nohz_full CPU with
+                * a stopped tick, PREEMPT_RCU has no way to account for QSs.
+                * This will eventually cause unwarranted noise as PREEMPT_RCU
+                * will force preemption as the means of ending the current
+                * grace period. We avoid this problem by calling
+                * rcu_momentary_dyntick_idle(), which performs a zero duration
+                * EQS allowing PREEMPT_RCU to end the current grace period.
+                * This call shouldn't be wrapped inside an RCU critical
+                * section.
+                *
+                * Note that in non-PREEMPT_RCU kernels QSs are handled
+                * through cond_resched().
+                */
+               if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
+                       local_irq_disable();
+                       rcu_momentary_dyntick_idle();
+                       local_irq_enable();
+               }
+
                /*
                 * For the non-preemptive kernel config: let threads run, if
                 * they so wish.
@@ -2200,6 +2220,17 @@ static void osnoise_workload_stop(void)
        if (osnoise_has_registered_instances())
                return;
 
+       /*
+        * If callbacks were already disabled in a previous stop
+        * call, there is no need to disable them again.
+        *
+        * For instance, this happens when tracing is stopped via:
+        * echo 0 > tracing_on
+        * echo nop > current_tracer.
+        */
+       if (!trace_osnoise_callback_enabled)
+               return;
+
        trace_osnoise_callback_enabled = false;
        /*
         * Make sure that ftrace_nmi_enter/exit() see
index 9c9eb20..0070344 100644 (file)
@@ -54,6 +54,7 @@ static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
        bit += page->index;
 
        set_bit(bit, wqueue->notes_bitmap);
+       generic_pipe_buf_release(pipe, buf);
 }
 
 // No try_steal function => no stealing
@@ -112,7 +113,7 @@ static bool post_one_notification(struct watch_queue *wqueue,
        buf->offset = offset;
        buf->len = len;
        buf->flags = PIPE_BUF_FLAG_WHOLE;
-       pipe->head = head + 1;
+       smp_store_release(&pipe->head, head + 1); /* vs pipe_read() */
 
        if (!test_and_clear_bit(note, wqueue->notes_bitmap)) {
                spin_unlock_irq(&pipe->rd_wait.lock);
@@ -219,7 +220,6 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
        struct page **pages;
        unsigned long *bitmap;
        unsigned long user_bufs;
-       unsigned int bmsize;
        int ret, i, nr_pages;
 
        if (!wqueue)
@@ -243,7 +243,8 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
                goto error;
        }
 
-       ret = pipe_resize_ring(pipe, nr_notes);
+       nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE;
+       ret = pipe_resize_ring(pipe, roundup_pow_of_two(nr_notes));
        if (ret < 0)
                goto error;
 
@@ -258,17 +259,15 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
                pages[i]->index = i * WATCH_QUEUE_NOTES_PER_PAGE;
        }
 
-       bmsize = (nr_notes + BITS_PER_LONG - 1) / BITS_PER_LONG;
-       bmsize *= sizeof(unsigned long);
-       bitmap = kmalloc(bmsize, GFP_KERNEL);
+       bitmap = bitmap_alloc(nr_notes, GFP_KERNEL);
        if (!bitmap)
                goto error_p;
 
-       memset(bitmap, 0xff, bmsize);
+       bitmap_fill(bitmap, nr_notes);
        wqueue->notes = pages;
        wqueue->notes_bitmap = bitmap;
        wqueue->nr_pages = nr_pages;
-       wqueue->nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE;
+       wqueue->nr_notes = nr_notes;
        return 0;
 
 error_p:
@@ -320,7 +319,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe,
                    tf[i].info_mask & WATCH_INFO_LENGTH)
                        goto err_filter;
                /* Ignore any unknown types */
-               if (tf[i].type >= sizeof(wfilter->type_filter) * 8)
+               if (tf[i].type >= WATCH_TYPE__NR)
                        continue;
                nr_filter++;
        }
@@ -336,7 +335,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe,
 
        q = wfilter->filters;
        for (i = 0; i < filter.nr_filters; i++) {
-               if (tf[i].type >= sizeof(wfilter->type_filter) * BITS_PER_LONG)
+               if (tf[i].type >= WATCH_TYPE__NR)
                        continue;
 
                q->type                 = tf[i].type;
@@ -371,6 +370,7 @@ static void __put_watch_queue(struct kref *kref)
 
        for (i = 0; i < wqueue->nr_pages; i++)
                __free_page(wqueue->notes[i]);
+       bitmap_free(wqueue->notes_bitmap);
 
        wfilter = rcu_access_pointer(wqueue->filter);
        if (wfilter)
@@ -566,7 +566,7 @@ void watch_queue_clear(struct watch_queue *wqueue)
        rcu_read_lock();
        spin_lock_bh(&wqueue->lock);
 
-       /* Prevent new additions and prevent notifications from happening */
+       /* Prevent new notifications from being stored. */
        wqueue->defunct = true;
 
        while (!hlist_empty(&wqueue->watches)) {
index a9d4d72..7bc1ba9 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1729,11 +1729,11 @@ EXPORT_SYMBOL(fault_in_writeable);
  * @uaddr: start of address range
  * @size: length of address range
  *
- * Faults in an address range using get_user_pages, i.e., without triggering
- * hardware page faults.  This is primarily useful when we already know that
- * some or all of the pages in the address range aren't in memory.
+ * Faults in an address range for writing.  This is primarily useful when we
+ * already know that some or all of the pages in the address range aren't in
+ * memory.
  *
- * Other than fault_in_writeable(), this function is non-destructive.
+ * Unlike fault_in_writeable(), this function is non-destructive.
  *
  * Note that we don't pin or otherwise hold the pages referenced that we fault
  * in.  There's no guarantee that they'll stay in memory for any duration of
@@ -1744,46 +1744,27 @@ EXPORT_SYMBOL(fault_in_writeable);
  */
 size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
 {
-       unsigned long start = (unsigned long)untagged_addr(uaddr);
-       unsigned long end, nstart, nend;
+       unsigned long start = (unsigned long)uaddr, end;
        struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma = NULL;
-       int locked = 0;
+       bool unlocked = false;
 
-       nstart = start & PAGE_MASK;
+       if (unlikely(size == 0))
+               return 0;
        end = PAGE_ALIGN(start + size);
-       if (end < nstart)
+       if (end < start)
                end = 0;
-       for (; nstart != end; nstart = nend) {
-               unsigned long nr_pages;
-               long ret;
 
-               if (!locked) {
-                       locked = 1;
-                       mmap_read_lock(mm);
-                       vma = find_vma(mm, nstart);
-               } else if (nstart >= vma->vm_end)
-                       vma = vma->vm_next;
-               if (!vma || vma->vm_start >= end)
-                       break;
-               nend = end ? min(end, vma->vm_end) : vma->vm_end;
-               if (vma->vm_flags & (VM_IO | VM_PFNMAP))
-                       continue;
-               if (nstart < vma->vm_start)
-                       nstart = vma->vm_start;
-               nr_pages = (nend - nstart) / PAGE_SIZE;
-               ret = __get_user_pages_locked(mm, nstart, nr_pages,
-                                             NULL, NULL, &locked,
-                                             FOLL_TOUCH | FOLL_WRITE);
-               if (ret <= 0)
+       mmap_read_lock(mm);
+       do {
+               if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked))
                        break;
-               nend = nstart + ret * PAGE_SIZE;
-       }
-       if (locked)
-               mmap_read_unlock(mm);
-       if (nstart == end)
-               return 0;
-       return size - min_t(size_t, nstart - start, size);
+               start = (start + PAGE_SIZE) & PAGE_MASK;
+       } while (start != end);
+       mmap_read_unlock(mm);
+
+       if (size > (unsigned long)uaddr - start)
+               return size - ((unsigned long)uaddr - start);
+       return 0;
 }
 EXPORT_SYMBOL(fault_in_safe_writeable);
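
A sketch of the retry pattern this helper is built for, with the actual copy step left hypothetical (real callers do the copy with page faults disabled):

    /* Sketch (copy_step() is hypothetical and returns 0 on success):
     * retry an atomic user copy after faulting the destination range in
     * non-destructively.
     */
    static ssize_t copy_with_faultin(char __user *dst, const char *src,
                                     size_t len)
    {
            while (copy_step(dst, src, len)) {
                    /* == len means not a single byte could be faulted in */
                    if (fault_in_safe_writeable(dst, len) == len)
                            return -EFAULT;
            }
            return len;
    }
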
 
index 5604064..38d0f51 100644 (file)
@@ -65,7 +65,7 @@ static int madvise_need_mmap_write(int behavior)
 }
 
 #ifdef CONFIG_ANON_VMA_NAME
-static struct anon_vma_name *anon_vma_name_alloc(const char *name)
+struct anon_vma_name *anon_vma_name_alloc(const char *name)
 {
        struct anon_vma_name *anon_name;
        size_t count;
@@ -81,78 +81,48 @@ static struct anon_vma_name *anon_vma_name_alloc(const char *name)
        return anon_name;
 }
 
-static void vma_anon_name_free(struct kref *kref)
+void anon_vma_name_free(struct kref *kref)
 {
        struct anon_vma_name *anon_name =
                        container_of(kref, struct anon_vma_name, kref);
        kfree(anon_name);
 }
 
-static inline bool has_vma_anon_name(struct vm_area_struct *vma)
+struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
 {
-       return !vma->vm_file && vma->anon_name;
-}
-
-const char *vma_anon_name(struct vm_area_struct *vma)
-{
-       if (!has_vma_anon_name(vma))
-               return NULL;
-
        mmap_assert_locked(vma->vm_mm);
 
-       return vma->anon_name->name;
-}
-
-void dup_vma_anon_name(struct vm_area_struct *orig_vma,
-                      struct vm_area_struct *new_vma)
-{
-       if (!has_vma_anon_name(orig_vma))
-               return;
-
-       kref_get(&orig_vma->anon_name->kref);
-       new_vma->anon_name = orig_vma->anon_name;
-}
-
-void free_vma_anon_name(struct vm_area_struct *vma)
-{
-       struct anon_vma_name *anon_name;
-
-       if (!has_vma_anon_name(vma))
-               return;
+       if (vma->vm_file)
+               return NULL;
 
-       anon_name = vma->anon_name;
-       vma->anon_name = NULL;
-       kref_put(&anon_name->kref, vma_anon_name_free);
+       return vma->anon_name;
 }
 
 /* mmap_lock should be write-locked */
-static int replace_vma_anon_name(struct vm_area_struct *vma, const char *name)
+static int replace_anon_vma_name(struct vm_area_struct *vma,
+                                struct anon_vma_name *anon_name)
 {
-       const char *anon_name;
+       struct anon_vma_name *orig_name = anon_vma_name(vma);
 
-       if (!name) {
-               free_vma_anon_name(vma);
+       if (!anon_name) {
+               vma->anon_name = NULL;
+               anon_vma_name_put(orig_name);
                return 0;
        }
 
-       anon_name = vma_anon_name(vma);
-       if (anon_name) {
-               /* Same name, nothing to do here */
-               if (!strcmp(name, anon_name))
-                       return 0;
+       if (anon_vma_name_eq(orig_name, anon_name))
+               return 0;
 
-               free_vma_anon_name(vma);
-       }
-       vma->anon_name = anon_vma_name_alloc(name);
-       if (!vma->anon_name)
-               return -ENOMEM;
+       vma->anon_name = anon_vma_name_reuse(anon_name);
+       anon_vma_name_put(orig_name);
 
        return 0;
 }
 #else /* CONFIG_ANON_VMA_NAME */
-static int replace_vma_anon_name(struct vm_area_struct *vma, const char *name)
+static int replace_anon_vma_name(struct vm_area_struct *vma,
+                                struct anon_vma_name *anon_name)
 {
-       if (name)
+       if (anon_name)
                return -EINVAL;
 
        return 0;
@@ -161,17 +131,19 @@ static int replace_vma_anon_name(struct vm_area_struct *vma, const char *name)
 /*
  * Update the vm_flags on region of a vma, splitting it or merging it as
  * necessary.  Must be called with mmap_sem held for writing;
+ * The caller should ensure anon_name stability by raising its refcount, even
+ * when anon_name belongs to a valid vma, because this function might free
+ * that vma.
  */
 static int madvise_update_vma(struct vm_area_struct *vma,
                              struct vm_area_struct **prev, unsigned long start,
                              unsigned long end, unsigned long new_flags,
-                             const char *name)
+                             struct anon_vma_name *anon_name)
 {
        struct mm_struct *mm = vma->vm_mm;
        int error;
        pgoff_t pgoff;
 
-       if (new_flags == vma->vm_flags && is_same_vma_anon_name(vma, name)) {
+       if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
                *prev = vma;
                return 0;
        }
@@ -179,7 +151,7 @@ static int madvise_update_vma(struct vm_area_struct *vma,
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
                          vma->vm_file, pgoff, vma_policy(vma),
-                         vma->vm_userfaultfd_ctx, name);
+                         vma->vm_userfaultfd_ctx, anon_name);
        if (*prev) {
                vma = *prev;
                goto success;
@@ -209,7 +181,7 @@ success:
         */
        vma->vm_flags = new_flags;
        if (!vma->vm_file) {
-               error = replace_vma_anon_name(vma, name);
+               error = replace_anon_vma_name(vma, anon_name);
                if (error)
                        return error;
        }
@@ -975,6 +947,7 @@ static int madvise_vma_behavior(struct vm_area_struct *vma,
                                unsigned long behavior)
 {
        int error;
+       struct anon_vma_name *anon_name;
        unsigned long new_flags = vma->vm_flags;
 
        switch (behavior) {
@@ -1040,8 +1013,11 @@ static int madvise_vma_behavior(struct vm_area_struct *vma,
                break;
        }
 
+       anon_name = anon_vma_name(vma);
+       anon_vma_name_get(anon_name);
        error = madvise_update_vma(vma, prev, start, end, new_flags,
-                                  vma_anon_name(vma));
+                                  anon_name);
+       anon_vma_name_put(anon_name);
 
 out:
        /*
@@ -1225,7 +1201,7 @@ int madvise_walk_vmas(struct mm_struct *mm, unsigned long start,
 static int madvise_vma_anon_name(struct vm_area_struct *vma,
                                 struct vm_area_struct **prev,
                                 unsigned long start, unsigned long end,
-                                unsigned long name)
+                                unsigned long anon_name)
 {
        int error;
 
@@ -1234,7 +1210,7 @@ static int madvise_vma_anon_name(struct vm_area_struct *vma,
                return -EBADF;
 
        error = madvise_update_vma(vma, prev, start, end, vma->vm_flags,
-                                  (const char *)name);
+                                  (struct anon_vma_name *)anon_name);
 
        /*
         * madvise() returns EAGAIN if kernel resources, such as
@@ -1246,7 +1222,7 @@ static int madvise_vma_anon_name(struct vm_area_struct *vma,
 }
 
 int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
-                         unsigned long len_in, const char *name)
+                         unsigned long len_in, struct anon_vma_name *anon_name)
 {
        unsigned long end;
        unsigned long len;
@@ -1266,7 +1242,7 @@ int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
        if (end == start)
                return 0;
 
-       return madvise_walk_vmas(mm, start, end, (unsigned long)name,
+       return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name,
                                 madvise_vma_anon_name);
 }
 #endif /* CONFIG_ANON_VMA_NAME */
index 9f80f16..08f5f83 100644 (file)
 static void memfd_tag_pins(struct xa_state *xas)
 {
        struct page *page;
-       unsigned int tagged = 0;
+       int latency = 0;
+       int cache_count;
 
        lru_add_drain();
 
        xas_lock_irq(xas);
        xas_for_each(xas, page, ULONG_MAX) {
-               if (xa_is_value(page))
-                       continue;
-               page = find_subpage(page, xas->xa_index);
-               if (page_count(page) - page_mapcount(page) > 1)
+               cache_count = 1;
+               if (!xa_is_value(page) &&
+                   PageTransHuge(page) && !PageHuge(page))
+                       cache_count = HPAGE_PMD_NR;
+
+               if (!xa_is_value(page) &&
+                   page_count(page) - total_mapcount(page) != cache_count)
                        xas_set_mark(xas, MEMFD_TAG_PINNED);
+               if (cache_count != 1)
+                       xas_set(xas, page->index + cache_count);
 
-               if (++tagged % XA_CHECK_SCHED)
+               latency += cache_count;
+               if (latency < XA_CHECK_SCHED)
                        continue;
+               latency = 0;
 
                xas_pause(xas);
                xas_unlock_irq(xas);
@@ -73,7 +81,8 @@ static int memfd_wait_for_pins(struct address_space *mapping)
 
        error = 0;
        for (scan = 0; scan <= LAST_SCAN; scan++) {
-               unsigned int tagged = 0;
+               int latency = 0;
+               int cache_count;
 
                if (!xas_marked(&xas, MEMFD_TAG_PINNED))
                        break;
@@ -87,10 +96,14 @@ static int memfd_wait_for_pins(struct address_space *mapping)
                xas_lock_irq(&xas);
                xas_for_each_marked(&xas, page, ULONG_MAX, MEMFD_TAG_PINNED) {
                        bool clear = true;
-                       if (xa_is_value(page))
-                               continue;
-                       page = find_subpage(page, xas.xa_index);
-                       if (page_count(page) - page_mapcount(page) != 1) {
+
+                       cache_count = 1;
+                       if (!xa_is_value(page) &&
+                           PageTransHuge(page) && !PageHuge(page))
+                               cache_count = HPAGE_PMD_NR;
+
+                       if (!xa_is_value(page) && cache_count !=
+                           page_count(page) - total_mapcount(page)) {
                                /*
                                 * On the last scan, we clean up all those tags
                                 * we inserted; but make a note that we still
@@ -103,8 +116,11 @@ static int memfd_wait_for_pins(struct address_space *mapping)
                        }
                        if (clear)
                                xas_clear_mark(&xas, MEMFD_TAG_PINNED);
-                       if (++tagged % XA_CHECK_SCHED)
+
+                       latency += cache_count;
+                       if (latency < XA_CHECK_SCHED)
                                continue;
+                       latency = 0;
 
                        xas_pause(&xas);
                        xas_unlock_irq(&xas);
index 028e8dd..69284d3 100644 (file)
@@ -814,7 +814,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
                                 vma->anon_vma, vma->vm_file, pgoff,
                                 new_pol, vma->vm_userfaultfd_ctx,
-                                vma_anon_name(vma));
+                                anon_vma_name(vma));
                if (prev) {
                        vma = prev;
                        next = vma->vm_next;
index 8f584ed..25934e7 100644 (file)
@@ -512,7 +512,7 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
                          vma->vm_file, pgoff, vma_policy(vma),
-                         vma->vm_userfaultfd_ctx, vma_anon_name(vma));
+                         vma->vm_userfaultfd_ctx, anon_vma_name(vma));
        if (*prev) {
                vma = *prev;
                goto success;
index d445c1b..f61a154 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1031,7 +1031,7 @@ again:
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
                                struct file *file, unsigned long vm_flags,
                                struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
-                               const char *anon_name)
+                               struct anon_vma_name *anon_name)
 {
        /*
         * VM_SOFTDIRTY should not prevent from VMA merging, if we
@@ -1049,7 +1049,7 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma,
                return 0;
        if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
                return 0;
-       if (!is_same_vma_anon_name(vma, anon_name))
+       if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
                return 0;
        return 1;
 }
@@ -1084,7 +1084,7 @@ can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
                     struct anon_vma *anon_vma, struct file *file,
                     pgoff_t vm_pgoff,
                     struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
-                    const char *anon_name)
+                    struct anon_vma_name *anon_name)
 {
        if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
@@ -1106,7 +1106,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
                    struct anon_vma *anon_vma, struct file *file,
                    pgoff_t vm_pgoff,
                    struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
-                   const char *anon_name)
+                   struct anon_vma_name *anon_name)
 {
        if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
@@ -1167,7 +1167,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                        struct anon_vma *anon_vma, struct file *file,
                        pgoff_t pgoff, struct mempolicy *policy,
                        struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
-                       const char *anon_name)
+                       struct anon_vma_name *anon_name)
 {
        pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
        struct vm_area_struct *area, *next;
@@ -3256,7 +3256,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                return NULL;    /* should never get here */
        new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
                            vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
-                           vma->vm_userfaultfd_ctx, vma_anon_name(vma));
+                           vma->vm_userfaultfd_ctx, anon_vma_name(vma));
        if (new_vma) {
                /*
                 * Source vma may have been merged into new_vma
index 5ca3fbc..2887644 100644 (file)
@@ -464,7 +464,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *pprev = vma_merge(mm, *pprev, start, end, newflags,
                           vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
-                          vma->vm_userfaultfd_ctx, vma_anon_name(vma));
+                          vma->vm_userfaultfd_ctx, anon_vma_name(vma));
        if (*pprev) {
                vma = *pprev;
                VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
index 8d41042..ee67164 100644 (file)
@@ -478,7 +478,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
                 * in swap_map, but not yet added its page to swap cache.
                 */
-               cond_resched();
+               schedule_timeout_uninterruptible(1);
        }
 
        /*
index 7e43369..d310208 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -587,8 +587,10 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
                return ret;
 
        /* Don't even allow crazy sizes */
-       if (WARN_ON_ONCE(size > INT_MAX))
+       if (unlikely(size > INT_MAX)) {
+               WARN_ON_ONCE(!(flags & __GFP_NOWARN));
                return NULL;
+       }
 
        return __vmalloc_node(size, 1, flags, node,
                        __builtin_return_address(0));
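
The net effect for callers, sketched: an oversized request now fails quietly when __GFP_NOWARN is passed, and only un-annotated callers still trip the warning (the size variable is a hypothetical placeholder):

    /* Sketch: expected-failure path for deliberately unbounded sizes. */
    void *buf = kvmalloc(untrusted_size, GFP_KERNEL | __GFP_NOWARN);
    if (!buf)
            return -ENOMEM;
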
index 08bf6c8..7825c12 100644 (file)
@@ -280,7 +280,7 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
                const struct vlan_priority_tci_mapping *mp
                        = vlan->egress_priority_map[i];
                while (mp) {
-                       seq_printf(seq, "%u:%hu ",
+                       seq_printf(seq, "%u:%d ",
                                   mp->priority, ((mp->vlan_qos >> 13) & 0x7));
                        mp = mp->next;
                }
index eb9fb55..01f8067 100644 (file)
@@ -281,9 +281,9 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
                                ref = priv->rings[i].intf->ref[j];
                                gnttab_end_foreign_access(ref, 0, 0);
                        }
-                       free_pages((unsigned long)priv->rings[i].data.in,
-                                  priv->rings[i].intf->ring_order -
-                                  (PAGE_SHIFT - XEN_PAGE_SHIFT));
+                       free_pages_exact(priv->rings[i].data.in,
+                                  1UL << (priv->rings[i].intf->ring_order +
+                                          XEN_PAGE_SHIFT));
                }
                gnttab_end_foreign_access(priv->rings[i].ref, 0, 0);
                free_page((unsigned long)priv->rings[i].intf);
@@ -322,8 +322,8 @@ static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
        if (ret < 0)
                goto out;
        ring->ref = ret;
-       bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                       order - (PAGE_SHIFT - XEN_PAGE_SHIFT));
+       bytes = alloc_pages_exact(1UL << (order + XEN_PAGE_SHIFT),
+                                 GFP_KERNEL | __GFP_ZERO);
        if (!bytes) {
                ret = -ENOMEM;
                goto out;
@@ -354,9 +354,7 @@ out:
        if (bytes) {
                for (i--; i >= 0; i--)
                        gnttab_end_foreign_access(ring->intf->ref[i], 0, 0);
-               free_pages((unsigned long)bytes,
-                          ring->intf->ring_order -
-                          (PAGE_SHIFT - XEN_PAGE_SHIFT));
+               free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT));
        }
        gnttab_end_foreign_access(ring->ref, 0, 0);
        free_page((unsigned long)ring->intf);
index d53cbb4..6bd0971 100644 (file)
@@ -87,6 +87,13 @@ again:
        ax25_for_each(s, &ax25_list) {
                if (s->ax25_dev == ax25_dev) {
                        sk = s->sk;
+                       if (!sk) {
+                               spin_unlock_bh(&ax25_list_lock);
+                               s->ax25_dev = NULL;
+                               ax25_disconnect(s, ENETUNREACH);
+                               spin_lock_bh(&ax25_list_lock);
+                               goto again;
+                       }
                        sock_hold(sk);
                        spin_unlock_bh(&ax25_list_lock);
                        lock_sock(sk);
index 337e20b..7f8a14d 100644 (file)
@@ -444,7 +444,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, const u8 *mac,
        batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
                           skb->len + ETH_HLEN);
 
-       netif_rx_any_context(skb);
+       netif_rx(skb);
 out:
        batadv_hardif_put(primary_if);
 }
index 8e8c075..215af9b 100644 (file)
@@ -240,7 +240,7 @@ static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
        if (!skb_cp)
                return NET_RX_DROP;
 
-       return netif_rx_ni(skb_cp);
+       return netif_rx(skb_cp);
 }
 
 static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
index ee31977..a0cb2e3 100644 (file)
@@ -568,7 +568,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
 EXPORT_SYMBOL(bt_sock_wait_state);
 
 /* This function expects the sk lock to be held when called */
-int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
+int bt_sock_wait_ready(struct sock *sk, unsigned int msg_flags)
 {
        DECLARE_WAITQUEUE(wait, current);
        unsigned long timeo;
@@ -576,7 +576,7 @@ int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
 
        BT_DBG("sk %p", sk);
 
-       timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
+       timeo = sock_sndtimeo(sk, !!(msg_flags & MSG_DONTWAIT));
 
        add_wait_queue(sk_sleep(sk), &wait);
        set_current_state(TASK_INTERRUPTIBLE);
index 40baa6b..5a6a498 100644 (file)
@@ -400,7 +400,7 @@ static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
        dev->stats.rx_packets++;
        nskb->ip_summed = CHECKSUM_NONE;
        nskb->protocol  = eth_type_trans(nskb, dev);
-       netif_rx_ni(nskb);
+       netif_rx(nskb);
        return 0;
 
 badframe:
index d106511..84312c8 100644 (file)
@@ -669,7 +669,9 @@ static void le_conn_timeout(struct work_struct *work)
        if (conn->role == HCI_ROLE_SLAVE) {
                /* Disable LE Advertising */
                le_disable_advertising(hdev);
+               hci_dev_lock(hdev);
                hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
+               hci_dev_unlock(hdev);
                return;
        }
 
index 4888c1f..abaabfa 100644 (file)
@@ -5453,8 +5453,9 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
        hci_dev_lock(hdev);
 
        hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
-       if (hcon) {
+       if (hcon && hcon->type == AMP_LINK) {
                hcon->state = BT_CLOSED;
+               hci_disconn_cfm(hcon, ev->reason);
                hci_conn_del(hcon);
        }
 
index e31d115..8f4c569 100644 (file)
@@ -276,40 +276,37 @@ EXPORT_SYMBOL(__hci_cmd_sync_status);
 static void hci_cmd_sync_work(struct work_struct *work)
 {
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
-       struct hci_cmd_sync_work_entry *entry;
-       hci_cmd_sync_work_func_t func;
-       hci_cmd_sync_work_destroy_t destroy;
-       void *data;
 
        bt_dev_dbg(hdev, "");
 
-       mutex_lock(&hdev->cmd_sync_work_lock);
-       entry = list_first_entry(&hdev->cmd_sync_work_list,
-                                struct hci_cmd_sync_work_entry, list);
-       if (entry) {
-               list_del(&entry->list);
-               func = entry->func;
-               data = entry->data;
-               destroy = entry->destroy;
-               kfree(entry);
-       } else {
-               func = NULL;
-               data = NULL;
-               destroy = NULL;
-       }
-       mutex_unlock(&hdev->cmd_sync_work_lock);
+       /* Dequeue all entries and run them */
+       while (1) {
+               struct hci_cmd_sync_work_entry *entry;
 
-       if (func) {
-               int err;
+               mutex_lock(&hdev->cmd_sync_work_lock);
+               entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
+                                                struct hci_cmd_sync_work_entry,
+                                                list);
+               if (entry)
+                       list_del(&entry->list);
+               mutex_unlock(&hdev->cmd_sync_work_lock);
+
+               if (!entry)
+                       break;
 
-               hci_req_sync_lock(hdev);
+               bt_dev_dbg(hdev, "entry %p", entry);
 
-               err = func(hdev, data);
+               if (entry->func) {
+                       int err;
 
-               if (destroy)
-                       destroy(hdev, data, err);
+                       hci_req_sync_lock(hdev);
+                       err = entry->func(hdev, entry->data);
+                       if (entry->destroy)
+                               entry->destroy(hdev, entry->data, err);
+                       hci_req_sync_unlock(hdev);
+               }
 
-               hci_req_sync_unlock(hdev);
+               kfree(entry);
        }
 }
 
@@ -2812,6 +2809,9 @@ static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type,
        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return 0;
 
+       if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
+               return 0;
+
        memset(&cp, 0, sizeof(cp));
        cp.flt_type = flt_type;
 
@@ -2832,6 +2832,13 @@ static int hci_clear_event_filter_sync(struct hci_dev *hdev)
        if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED))
                return 0;
 
+       /* In theory the state machine should not reach here unless
+        * a hci_set_event_filter_sync() call succeeds, but we do
+        * the check both for parity and as a future reminder.
+        */
+       if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
+               return 0;
+
        return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
                                         BDADDR_ANY, 0x00);
 }
@@ -4831,6 +4838,12 @@ static int hci_update_event_filter_sync(struct hci_dev *hdev)
        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return 0;
 
+       /* Some fake CSR controllers lock up after setting this type of
+        * filter, so avoid sending the request altogether.
+        */
+       if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
+               return 0;
+
        /* Always clear event filter when starting */
        hci_clear_event_filter_sync(hdev);
 
index 8df99c0..ae78490 100644 (file)
@@ -1444,7 +1444,6 @@ static void l2cap_ecred_connect(struct l2cap_chan *chan)
        data.pdu.scid[0]     = cpu_to_le16(chan->scid);
 
        chan->ident = l2cap_get_ident(conn);
-       data.pid = chan->ops->get_peer_pid(chan);
 
        data.count = 1;
        data.chan = chan;
index 68ce7dc..d2d3905 100644 (file)
@@ -4545,9 +4545,9 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
                }
        }
 
-done:
        hci_dev_unlock(hdev);
 
+done:
        if (status == MGMT_STATUS_SUCCESS)
                device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
                                     supported_flags, current_flags);
@@ -7955,7 +7955,7 @@ static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
                return false;
 
        /* Make sure that the data is correctly formatted. */
-       for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
+       for (i = 0; i < len; i += (cur_len + 1)) {
                cur_len = data[i];
 
                if (!cur_len)
@@ -9628,17 +9628,44 @@ void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
                   NULL);
 }
 
+static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
+                                              struct sk_buff *skb,
+                                              struct sock *skip_sk,
+                                              u16 handle)
+{
+       struct sk_buff *advmon_skb;
+       size_t advmon_skb_len;
+       __le16 *monitor_handle;
+
+       if (!skb)
+               return;
+
+       advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
+                         sizeof(struct mgmt_ev_device_found)) + skb->len;
+       advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
+                                   advmon_skb_len);
+       if (!advmon_skb)
+               return;
+
+       /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
+        * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
+        * store monitor_handle of the matched monitor.
+        */
+       monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
+       *monitor_handle = cpu_to_le16(handle);
+       skb_put_data(advmon_skb, skb->data, skb->len);
+
+       mgmt_event_skb(advmon_skb, skip_sk);
+}
+
 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr, bool report_device,
                                          struct sk_buff *skb,
                                          struct sock *skip_sk)
 {
-       struct sk_buff *advmon_skb;
-       size_t advmon_skb_len;
-       __le16 *monitor_handle;
        struct monitored_device *dev, *tmp;
        bool matched = false;
-       bool notify = false;
+       bool notified = false;
 
        /* We have received the Advertisement Report because:
         * 1. the kernel has initiated active discovery
@@ -9660,25 +9687,6 @@ static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
                return;
        }
 
-       advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
-                         sizeof(struct mgmt_ev_device_found)) + skb->len;
-       advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
-                                   advmon_skb_len);
-       if (!advmon_skb) {
-               if (report_device)
-                       mgmt_event_skb(skb, skip_sk);
-               else
-                       kfree_skb(skb);
-               return;
-       }
-
-       /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
-        * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
-        * store monitor_handle of the matched monitor.
-        */
-       monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
-       skb_put_data(advmon_skb, skb->data, skb->len);
-
        hdev->advmon_pend_notify = false;
 
        list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
@@ -9686,8 +9694,10 @@ static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
                        matched = true;
 
                        if (!dev->notified) {
-                               *monitor_handle = cpu_to_le16(dev->handle);
-                               notify = true;
+                               mgmt_send_adv_monitor_device_found(hdev, skb,
+                                                                  skip_sk,
+                                                                  dev->handle);
+                               notified = true;
                                dev->notified = true;
                        }
                }
@@ -9697,25 +9707,19 @@ static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
        }
 
        if (!report_device &&
-           ((matched && !notify) || !msft_monitor_supported(hdev))) {
+           ((matched && !notified) || !msft_monitor_supported(hdev))) {
                /* Handle 0 indicates that we are not active scanning and this
                 * is a subsequent advertisement report for an already matched
                 * Advertisement Monitor or the controller offloading support
                 * is not available.
                 */
-               *monitor_handle = 0;
-               notify = true;
+               mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
        }
 
        if (report_device)
                mgmt_event_skb(skb, skip_sk);
        else
                kfree_skb(skb);
-
-       if (notify)
-               mgmt_event_skb(advmon_skb, skip_sk);
-       else
-               kfree_skb(advmon_skb);
 }
 
 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
index 9a3d77d..f439945 100644 (file)
@@ -330,12 +330,13 @@ static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev,
                /* Do not free the monitor if it is being removed due to
                 * suspend. It will be re-monitored on resume.
                 */
-               if (monitor && !msft->suspending)
+               if (monitor && !msft->suspending) {
                        hci_free_adv_monitor(hdev, monitor);
 
-               /* Clear any monitored devices by this Adv Monitor */
-               msft_monitor_device_del(hdev, handle_data->mgmt_handle, NULL,
-                                       0, false);
+                       /* Clear any monitored devices by this Adv Monitor */
+                       msft_monitor_device_del(hdev, handle_data->mgmt_handle,
+                                               NULL, 0, false);
+               }
 
                list_del(&handle_data->list);
                kfree(handle_data);
@@ -522,6 +523,16 @@ int msft_resume_sync(struct hci_dev *hdev)
        if (!msft || !msft_monitor_supported(hdev))
                return 0;
 
+       hci_dev_lock(hdev);
+
+       /* Clear already tracked devices on resume. Once the monitors are
+        * reregistered, devices in range will be found again after resume.
+        */
+       hdev->advmon_pend_notify = false;
+       msft_monitor_device_del(hdev, 0, NULL, 0, true);
+
+       hci_dev_unlock(hdev);
+
        msft->resuming = true;
 
        while (1) {
index 7fb9a02..24bd1c0 100644
@@ -20,7 +20,7 @@ obj-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o
 
 bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o br_multicast_eht.o
 
-bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o br_vlan_tunnel.o br_vlan_options.o
+bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o br_vlan_tunnel.o br_vlan_options.o br_mst.o
 
 bridge-$(CONFIG_NET_SWITCHDEV) += br_switchdev.o
 
index b1dea3f..96e91d6 100644
@@ -265,6 +265,9 @@ int br_boolopt_toggle(struct net_bridge *br, enum br_boolopt_id opt, bool on,
        case BR_BOOLOPT_MCAST_VLAN_SNOOPING:
                err = br_multicast_toggle_vlan_snooping(br, on, extack);
                break;
+       case BR_BOOLOPT_MST_ENABLE:
+               err = br_mst_set_enabled(br, on, extack);
+               break;
        default:
                /* shouldn't be called with unsupported options */
                WARN_ON(1);
@@ -281,6 +284,8 @@ int br_boolopt_get(const struct net_bridge *br, enum br_boolopt_id opt)
                return br_opt_get(br, BROPT_NO_LL_LEARN);
        case BR_BOOLOPT_MCAST_VLAN_SNOOPING:
                return br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED);
+       case BR_BOOLOPT_MST_ENABLE:
+               return br_opt_get(br, BROPT_MST_ENABLED);
        default:
                /* shouldn't be called with unsupported options */
                WARN_ON(1);
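MST is switched through the existing bridge boolopt mechanism, i.e. an IFLA_BR_MULTI_BOOLOPT attribute whose payload is a mask/value pair. A minimal userspace sketch, assuming only the new BR_BOOLOPT_MST_ENABLE uapi value from this series (wrapping the struct into an RTM_NEWLINK request is omitted):

#include <linux/if_bridge.h>

/* flags selects which boolopts to change, optval carries their values */
struct br_boolopt_multi bm = {
	.flags  = 1 << BR_BOOLOPT_MST_ENABLE,
	.optval = 1 << BR_BOOLOPT_MST_ENABLE,	/* on */
};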
index e0c13fc..1964178 100644
@@ -78,13 +78,22 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
        u16 vid = 0;
        u8 state;
 
-       if (!p || p->state == BR_STATE_DISABLED)
+       if (!p)
                goto drop;
 
        br = p->br;
+
+       if (br_mst_is_enabled(br)) {
+               state = BR_STATE_FORWARDING;
+       } else {
+               if (p->state == BR_STATE_DISABLED)
+                       goto drop;
+
+               state = p->state;
+       }
+
        brmctx = &p->br->multicast_ctx;
        pmctx = &p->multicast_ctx;
-       state = p->state;
        if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid,
                                &state, &vlan))
                goto out;
@@ -370,9 +379,13 @@ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
                return RX_HANDLER_PASS;
 
 forward:
+       if (br_mst_is_enabled(p->br))
+               goto defer_stp_filtering;
+
        switch (p->state) {
        case BR_STATE_FORWARDING:
        case BR_STATE_LEARNING:
+defer_stp_filtering:
                if (ether_addr_equal(p->br->dev->dev_addr, dest))
                        skb->pkt_type = PACKET_HOST;
 
diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c
new file mode 100644
index 0000000..ee680ad
--- /dev/null
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *     Bridge Multiple Spanning Tree Support
+ *
+ *     Authors:
+ *     Tobias Waldekranz               <tobias@waldekranz.com>
+ */
+
+#include <linux/kernel.h>
+#include <net/switchdev.h>
+
+#include "br_private.h"
+
+DEFINE_STATIC_KEY_FALSE(br_mst_used);
+
+bool br_mst_enabled(const struct net_device *dev)
+{
+       if (!netif_is_bridge_master(dev))
+               return false;
+
+       return br_opt_get(netdev_priv(dev), BROPT_MST_ENABLED);
+}
+EXPORT_SYMBOL_GPL(br_mst_enabled);
+
+int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids)
+{
+       const struct net_bridge_vlan_group *vg;
+       const struct net_bridge_vlan *v;
+       const struct net_bridge *br;
+
+       ASSERT_RTNL();
+
+       if (!netif_is_bridge_master(dev))
+               return -EINVAL;
+
+       br = netdev_priv(dev);
+       if (!br_opt_get(br, BROPT_MST_ENABLED))
+               return -EINVAL;
+
+       vg = br_vlan_group(br);
+
+       list_for_each_entry(v, &vg->vlan_list, vlist) {
+               if (v->msti == msti)
+                       __set_bit(v->vid, vids);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(br_mst_get_info);
+
+int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state)
+{
+       const struct net_bridge_port *p = NULL;
+       const struct net_bridge_vlan_group *vg;
+       const struct net_bridge_vlan *v;
+
+       ASSERT_RTNL();
+
+       p = br_port_get_check_rtnl(dev);
+       if (!p || !br_opt_get(p->br, BROPT_MST_ENABLED))
+               return -EINVAL;
+
+       vg = nbp_vlan_group(p);
+
+       list_for_each_entry(v, &vg->vlan_list, vlist) {
+               if (v->brvlan->msti == msti) {
+                       *state = v->state;
+                       return 0;
+               }
+       }
+
+       return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(br_mst_get_state);
+
+static void br_mst_vlan_set_state(struct net_bridge_port *p, struct net_bridge_vlan *v,
+                                 u8 state)
+{
+       struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
+
+       if (v->state == state)
+               return;
+
+       br_vlan_set_state(v, state);
+
+       if (v->vid == vg->pvid)
+               br_vlan_set_pvid_state(vg, state);
+}
+
+int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
+                    struct netlink_ext_ack *extack)
+{
+       struct switchdev_attr attr = {
+               .id = SWITCHDEV_ATTR_ID_PORT_MST_STATE,
+               .orig_dev = p->dev,
+               .u.mst_state = {
+                       .msti = msti,
+                       .state = state,
+               },
+       };
+       struct net_bridge_vlan_group *vg;
+       struct net_bridge_vlan *v;
+       int err;
+
+       vg = nbp_vlan_group(p);
+       if (!vg)
+               return 0;
+
+       /* MSTI 0 (CST) state changes are notified via the regular
+        * SWITCHDEV_ATTR_ID_PORT_STP_STATE.
+        */
+       if (msti) {
+               err = switchdev_port_attr_set(p->dev, &attr, extack);
+               if (err && err != -EOPNOTSUPP)
+                       return err;
+       }
+
+       list_for_each_entry(v, &vg->vlan_list, vlist) {
+               if (v->brvlan->msti != msti)
+                       continue;
+
+               br_mst_vlan_set_state(p, v, state);
+       }
+
+       return 0;
+}
+
+static void br_mst_vlan_sync_state(struct net_bridge_vlan *pv, u16 msti)
+{
+       struct net_bridge_vlan_group *vg = nbp_vlan_group(pv->port);
+       struct net_bridge_vlan *v;
+
+       list_for_each_entry(v, &vg->vlan_list, vlist) {
+               /* If this port already has a defined state in this
+                * MSTI (through some other VLAN membership), inherit
+                * it.
+                */
+               if (v != pv && v->brvlan->msti == msti) {
+                       br_mst_vlan_set_state(pv->port, pv, v->state);
+                       return;
+               }
+       }
+
+       /* Otherwise, start out in a new MSTI with all ports disabled. */
+       return br_mst_vlan_set_state(pv->port, pv, BR_STATE_DISABLED);
+}
+
+int br_mst_vlan_set_msti(struct net_bridge_vlan *mv, u16 msti)
+{
+       struct switchdev_attr attr = {
+               .id = SWITCHDEV_ATTR_ID_VLAN_MSTI,
+               .orig_dev = mv->br->dev,
+               .u.vlan_msti = {
+                       .vid = mv->vid,
+                       .msti = msti,
+               },
+       };
+       struct net_bridge_vlan_group *vg;
+       struct net_bridge_vlan *pv;
+       struct net_bridge_port *p;
+       int err;
+
+       if (mv->msti == msti)
+               return 0;
+
+       err = switchdev_port_attr_set(mv->br->dev, &attr, NULL);
+       if (err && err != -EOPNOTSUPP)
+               return err;
+
+       mv->msti = msti;
+
+       list_for_each_entry(p, &mv->br->port_list, list) {
+               vg = nbp_vlan_group(p);
+
+               pv = br_vlan_find(vg, mv->vid);
+               if (pv)
+                       br_mst_vlan_sync_state(pv, msti);
+       }
+
+       return 0;
+}
+
+void br_mst_vlan_init_state(struct net_bridge_vlan *v)
+{
+       /* VLANs always start out in MSTI 0 (CST) */
+       v->msti = 0;
+
+       if (br_vlan_is_master(v))
+               v->state = BR_STATE_FORWARDING;
+       else
+               v->state = v->port->state;
+}
+
+int br_mst_set_enabled(struct net_bridge *br, bool on,
+                      struct netlink_ext_ack *extack)
+{
+       struct switchdev_attr attr = {
+               .id = SWITCHDEV_ATTR_ID_BRIDGE_MST,
+               .orig_dev = br->dev,
+               .u.mst = on,
+       };
+       struct net_bridge_vlan_group *vg;
+       struct net_bridge_port *p;
+       int err;
+
+       list_for_each_entry(p, &br->port_list, list) {
+               vg = nbp_vlan_group(p);
+
+               if (!vg->num_vlans)
+                       continue;
+
+               NL_SET_ERR_MSG(extack,
+                              "MST mode can't be changed while VLANs exist");
+               return -EBUSY;
+       }
+
+       if (br_opt_get(br, BROPT_MST_ENABLED) == on)
+               return 0;
+
+       err = switchdev_port_attr_set(br->dev, &attr, extack);
+       if (err && err != -EOPNOTSUPP)
+               return err;
+
+       if (on)
+               static_branch_enable(&br_mst_used);
+       else
+               static_branch_disable(&br_mst_used);
+
+       br_opt_toggle(br, BROPT_MST_ENABLED, on);
+       return 0;
+}
+
+size_t br_mst_info_size(const struct net_bridge_vlan_group *vg)
+{
+       DECLARE_BITMAP(seen, VLAN_N_VID) = { 0 };
+       const struct net_bridge_vlan *v;
+       size_t sz;
+
+       /* IFLA_BRIDGE_MST */
+       sz = nla_total_size(0);
+
+       list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
+               if (test_bit(v->brvlan->msti, seen))
+                       continue;
+
+               /* IFLA_BRIDGE_MST_ENTRY */
+               sz += nla_total_size(0) +
+                       /* IFLA_BRIDGE_MST_ENTRY_MSTI */
+                       nla_total_size(sizeof(u16)) +
+                       /* IFLA_BRIDGE_MST_ENTRY_STATE */
+                       nla_total_size(sizeof(u8));
+
+               __set_bit(v->brvlan->msti, seen);
+       }
+
+       return sz;
+}
+
+int br_mst_fill_info(struct sk_buff *skb,
+                    const struct net_bridge_vlan_group *vg)
+{
+       DECLARE_BITMAP(seen, VLAN_N_VID) = { 0 };
+       const struct net_bridge_vlan *v;
+       struct nlattr *nest;
+       int err = 0;
+
+       list_for_each_entry(v, &vg->vlan_list, vlist) {
+               if (test_bit(v->brvlan->msti, seen))
+                       continue;
+
+               nest = nla_nest_start_noflag(skb, IFLA_BRIDGE_MST_ENTRY);
+               if (!nest ||
+                   nla_put_u16(skb, IFLA_BRIDGE_MST_ENTRY_MSTI, v->brvlan->msti) ||
+                   nla_put_u8(skb, IFLA_BRIDGE_MST_ENTRY_STATE, v->state)) {
+                       err = -EMSGSIZE;
+                       break;
+               }
+               nla_nest_end(skb, nest);
+
+               __set_bit(v->brvlan->msti, seen);
+       }
+
+       return err;
+}
+
+static const struct nla_policy br_mst_nl_policy[IFLA_BRIDGE_MST_ENTRY_MAX + 1] = {
+       [IFLA_BRIDGE_MST_ENTRY_MSTI] = NLA_POLICY_RANGE(NLA_U16,
+                                                  1, /* 0 reserved for CST */
+                                                  VLAN_N_VID - 1),
+       [IFLA_BRIDGE_MST_ENTRY_STATE] = NLA_POLICY_RANGE(NLA_U8,
+                                                   BR_STATE_DISABLED,
+                                                   BR_STATE_BLOCKING),
+};
+
+static int br_mst_process_one(struct net_bridge_port *p,
+                             const struct nlattr *attr,
+                             struct netlink_ext_ack *extack)
+{
+       struct nlattr *tb[IFLA_BRIDGE_MST_ENTRY_MAX + 1];
+       u16 msti;
+       u8 state;
+       int err;
+
+       err = nla_parse_nested(tb, IFLA_BRIDGE_MST_ENTRY_MAX, attr,
+                              br_mst_nl_policy, extack);
+       if (err)
+               return err;
+
+       if (!tb[IFLA_BRIDGE_MST_ENTRY_MSTI]) {
+               NL_SET_ERR_MSG_MOD(extack, "MSTI not specified");
+               return -EINVAL;
+       }
+
+       if (!tb[IFLA_BRIDGE_MST_ENTRY_STATE]) {
+               NL_SET_ERR_MSG_MOD(extack, "State not specified");
+               return -EINVAL;
+       }
+
+       msti = nla_get_u16(tb[IFLA_BRIDGE_MST_ENTRY_MSTI]);
+       state = nla_get_u8(tb[IFLA_BRIDGE_MST_ENTRY_STATE]);
+
+       return br_mst_set_state(p, msti, state, extack);
+}
+
+int br_mst_process(struct net_bridge_port *p, const struct nlattr *mst_attr,
+                  struct netlink_ext_ack *extack)
+{
+       struct nlattr *attr;
+       int err, msts = 0;
+       int rem;
+
+       if (!br_opt_get(p->br, BROPT_MST_ENABLED)) {
+               NL_SET_ERR_MSG_MOD(extack, "Can't modify MST state when MST is disabled");
+               return -EBUSY;
+       }
+
+       nla_for_each_nested(attr, mst_attr, rem) {
+               switch (nla_type(attr)) {
+               case IFLA_BRIDGE_MST_ENTRY:
+                       err = br_mst_process_one(p, attr, extack);
+                       break;
+               default:
+                       continue;
+               }
+
+               msts++;
+               if (err)
+                       break;
+       }
+
+       if (!msts) {
+               NL_SET_ERR_MSG_MOD(extack, "Found no MST entries to process");
+               err = -EINVAL;
+       }
+
+       return err;
+}
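br_mst_get_info() and br_mst_get_state() are exported for switchdev drivers that need to resolve an MSTI to its member VLANs and the local port state. A hypothetical consumer sketch; the foo_* names are illustrative, not part of this patch, and both bridge calls expect RTNL to be held:

static int foo_apply_msti_state(struct net_device *br_dev,
				struct net_device *port_dev, u16 msti)
{
	DECLARE_BITMAP(vids, VLAN_N_VID) = { 0 };
	unsigned int vid;
	u8 state;
	int err;

	err = br_mst_get_info(br_dev, msti, vids);	/* MSTI -> VIDs */
	if (err)
		return err;

	err = br_mst_get_state(port_dev, msti, &state);	/* port state */
	if (err)
		return err;

	for_each_set_bit(vid, vids, VLAN_N_VID)
		foo_hw_set_vlan_stp_state(port_dev, vid, state);

	return 0;
}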
index 7d4432c..2044724 100644
@@ -119,6 +119,9 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev,
        /* Each VLAN is returned in bridge_vlan_info along with flags */
        vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));
 
+       if (vg && (filter_mask & RTEXT_FILTER_MST))
+               vinfo_sz += br_mst_info_size(vg);
+
        if (!(filter_mask & RTEXT_FILTER_CFM_STATUS))
                return vinfo_sz;
 
@@ -485,7 +488,8 @@ static int br_fill_ifinfo(struct sk_buff *skb,
                           RTEXT_FILTER_BRVLAN_COMPRESSED |
                           RTEXT_FILTER_MRP |
                           RTEXT_FILTER_CFM_CONFIG |
-                          RTEXT_FILTER_CFM_STATUS)) {
+                          RTEXT_FILTER_CFM_STATUS |
+                          RTEXT_FILTER_MST)) {
                af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
                if (!af)
                        goto nla_put_failure;
@@ -564,7 +568,28 @@ static int br_fill_ifinfo(struct sk_buff *skb,
                nla_nest_end(skb, cfm_nest);
        }
 
+       if ((filter_mask & RTEXT_FILTER_MST) &&
+           br_opt_get(br, BROPT_MST_ENABLED) && port) {
+               const struct net_bridge_vlan_group *vg = nbp_vlan_group(port);
+               struct nlattr *mst_nest;
+               int err;
+
+               if (!vg || !vg->num_vlans)
+                       goto done;
+
+               mst_nest = nla_nest_start(skb, IFLA_BRIDGE_MST);
+               if (!mst_nest)
+                       goto nla_put_failure;
+
+               err = br_mst_fill_info(skb, vg);
+               if (err)
+                       goto nla_put_failure;
+
+               nla_nest_end(skb, mst_nest);
+       }
+
 done:
+
        if (af)
                nla_nest_end(skb, af);
        nlmsg_end(skb, nlh);
@@ -803,6 +828,23 @@ static int br_afspec(struct net_bridge *br,
                        if (err)
                                return err;
                        break;
+               case IFLA_BRIDGE_MST:
+                       if (!p) {
+                               NL_SET_ERR_MSG(extack,
+                                              "MST states can only be set on bridge ports");
+                               return -EINVAL;
+                       }
+
+                       if (cmd != RTM_SETLINK) {
+                               NL_SET_ERR_MSG(extack,
+                                              "MST states can only be set through RTM_SETLINK");
+                               return -EINVAL;
+                       }
+
+                       err = br_mst_process(p, attr, extack);
+                       if (err)
+                               return err;
+                       break;
                }
        }
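The br_afspec() hunk above routes IFLA_BRIDGE_MST, nested inside IFLA_AF_SPEC of an RTM_SETLINK aimed at a bridge port, to br_mst_process(). A userspace sketch using libmnl (socket I/O and error handling omitted; "swp1" and MSTI 10 are illustrative):

#include <libmnl/libmnl.h>
#include <linux/if_bridge.h>
#include <linux/rtnetlink.h>
#include <net/if.h>
#include <sys/socket.h>

static void put_mst_state(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
	struct nlattr *af, *mst, *entry;
	struct ifinfomsg *ifi;

	nlh->nlmsg_type = RTM_SETLINK;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	ifi = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifi));
	ifi->ifi_family = PF_BRIDGE;
	ifi->ifi_index = if_nametoindex("swp1");	/* bridge port */

	af = mnl_attr_nest_start(nlh, IFLA_AF_SPEC);
	mst = mnl_attr_nest_start(nlh, IFLA_BRIDGE_MST);
	entry = mnl_attr_nest_start(nlh, IFLA_BRIDGE_MST_ENTRY);
	mnl_attr_put_u16(nlh, IFLA_BRIDGE_MST_ENTRY_MSTI, 10);
	mnl_attr_put_u8(nlh, IFLA_BRIDGE_MST_ENTRY_STATE, BR_STATE_FORWARDING);
	mnl_attr_nest_end(nlh, entry);
	mnl_attr_nest_end(nlh, mst);
	mnl_attr_nest_end(nlh, af);

	/* mnl_socket_open()/mnl_socket_sendto()/ACK handling omitted */
}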
 
index 48bc61e..18ccc3d 100644
@@ -178,6 +178,7 @@ enum {
  * @br_mcast_ctx: if MASTER flag set, this is the global vlan multicast context
  * @port_mcast_ctx: if MASTER flag unset, this is the per-port/vlan multicast
  *                  context
+ * @msti: if MASTER flag set, this holds the VLAN's MST instance
  * @vlist: sorted list of VLAN entries
  * @rcu: used for entry destruction
  *
@@ -210,6 +211,8 @@ struct net_bridge_vlan {
                struct net_bridge_mcast_port    port_mcast_ctx;
        };
 
+       u16                             msti;
+
        struct list_head                vlist;
 
        struct rcu_head                 rcu;
@@ -445,6 +448,7 @@ enum net_bridge_opts {
        BROPT_NO_LL_LEARN,
        BROPT_VLAN_BRIDGE_BINDING,
        BROPT_MCAST_VLAN_SNOOPING_ENABLED,
+       BROPT_MST_ENABLED,
 };
 
 struct net_bridge {
@@ -1765,6 +1769,63 @@ static inline bool br_vlan_state_allowed(u8 state, bool learn_allow)
 }
 #endif
 
+/* br_mst.c */
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+DECLARE_STATIC_KEY_FALSE(br_mst_used);
+static inline bool br_mst_is_enabled(struct net_bridge *br)
+{
+       return static_branch_unlikely(&br_mst_used) &&
+               br_opt_get(br, BROPT_MST_ENABLED);
+}
+
+int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
+                    struct netlink_ext_ack *extack);
+int br_mst_vlan_set_msti(struct net_bridge_vlan *v, u16 msti);
+void br_mst_vlan_init_state(struct net_bridge_vlan *v);
+int br_mst_set_enabled(struct net_bridge *br, bool on,
+                      struct netlink_ext_ack *extack);
+size_t br_mst_info_size(const struct net_bridge_vlan_group *vg);
+int br_mst_fill_info(struct sk_buff *skb,
+                    const struct net_bridge_vlan_group *vg);
+int br_mst_process(struct net_bridge_port *p, const struct nlattr *mst_attr,
+                  struct netlink_ext_ack *extack);
+#else
+static inline bool br_mst_is_enabled(struct net_bridge *br)
+{
+       return false;
+}
+
+static inline int br_mst_set_state(struct net_bridge_port *p, u16 msti,
+                                  u8 state, struct netlink_ext_ack *extack)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int br_mst_set_enabled(struct net_bridge *br, bool on,
+                                    struct netlink_ext_ack *extack)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline size_t br_mst_info_size(const struct net_bridge_vlan_group *vg)
+{
+       return 0;
+}
+
+static inline int br_mst_fill_info(struct sk_buff *skb,
+                                  const struct net_bridge_vlan_group *vg)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int br_mst_process(struct net_bridge_port *p,
+                                const struct nlattr *mst_attr,
+                                struct netlink_ext_ack *extack)
+{
+       return -EOPNOTSUPP;
+}
+#endif
+
 struct nf_br_ops {
        int (*br_dev_xmit_hook)(struct sk_buff *skb);
 };
index 1d80f34..7d27b2e 100644
@@ -43,6 +43,12 @@ void br_set_state(struct net_bridge_port *p, unsigned int state)
                return;
 
        p->state = state;
+       if (br_opt_get(p->br, BROPT_MST_ENABLED)) {
+               err = br_mst_set_state(p, 0, state, NULL);
+               if (err)
+                       br_warn(p->br, "error setting MST state on port %u(%s)\n",
+                               p->port_no, netdev_name(p->dev));
+       }
        err = switchdev_port_attr_set(p->dev, &attr, NULL);
        if (err && err != -EOPNOTSUPP)
                br_warn(p->br, "error setting offload STP state on port %u(%s)\n",
index 6f6a701..8cc44c3 100644
@@ -331,6 +331,46 @@ br_switchdev_fdb_replay(const struct net_device *br_dev, const void *ctx,
        return err;
 }
 
+static int br_switchdev_vlan_attr_replay(struct net_device *br_dev,
+                                        const void *ctx,
+                                        struct notifier_block *nb,
+                                        struct netlink_ext_ack *extack)
+{
+       struct switchdev_notifier_port_attr_info attr_info = {
+               .info = {
+                       .dev = br_dev,
+                       .extack = extack,
+                       .ctx = ctx,
+               },
+       };
+       struct net_bridge *br = netdev_priv(br_dev);
+       struct net_bridge_vlan_group *vg;
+       struct switchdev_attr attr;
+       struct net_bridge_vlan *v;
+       int err;
+
+       attr_info.attr = &attr;
+       attr.orig_dev = br_dev;
+
+       vg = br_vlan_group(br);
+
+       list_for_each_entry(v, &vg->vlan_list, vlist) {
+               if (v->msti) {
+                       attr.id = SWITCHDEV_ATTR_ID_VLAN_MSTI;
+                       attr.u.vlan_msti.vid = v->vid;
+                       attr.u.vlan_msti.msti = v->msti;
+
+                       err = nb->notifier_call(nb, SWITCHDEV_PORT_ATTR_SET,
+                                               &attr_info);
+                       err = notifier_to_errno(err);
+                       if (err)
+                               return err;
+               }
+       }
+
+       return 0;
+}
+
 static int
 br_switchdev_vlan_replay_one(struct notifier_block *nb,
                             struct net_device *dev,
@@ -425,6 +465,12 @@ static int br_switchdev_vlan_replay(struct net_device *br_dev,
                        return err;
        }
 
+       if (adding) {
+               err = br_switchdev_vlan_attr_replay(br_dev, ctx, nb, extack);
+               if (err)
+                       return err;
+       }
+
        return 0;
 }
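A driver receives the replayed objects (and live updates) through its switchdev attribute handler: the VID-to-MSTI mapping arrives as SWITCHDEV_ATTR_ID_VLAN_MSTI and per-MSTI port states as SWITCHDEV_ATTR_ID_PORT_MST_STATE. A hypothetical dispatcher sketch; the foo_* names are illustrative:

static int foo_port_attr_set(struct net_device *dev, const void *ctx,
			     const struct switchdev_attr *attr,
			     struct netlink_ext_ack *extack)
{
	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_VLAN_MSTI:
		return foo_hw_map_vid_to_msti(dev, attr->u.vlan_msti.vid,
					      attr->u.vlan_msti.msti);
	case SWITCHDEV_ATTR_ID_PORT_MST_STATE:
		return foo_hw_set_mst_state(dev, attr->u.mst_state.msti,
					    attr->u.mst_state.state);
	default:
		return -EOPNOTSUPP;
	}
}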
 
index 7557e90..0f5e75c 100644
@@ -226,6 +226,24 @@ static void nbp_vlan_rcu_free(struct rcu_head *rcu)
        kfree(v);
 }
 
+static void br_vlan_init_state(struct net_bridge_vlan *v)
+{
+       struct net_bridge *br;
+
+       if (br_vlan_is_master(v))
+               br = v->br;
+       else
+               br = v->port->br;
+
+       if (br_opt_get(br, BROPT_MST_ENABLED)) {
+               br_mst_vlan_init_state(v);
+               return;
+       }
+
+       v->state = BR_STATE_FORWARDING;
+       v->msti = 0;
+}
+
 /* This is the shared VLAN add function which works for both ports and bridge
  * devices. There are four possible calls to this function in terms of the
  * vlan entry type:
@@ -322,7 +340,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
        }
 
        /* set the state before publishing */
-       v->state = BR_STATE_FORWARDING;
+       br_vlan_init_state(v);
 
        err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
                                            br_vlan_rht_params);
index a638297..a2724d0 100644
@@ -99,6 +99,11 @@ static int br_vlan_modify_state(struct net_bridge_vlan_group *vg,
                return -EBUSY;
        }
 
+       if (br_opt_get(br, BROPT_MST_ENABLED)) {
+               NL_SET_ERR_MSG_MOD(extack, "Can't modify vlan state directly when MST is enabled");
+               return -EBUSY;
+       }
+
        if (v->state == state)
                return 0;
 
@@ -291,6 +296,7 @@ bool br_vlan_global_opts_can_enter_range(const struct net_bridge_vlan *v_curr,
                                         const struct net_bridge_vlan *r_end)
 {
        return v_curr->vid - r_end->vid == 1 &&
+               v_curr->msti == r_end->msti &&
               ((v_curr->priv_flags ^ r_end->priv_flags) &
                BR_VLFLAG_GLOBAL_MCAST_ENABLED) == 0 &&
                br_multicast_ctx_options_equal(&v_curr->br_mcast_ctx,
@@ -379,6 +385,9 @@ bool br_vlan_global_opts_fill(struct sk_buff *skb, u16 vid, u16 vid_range,
 #endif
 #endif
 
+       if (nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_MSTI, v_opts->msti))
+               goto out_err;
+
        nla_nest_end(skb, nest);
 
        return true;
@@ -410,6 +419,7 @@ static size_t rtnl_vlan_global_opts_nlmsg_size(const struct net_bridge_vlan *v)
                + nla_total_size(0) /* BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS */
                + br_rports_size(&v->br_mcast_ctx) /* BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS */
 #endif
+               + nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_GOPTS_MSTI */
                + nla_total_size(sizeof(u16)); /* BRIDGE_VLANDB_GOPTS_RANGE */
 }
 
@@ -559,6 +569,15 @@ static int br_vlan_process_global_one_opts(const struct net_bridge *br,
        }
 #endif
 #endif
+       if (tb[BRIDGE_VLANDB_GOPTS_MSTI]) {
+               u16 msti;
+
+               msti = nla_get_u16(tb[BRIDGE_VLANDB_GOPTS_MSTI]);
+               err = br_mst_vlan_set_msti(v, msti);
+               if (err)
+                       return err;
+               *changed = true;
+       }
 
        return 0;
 }
@@ -578,6 +597,7 @@ static const struct nla_policy br_vlan_db_gpol[BRIDGE_VLANDB_GOPTS_MAX + 1] = {
        [BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL]       = { .type = NLA_U64 },
        [BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
        [BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
+       [BRIDGE_VLANDB_GOPTS_MSTI] = NLA_POLICY_MAX(NLA_U16, VLAN_N_VID - 1),
 };
 
 int br_vlan_rtm_process_global_options(struct net_device *dev,
index ebfb2a5..7324296 100644
@@ -381,7 +381,7 @@ static unsigned int nf_ct_bridge_confirm(struct sk_buff *skb)
                protoff = skb_network_offset(skb) + ip_hdrlen(skb);
                break;
        case htons(ETH_P_IPV6): {
-                unsigned char pnum = ipv6_hdr(skb)->nexthdr;
+               unsigned char pnum = ipv6_hdr(skb)->nexthdr;
                __be16 frag_off;
 
                protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
index c1ef9cc..8c3eaba 100644
@@ -87,6 +87,7 @@ static int nft_meta_bridge_get_init(const struct nft_ctx *ctx,
                return nft_meta_get_init(ctx, expr, tb);
        }
 
+       priv->len = len;
        return nft_parse_register_store(ctx, tb[NFTA_META_DREG], &priv->dreg,
                                        NULL, NFT_DATA_VALUE, len);
 }
@@ -98,6 +99,7 @@ static const struct nft_expr_ops nft_meta_bridge_get_ops = {
        .eval           = nft_meta_bridge_get_eval,
        .init           = nft_meta_bridge_get_init,
        .dump           = nft_meta_get_dump,
+       .reduce         = nft_meta_get_reduce,
 };
 
 static bool nft_meta_bridge_set_reduce(struct nft_regs_track *track,
@@ -112,8 +114,7 @@ static bool nft_meta_bridge_set_reduce(struct nft_regs_track *track,
                if (track->regs[i].selector->ops != &nft_meta_bridge_get_ops)
                        continue;
 
-               track->regs[i].selector = NULL;
-               track->regs[i].bitwise = NULL;
+               __nft_reg_track_cancel(track, i);
        }
 
        return false;
index fbf858d..71b54fe 100644
@@ -185,6 +185,7 @@ static const struct nft_expr_ops nft_reject_bridge_ops = {
        .init           = nft_reject_init,
        .dump           = nft_reject_dump,
        .validate       = nft_reject_bridge_validate,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
index cce2af1..1fb49d5 100644
@@ -284,7 +284,7 @@ int can_send(struct sk_buff *skb, int loop)
        }
 
        if (newskb)
-               netif_rx_ni(newskb);
+               netif_rx(newskb);
 
        /* update statistics */
        pkg_stats->tx_frames++;
index d2a430b..f6f8ba1 100644
@@ -14,7 +14,6 @@
  * - use CAN_ISOTP_WAIT_TX_DONE flag to block the caller until the PDU is sent
  * - as we have static buffers the check whether the PDU fits into the buffer
  *   is done at FF reception time (no support for sending 'wait frames')
- * - take care of the tx-queue-len as traffic shaping is still on the TODO list
  *
  * Copyright (c) 2020 Volkswagen Group Electronic Research
  * All rights reserved.
@@ -87,9 +86,9 @@ MODULE_ALIAS("can-proto-6");
 /* ISO 15765-2:2016 supports more than 4095 byte per ISO PDU as the FF_DL can
  * take full 32 bit values (4 Gbyte). We would need some good concept to handle
  * this between user space and kernel space. For now increase the static buffer
- * to something about 8 kbyte to be able to test this new functionality.
+ * to about 64 kbyte to be able to test this new functionality.
  */
-#define MAX_MSG_LENGTH 8200
+#define MAX_MSG_LENGTH 66000
 
 /* N_PCI type values in bits 7-4 of N_PCI bytes */
 #define N_PCI_SF 0x00  /* single frame */
@@ -141,8 +140,10 @@ struct isotp_sock {
        struct can_isotp_options opt;
        struct can_isotp_fc_options rxfc, txfc;
        struct can_isotp_ll_options ll;
+       u32 frame_txtime;
        u32 force_tx_stmin;
        u32 force_rx_stmin;
+       u32 cfecho; /* consecutive frame echo tag */
        struct tpcon rx, tx;
        struct list_head notifier;
        wait_queue_head_t wait;
@@ -360,7 +361,7 @@ static int isotp_rcv_fc(struct isotp_sock *so, struct canfd_frame *cf, int ae)
 
                so->tx_gap = ktime_set(0, 0);
                /* add transmission time for CAN frame N_As */
-               so->tx_gap = ktime_add_ns(so->tx_gap, so->opt.frame_txtime);
+               so->tx_gap = ktime_add_ns(so->tx_gap, so->frame_txtime);
                /* add waiting time for consecutive frames N_Cs */
                if (so->opt.flags & CAN_ISOTP_FORCE_TXSTMIN)
                        so->tx_gap = ktime_add_ns(so->tx_gap,
@@ -712,6 +713,63 @@ static void isotp_fill_dataframe(struct canfd_frame *cf, struct isotp_sock *so,
                cf->data[0] = so->opt.ext_address;
 }
 
+static void isotp_send_cframe(struct isotp_sock *so)
+{
+       struct sock *sk = &so->sk;
+       struct sk_buff *skb;
+       struct net_device *dev;
+       struct canfd_frame *cf;
+       int can_send_ret;
+       int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
+
+       dev = dev_get_by_index(sock_net(sk), so->ifindex);
+       if (!dev)
+               return;
+
+       skb = alloc_skb(so->ll.mtu + sizeof(struct can_skb_priv), GFP_ATOMIC);
+       if (!skb) {
+               dev_put(dev);
+               return;
+       }
+
+       can_skb_reserve(skb);
+       can_skb_prv(skb)->ifindex = dev->ifindex;
+       can_skb_prv(skb)->skbcnt = 0;
+
+       cf = (struct canfd_frame *)skb->data;
+       skb_put_zero(skb, so->ll.mtu);
+
+       /* create consecutive frame */
+       isotp_fill_dataframe(cf, so, ae, 0);
+
+       /* place consecutive frame N_PCI in appropriate index */
+       cf->data[ae] = N_PCI_CF | so->tx.sn++;
+       so->tx.sn %= 16;
+       so->tx.bs++;
+
+       cf->flags = so->ll.tx_flags;
+
+       skb->dev = dev;
+       can_skb_set_owner(skb, sk);
+
+       /* cfecho should have been zeroed by init/isotp_rcv_echo() */
+       if (so->cfecho)
+               pr_notice_once("can-isotp: cfecho is %08X != 0\n", so->cfecho);
+
+       /* set consecutive frame echo tag */
+       so->cfecho = *(u32 *)cf->data;
+
+       /* send frame with local echo enabled */
+       can_send_ret = can_send(skb, 1);
+       if (can_send_ret) {
+               pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
+                              __func__, ERR_PTR(can_send_ret));
+               if (can_send_ret == -ENOBUFS)
+                       pr_notice_once("can-isotp: tx queue is full\n");
+       }
+       dev_put(dev);
+}
+
 static void isotp_create_fframe(struct canfd_frame *cf, struct isotp_sock *so,
                                int ae)
 {
@@ -748,19 +806,74 @@ static void isotp_create_fframe(struct canfd_frame *cf, struct isotp_sock *so,
        so->tx.state = ISOTP_WAIT_FIRST_FC;
 }
 
+static void isotp_rcv_echo(struct sk_buff *skb, void *data)
+{
+       struct sock *sk = (struct sock *)data;
+       struct isotp_sock *so = isotp_sk(sk);
+       struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+
+       /* only handle my own local echo skb's */
+       if (skb->sk != sk || so->cfecho != *(u32 *)cf->data)
+               return;
+
+       /* cancel local echo timeout */
+       hrtimer_cancel(&so->txtimer);
+
+       /* local echo skb with consecutive frame has been consumed */
+       so->cfecho = 0;
+
+       if (so->tx.idx >= so->tx.len) {
+               /* we are done */
+               so->tx.state = ISOTP_IDLE;
+               wake_up_interruptible(&so->wait);
+               return;
+       }
+
+       if (so->txfc.bs && so->tx.bs >= so->txfc.bs) {
+               /* stop and wait for FC with timeout */
+               so->tx.state = ISOTP_WAIT_FC;
+               hrtimer_start(&so->txtimer, ktime_set(1, 0),
+                             HRTIMER_MODE_REL_SOFT);
+               return;
+       }
+
+       /* no gap between data frames needed => use burst mode */
+       if (!so->tx_gap) {
+               isotp_send_cframe(so);
+               return;
+       }
+
+       /* start timer to send next consecutive frame with correct delay */
+       hrtimer_start(&so->txtimer, so->tx_gap, HRTIMER_MODE_REL_SOFT);
+}
+
 static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
 {
        struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
                                             txtimer);
        struct sock *sk = &so->sk;
-       struct sk_buff *skb;
-       struct net_device *dev;
-       struct canfd_frame *cf;
        enum hrtimer_restart restart = HRTIMER_NORESTART;
-       int can_send_ret;
-       int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
 
        switch (so->tx.state) {
+       case ISOTP_SENDING:
+
+               /* cfecho should be consumed by isotp_rcv_echo() here */
+               if (!so->cfecho) {
+                       /* start timeout for unlikely lost echo skb */
+                       hrtimer_set_expires(&so->txtimer,
+                                           ktime_add(ktime_get(),
+                                                     ktime_set(2, 0)));
+                       restart = HRTIMER_RESTART;
+
+                       /* push out the next consecutive frame */
+                       isotp_send_cframe(so);
+                       break;
+               }
+
+               /* cfecho has not been cleared in isotp_rcv_echo() */
+               pr_notice_once("can-isotp: cfecho %08X timeout\n", so->cfecho);
+               fallthrough;
+
        case ISOTP_WAIT_FC:
        case ISOTP_WAIT_FIRST_FC:
 
@@ -776,78 +889,6 @@ static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
                wake_up_interruptible(&so->wait);
                break;
 
-       case ISOTP_SENDING:
-
-               /* push out the next segmented pdu */
-               dev = dev_get_by_index(sock_net(sk), so->ifindex);
-               if (!dev)
-                       break;
-
-isotp_tx_burst:
-               skb = alloc_skb(so->ll.mtu + sizeof(struct can_skb_priv),
-                               GFP_ATOMIC);
-               if (!skb) {
-                       dev_put(dev);
-                       break;
-               }
-
-               can_skb_reserve(skb);
-               can_skb_prv(skb)->ifindex = dev->ifindex;
-               can_skb_prv(skb)->skbcnt = 0;
-
-               cf = (struct canfd_frame *)skb->data;
-               skb_put_zero(skb, so->ll.mtu);
-
-               /* create consecutive frame */
-               isotp_fill_dataframe(cf, so, ae, 0);
-
-               /* place consecutive frame N_PCI in appropriate index */
-               cf->data[ae] = N_PCI_CF | so->tx.sn++;
-               so->tx.sn %= 16;
-               so->tx.bs++;
-
-               cf->flags = so->ll.tx_flags;
-
-               skb->dev = dev;
-               can_skb_set_owner(skb, sk);
-
-               can_send_ret = can_send(skb, 1);
-               if (can_send_ret) {
-                       pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
-                                      __func__, ERR_PTR(can_send_ret));
-                       if (can_send_ret == -ENOBUFS)
-                               pr_notice_once("can-isotp: tx queue is full, increasing txqueuelen may prevent this error\n");
-               }
-               if (so->tx.idx >= so->tx.len) {
-                       /* we are done */
-                       so->tx.state = ISOTP_IDLE;
-                       dev_put(dev);
-                       wake_up_interruptible(&so->wait);
-                       break;
-               }
-
-               if (so->txfc.bs && so->tx.bs >= so->txfc.bs) {
-                       /* stop and wait for FC */
-                       so->tx.state = ISOTP_WAIT_FC;
-                       dev_put(dev);
-                       hrtimer_set_expires(&so->txtimer,
-                                           ktime_add(ktime_get(),
-                                                     ktime_set(1, 0)));
-                       restart = HRTIMER_RESTART;
-                       break;
-               }
-
-               /* no gap between data frames needed => use burst mode */
-               if (!so->tx_gap)
-                       goto isotp_tx_burst;
-
-               /* start timer to send next data frame with correct delay */
-               dev_put(dev);
-               hrtimer_set_expires(&so->txtimer,
-                                   ktime_add(ktime_get(), so->tx_gap));
-               restart = HRTIMER_RESTART;
-               break;
-
        default:
                WARN_ON_ONCE(1);
        }
@@ -1005,26 +1046,29 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 {
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
-       int err = 0;
-       int noblock;
+       struct isotp_sock *so = isotp_sk(sk);
+       int noblock = flags & MSG_DONTWAIT;
+       int ret = 0;
 
-       noblock = flags & MSG_DONTWAIT;
-       flags &= ~MSG_DONTWAIT;
+       if (flags & ~(MSG_DONTWAIT | MSG_TRUNC))
+               return -EINVAL;
 
-       skb = skb_recv_datagram(sk, flags, noblock, &err);
+       if (!so->bound)
+               return -EADDRNOTAVAIL;
+
+       flags &= ~MSG_DONTWAIT;
+       skb = skb_recv_datagram(sk, flags, noblock, &ret);
        if (!skb)
-               return err;
+               return ret;
 
        if (size < skb->len)
                msg->msg_flags |= MSG_TRUNC;
        else
                size = skb->len;
 
-       err = memcpy_to_msg(msg, skb->data, size);
-       if (err < 0) {
-               skb_free_datagram(sk, skb);
-               return err;
-       }
+       ret = memcpy_to_msg(msg, skb->data, size);
+       if (ret < 0)
+               goto out_err;
 
        sock_recv_timestamp(msg, sk, skb);
 
@@ -1034,9 +1078,13 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
        }
 
+       /* set length of return value */
+       ret = (flags & MSG_TRUNC) ? skb->len : size;
+
+out_err:
        skb_free_datagram(sk, skb);
 
-       return size;
+       return ret;
 }
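With MSG_TRUNC honoured, userspace can now detect an undersized buffer from the return value alone. A minimal sketch, assuming fd is a bound ISO-TP socket:

#include <stdio.h>
#include <sys/socket.h>

static void read_pdu(int fd)
{
	char buf[256];
	ssize_t n = recv(fd, buf, sizeof(buf), MSG_TRUNC);

	/* the kernel returns skb->len, not the copied size */
	if (n > (ssize_t)sizeof(buf))
		fprintf(stderr, "PDU truncated, full length %zd\n", n);
}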
 
 static int isotp_release(struct socket *sock)
@@ -1075,6 +1123,9 @@ static int isotp_release(struct socket *sock)
                                can_rx_unregister(net, dev, so->rxid,
                                                  SINGLE_MASK(so->rxid),
                                                  isotp_rcv, sk);
+                               can_rx_unregister(net, dev, so->txid,
+                                                 SINGLE_MASK(so->txid),
+                                                 isotp_rcv_echo, sk);
                                dev_put(dev);
                                synchronize_rcu();
                        }
@@ -1104,6 +1155,7 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
        struct net *net = sock_net(sk);
        int ifindex;
        struct net_device *dev;
+       canid_t tx_id, rx_id;
        int err = 0;
        int notify_enetdown = 0;
        int do_rx_reg = 1;
@@ -1111,8 +1163,18 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
        if (len < ISOTP_MIN_NAMELEN)
                return -EINVAL;
 
-       if (addr->can_addr.tp.tx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG))
-               return -EADDRNOTAVAIL;
+       /* sanitize tx/rx CAN identifiers */
+       tx_id = addr->can_addr.tp.tx_id;
+       if (tx_id & CAN_EFF_FLAG)
+               tx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK);
+       else
+               tx_id &= CAN_SFF_MASK;
+
+       rx_id = addr->can_addr.tp.rx_id;
+       if (rx_id & CAN_EFF_FLAG)
+               rx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK);
+       else
+               rx_id &= CAN_SFF_MASK;
 
        if (!addr->can_ifindex)
                return -ENODEV;
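Instead of rejecting stray bits, bind() now masks them: extended IDs are reduced to CAN_EFF_FLAG | CAN_EFF_MASK, standard IDs to CAN_SFF_MASK. A userspace sketch with 29-bit addressing (interface name and IDs are illustrative):

#include <linux/can.h>
#include <linux/can/isotp.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>

static int bind_isotp_29bit(int fd)
{
	struct sockaddr_can addr;

	memset(&addr, 0, sizeof(addr));
	addr.can_family = AF_CAN;
	addr.can_ifindex = if_nametoindex("can0");
	addr.can_addr.tp.tx_id = CAN_EFF_FLAG | 0x18DA10F1;
	addr.can_addr.tp.rx_id = CAN_EFF_FLAG | 0x18DAF110;

	return bind(fd, (struct sockaddr *)&addr, sizeof(addr));
}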
@@ -1124,21 +1186,13 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
                do_rx_reg = 0;
 
        /* do not validate rx address for functional addressing */
-       if (do_rx_reg) {
-               if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id) {
-                       err = -EADDRNOTAVAIL;
-                       goto out;
-               }
-
-               if (addr->can_addr.tp.rx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG)) {
-                       err = -EADDRNOTAVAIL;
-                       goto out;
-               }
+       if (do_rx_reg && rx_id == tx_id) {
+               err = -EADDRNOTAVAIL;
+               goto out;
        }
 
        if (so->bound && addr->can_ifindex == so->ifindex &&
-           addr->can_addr.tp.rx_id == so->rxid &&
-           addr->can_addr.tp.tx_id == so->txid)
+           rx_id == so->rxid && tx_id == so->txid)
                goto out;
 
        dev = dev_get_by_index(net, addr->can_ifindex);
@@ -1161,11 +1215,18 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
 
        ifindex = dev->ifindex;
 
-       if (do_rx_reg)
-               can_rx_register(net, dev, addr->can_addr.tp.rx_id,
-                               SINGLE_MASK(addr->can_addr.tp.rx_id),
+       if (do_rx_reg) {
+               can_rx_register(net, dev, rx_id, SINGLE_MASK(rx_id),
                                isotp_rcv, sk, "isotp", sk);
 
+               /* no consecutive frame echo skb in flight */
+               so->cfecho = 0;
+
+               /* register for echo skb's */
+               can_rx_register(net, dev, tx_id, SINGLE_MASK(tx_id),
+                               isotp_rcv_echo, sk, "isotpe", sk);
+       }
+
        dev_put(dev);
 
        if (so->bound && do_rx_reg) {
@@ -1176,6 +1237,9 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
                                can_rx_unregister(net, dev, so->rxid,
                                                  SINGLE_MASK(so->rxid),
                                                  isotp_rcv, sk);
+                               can_rx_unregister(net, dev, so->txid,
+                                                 SINGLE_MASK(so->txid),
+                                                 isotp_rcv_echo, sk);
                                dev_put(dev);
                        }
                }
@@ -1183,8 +1247,8 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
 
        /* switch to new settings */
        so->ifindex = ifindex;
-       so->rxid = addr->can_addr.tp.rx_id;
-       so->txid = addr->can_addr.tp.tx_id;
+       so->rxid = rx_id;
+       so->txid = tx_id;
        so->bound = 1;
 
 out:
@@ -1238,6 +1302,14 @@ static int isotp_setsockopt_locked(struct socket *sock, int level, int optname,
                /* no separate rx_ext_address is given => use ext_address */
                if (!(so->opt.flags & CAN_ISOTP_RX_EXT_ADDR))
                        so->opt.rx_ext_address = so->opt.ext_address;
+
+               /* check for frame_txtime changes (0 => no changes) */
+               if (so->opt.frame_txtime) {
+                       if (so->opt.frame_txtime == CAN_ISOTP_FRAME_TXTIME_ZERO)
+                               so->frame_txtime = 0;
+                       else
+                               so->frame_txtime = so->opt.frame_txtime;
+               }
                break;
 
        case CAN_ISOTP_RECV_FC:
@@ -1381,10 +1453,14 @@ static void isotp_notify(struct isotp_sock *so, unsigned long msg,
        case NETDEV_UNREGISTER:
                lock_sock(sk);
                /* remove current filters & unregister */
-               if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST)))
+               if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST))) {
                        can_rx_unregister(dev_net(dev), dev, so->rxid,
                                          SINGLE_MASK(so->rxid),
                                          isotp_rcv, sk);
+                       can_rx_unregister(dev_net(dev), dev, so->txid,
+                                         SINGLE_MASK(so->txid),
+                                         isotp_rcv_echo, sk);
+               }
 
                so->ifindex = 0;
                so->bound  = 0;
@@ -1439,6 +1515,7 @@ static int isotp_init(struct sock *sk)
        so->opt.rxpad_content = CAN_ISOTP_DEFAULT_PAD_CONTENT;
        so->opt.txpad_content = CAN_ISOTP_DEFAULT_PAD_CONTENT;
        so->opt.frame_txtime = CAN_ISOTP_DEFAULT_FRAME_TXTIME;
+       so->frame_txtime = CAN_ISOTP_DEFAULT_FRAME_TXTIME;
        so->rxfc.bs = CAN_ISOTP_DEFAULT_RECV_BS;
        so->rxfc.stmin = CAN_ISOTP_DEFAULT_RECV_STMIN;
        so->rxfc.wftmax = CAN_ISOTP_DEFAULT_RECV_WFTMAX;
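On CAN_ISOTP_OPTS a frame_txtime of 0 still means "leave unchanged", so the series adds CAN_ISOTP_FRAME_TXTIME_ZERO as an explicit request for a zero N_As time. A sketch, to be issued before bind(); a real caller would getsockopt() first so the other members keep their current values:

#include <linux/can/isotp.h>
#include <sys/socket.h>

static int force_zero_txtime(int fd)
{
	struct can_isotp_options opts = {
		/* remaining members zeroed here only for brevity */
		.frame_txtime = CAN_ISOTP_FRAME_TXTIME_ZERO,
	};

	return setsockopt(fd, SOL_CAN_ISOTP, CAN_ISOTP_OPTS,
			  &opts, sizeof(opts));
}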
index ba69ddf..8a51094 100644
@@ -340,7 +340,6 @@ int netdev_name_node_alt_create(struct net_device *dev, const char *name)
 
        return 0;
 }
-EXPORT_SYMBOL(netdev_name_node_alt_create);
 
 static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
 {
@@ -368,7 +367,6 @@ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
 
        return 0;
 }
-EXPORT_SYMBOL(netdev_name_node_alt_destroy);
 
 static void netdev_name_node_alt_flush(struct net_device *dev)
 {
@@ -2992,13 +2990,25 @@ EXPORT_SYMBOL(netif_set_real_num_queues);
 /**
  * netif_get_num_default_rss_queues - default number of RSS queues
  *
- * This routine should set an upper limit on the number of RSS queues
- * used by default by multiqueue devices.
+ * Default is the number of physical cores if there are only one or two, or
+ * half the number of physical cores (rounded up) if there are more.
  */
 int netif_get_num_default_rss_queues(void)
 {
-       return is_kdump_kernel() ?
-               1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
+       cpumask_var_t cpus;
+       int cpu, count = 0;
+
+       if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL)))
+               return 1;
+
+       cpumask_copy(cpus, cpu_online_mask);
+       for_each_cpu(cpu, cpus) {
+               ++count;
+               cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
+       }
+       free_cpumask_var(cpus);
+
+       return count > 2 ? DIV_ROUND_UP(count, 2) : count;
 }
 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
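As a worked example: on a host with 16 physical cores exposing 32 SMT threads, the sibling-mask walk counts 16 cores, and since 16 > 2 the function returns DIV_ROUND_UP(16, 2) = 8 default RSS queues; a machine with one or two physical cores keeps one queue per core, and a kdump kernel still gets 1.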
 
@@ -3635,7 +3645,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 out_kfree_skb:
        kfree_skb(skb);
 out_null:
-       atomic_long_inc(&dev->tx_dropped);
+       dev_core_stats_tx_dropped_inc(dev);
        return NULL;
 }
 
@@ -4186,7 +4196,7 @@ recursion_alert:
        rc = -ENETDOWN;
        rcu_read_unlock_bh();
 
-       atomic_long_inc(&dev->tx_dropped);
+       dev_core_stats_tx_dropped_inc(dev);
        kfree_skb_list(skb);
        return rc;
 out:
@@ -4238,7 +4248,7 @@ int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
        local_bh_enable();
        return ret;
 drop:
-       atomic_long_inc(&dev->tx_dropped);
+       dev_core_stats_tx_dropped_inc(dev);
        kfree_skb_list(skb);
        return NET_XMIT_DROP;
 }
@@ -4267,6 +4277,8 @@ static inline void ____napi_schedule(struct softnet_data *sd,
 {
        struct task_struct *thread;
 
+       lockdep_assert_irqs_disabled();
+
        if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
                /* Paired with smp_mb__before_atomic() in
                 * napi_enable()/dev_set_threaded().
@@ -4604,7 +4616,7 @@ drop:
        sd->dropped++;
        rps_unlock_irq_restore(sd, &flags);
 
-       atomic_long_inc(&skb->dev->rx_dropped);
+       dev_core_stats_rx_dropped_inc(skb->dev);
        kfree_skb_reason(skb, reason);
        return NET_RX_DROP;
 }
@@ -5359,10 +5371,10 @@ check_vlan_id:
        } else {
 drop:
                if (!deliver_exact) {
-                       atomic_long_inc(&skb->dev->rx_dropped);
+                       dev_core_stats_rx_dropped_inc(skb->dev);
                        kfree_skb_reason(skb, SKB_DROP_REASON_PTYPE_ABSENT);
                } else {
-                       atomic_long_inc(&skb->dev->rx_nohandler);
+                       dev_core_stats_rx_nohandler_inc(skb->dev);
                        kfree_skb(skb);
                }
                /* Jamal, now you will not able to escape explaining
@@ -10282,6 +10294,25 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
 }
 EXPORT_SYMBOL(netdev_stats_to_stats64);
 
+struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev)
+{
+       struct net_device_core_stats __percpu *p;
+
+       p = alloc_percpu_gfp(struct net_device_core_stats,
+                            GFP_ATOMIC | __GFP_NOWARN);
+
+       if (p && cmpxchg(&dev->core_stats, NULL, p))
+               free_percpu(p);
+
+       /* This READ_ONCE() pairs with the cmpxchg() above */
+       p = READ_ONCE(dev->core_stats);
+       if (!p)
+               return NULL;
+
+       return this_cpu_ptr(p);
+}
+EXPORT_SYMBOL(netdev_core_stats_alloc);
+
 /**
  *     dev_get_stats   - get network device statistics
  *     @dev: device to get statistics from
@@ -10296,6 +10327,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
                                        struct rtnl_link_stats64 *storage)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
+       const struct net_device_core_stats __percpu *p;
 
        if (ops->ndo_get_stats64) {
                memset(storage, 0, sizeof(*storage));
@@ -10305,9 +10337,20 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
        } else {
                netdev_stats_to_stats64(storage, &dev->stats);
        }
-       storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
-       storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
-       storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
+
+       /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
+       p = READ_ONCE(dev->core_stats);
+       if (p) {
+               const struct net_device_core_stats *core_stats;
+               int i;
+
+               for_each_possible_cpu(i) {
+                       core_stats = per_cpu_ptr(p, i);
+                       storage->rx_dropped += local_read(&core_stats->rx_dropped);
+                       storage->tx_dropped += local_read(&core_stats->tx_dropped);
+                       storage->rx_nohandler += local_read(&core_stats->rx_nohandler);
+               }
+       }
        return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
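The dev_core_stats_*_inc() calls at the drop sites above resolve to per-field inline helpers on the header side of this change (include/linux/netdevice.h, not shown in this diff). Roughly, as a reconstruction sketch:

struct net_device_core_stats {
	local_t	rx_dropped;
	local_t	tx_dropped;
	local_t	rx_nohandler;
} __aligned(4 * sizeof(local_t));

static inline struct net_device_core_stats *dev_core_stats(struct net_device *dev)
{
	/* Pairs with the cmpxchg() in netdev_core_stats_alloc() */
	struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);

	if (likely(p))
		return this_cpu_ptr(p);

	return netdev_core_stats_alloc(dev);	/* lazy first allocation */
}

#define DEV_CORE_STATS_INC(FIELD)					\
static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev)\
{									\
	struct net_device_core_stats *p = dev_core_stats(dev);		\
									\
	if (p)								\
		local_inc(&p->FIELD);					\
}
DEV_CORE_STATS_INC(rx_dropped)
DEV_CORE_STATS_INC(tx_dropped)
DEV_CORE_STATS_INC(rx_nohandler)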
@@ -10569,6 +10612,8 @@ void free_netdev(struct net_device *dev)
        free_percpu(dev->pcpu_refcnt);
        dev->pcpu_refcnt = NULL;
 #endif
+       free_percpu(dev->core_stats);
+       dev->core_stats = NULL;
        free_percpu(dev->xdp_bulkq);
        dev->xdp_bulkq = NULL;
 
index fcd9f6d..aeca13b 100644
@@ -225,6 +225,33 @@ struct devlink *__must_check devlink_try_get(struct devlink *devlink)
        return NULL;
 }
 
+void devl_assert_locked(struct devlink *devlink)
+{
+       lockdep_assert_held(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devl_assert_locked);
+
+#ifdef CONFIG_LOCKDEP
+/* For use in conjunction with LOCKDEP only e.g. rcu_dereference_protected() */
+bool devl_lock_is_held(struct devlink *devlink)
+{
+       return lockdep_is_held(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devl_lock_is_held);
+#endif
+
+void devl_lock(struct devlink *devlink)
+{
+       mutex_lock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devl_lock);
+
+void devl_unlock(struct devlink *devlink)
+{
+       mutex_unlock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devl_unlock);
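devl_lock()/devl_unlock() expose the per-instance mutex so a driver can hold it across several operations, with devl_assert_locked() and devl_lock_is_held() as the lockdep plumbing. A hypothetical caller of the new API (foo_* names are illustrative; devl_port_register() appears further down in this diff):

static int foo_register_port(struct devlink *devlink, struct foo_port *port)
{
	int err;

	devl_lock(devlink);
	/* devl_port_register() lockdep-asserts the instance lock */
	err = devl_port_register(devlink, &port->dl_port, port->index);
	devl_unlock(devlink);

	return err;
}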
+
 static struct devlink *devlink_get_from_attrs(struct net *net,
                                              struct nlattr **attrs)
 {
@@ -1541,35 +1568,20 @@ static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb,
        return 0;
 }
 
-static int devlink_port_split(struct devlink *devlink, u32 port_index,
-                             u32 count, struct netlink_ext_ack *extack)
-
-{
-       if (devlink->ops->port_split)
-               return devlink->ops->port_split(devlink, port_index, count,
-                                               extack);
-       return -EOPNOTSUPP;
-}
-
 static int devlink_nl_cmd_port_split_doit(struct sk_buff *skb,
                                          struct genl_info *info)
 {
+       struct devlink_port *devlink_port = info->user_ptr[1];
        struct devlink *devlink = info->user_ptr[0];
-       struct devlink_port *devlink_port;
-       u32 port_index;
        u32 count;
 
-       if (!info->attrs[DEVLINK_ATTR_PORT_INDEX] ||
-           !info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT])
+       if (!info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT])
                return -EINVAL;
+       if (!devlink->ops->port_split)
+               return -EOPNOTSUPP;
 
-       devlink_port = devlink_port_get_from_info(devlink, info);
-       port_index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
        count = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT]);
 
-       if (IS_ERR(devlink_port))
-               return -EINVAL;
-
        if (!devlink_port->attrs.splittable) {
                /* Split ports cannot be split. */
                if (devlink_port->attrs.split)
@@ -1584,29 +1596,19 @@ static int devlink_nl_cmd_port_split_doit(struct sk_buff *skb,
                return -EINVAL;
        }
 
-       return devlink_port_split(devlink, port_index, count, info->extack);
-}
-
-static int devlink_port_unsplit(struct devlink *devlink, u32 port_index,
-                               struct netlink_ext_ack *extack)
-
-{
-       if (devlink->ops->port_unsplit)
-               return devlink->ops->port_unsplit(devlink, port_index, extack);
-       return -EOPNOTSUPP;
+       return devlink->ops->port_split(devlink, devlink_port, count,
+                                       info->extack);
 }
 
 static int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb,
                                            struct genl_info *info)
 {
+       struct devlink_port *devlink_port = info->user_ptr[1];
        struct devlink *devlink = info->user_ptr[0];
-       u32 port_index;
 
-       if (!info->attrs[DEVLINK_ATTR_PORT_INDEX])
-               return -EINVAL;
-
-       port_index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
-       return devlink_port_unsplit(devlink, port_index, info->extack);
+       if (!devlink->ops->port_unsplit)
+               return -EOPNOTSUPP;
+       return devlink->ops->port_unsplit(devlink, devlink_port, info->extack);
 }
 
 static int devlink_port_new_notifiy(struct devlink *devlink,
@@ -2866,15 +2868,11 @@ static int devlink_rate_nodes_check(struct devlink *devlink, u16 mode,
 {
        struct devlink_rate *devlink_rate;
 
-       /* Take the lock to sync with devlink_rate_nodes_destroy() */
-       mutex_lock(&devlink->lock);
        list_for_each_entry(devlink_rate, &devlink->rate_list, list)
                if (devlink_rate_is_node(devlink_rate)) {
-                       mutex_unlock(&devlink->lock);
                        NL_SET_ERR_MSG_MOD(extack, "Rate node(s) exists.");
                        return -EBUSY;
                }
-       mutex_unlock(&devlink->lock);
        return 0;
 }
 
@@ -8645,14 +8643,14 @@ static const struct genl_small_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_port_split_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NO_LOCK,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
        },
        {
                .cmd = DEVLINK_CMD_PORT_UNSPLIT,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_port_unsplit_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NO_LOCK,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
        },
        {
                .cmd = DEVLINK_CMD_PORT_NEW,
@@ -8733,14 +8731,12 @@ static const struct genl_small_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_eswitch_get_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NO_LOCK,
        },
        {
                .cmd = DEVLINK_CMD_ESWITCH_SET,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_eswitch_set_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NO_LOCK,
        },
        {
                .cmd = DEVLINK_CMD_DPIPE_TABLE_GET,
@@ -9249,6 +9245,32 @@ static void devlink_port_type_warn_cancel(struct devlink_port *devlink_port)
        cancel_delayed_work_sync(&devlink_port->type_warn_dw);
 }
 
+int devl_port_register(struct devlink *devlink,
+                      struct devlink_port *devlink_port,
+                      unsigned int port_index)
+{
+       lockdep_assert_held(&devlink->lock);
+
+       if (devlink_port_index_exists(devlink, port_index))
+               return -EEXIST;
+
+       WARN_ON(devlink_port->devlink);
+       devlink_port->devlink = devlink;
+       devlink_port->index = port_index;
+       spin_lock_init(&devlink_port->type_lock);
+       INIT_LIST_HEAD(&devlink_port->reporter_list);
+       mutex_init(&devlink_port->reporters_lock);
+       list_add_tail(&devlink_port->list, &devlink->port_list);
+       INIT_LIST_HEAD(&devlink_port->param_list);
+       INIT_LIST_HEAD(&devlink_port->region_list);
+
+       INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn);
+       devlink_port_type_warn_schedule(devlink_port);
+       devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(devl_port_register);
+
 /**
  *     devlink_port_register - Register devlink port
  *
@@ -9266,29 +9288,28 @@ int devlink_port_register(struct devlink *devlink,
                          struct devlink_port *devlink_port,
                          unsigned int port_index)
 {
-       mutex_lock(&devlink->lock);
-       if (devlink_port_index_exists(devlink, port_index)) {
-               mutex_unlock(&devlink->lock);
-               return -EEXIST;
-       }
+       int err;
 
-       WARN_ON(devlink_port->devlink);
-       devlink_port->devlink = devlink;
-       devlink_port->index = port_index;
-       spin_lock_init(&devlink_port->type_lock);
-       INIT_LIST_HEAD(&devlink_port->reporter_list);
-       mutex_init(&devlink_port->reporters_lock);
-       list_add_tail(&devlink_port->list, &devlink->port_list);
-       INIT_LIST_HEAD(&devlink_port->param_list);
-       INIT_LIST_HEAD(&devlink_port->region_list);
+       mutex_lock(&devlink->lock);
+       err = devl_port_register(devlink, devlink_port, port_index);
        mutex_unlock(&devlink->lock);
-       INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn);
-       devlink_port_type_warn_schedule(devlink_port);
-       devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
-       return 0;
+       return err;
 }
 EXPORT_SYMBOL_GPL(devlink_port_register);
 
+void devl_port_unregister(struct devlink_port *devlink_port)
+{
+       lockdep_assert_held(&devlink_port->devlink->lock);
+
+       devlink_port_type_warn_cancel(devlink_port);
+       devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
+       list_del(&devlink_port->list);
+       WARN_ON(!list_empty(&devlink_port->reporter_list));
+       WARN_ON(!list_empty(&devlink_port->region_list));
+       mutex_destroy(&devlink_port->reporters_lock);
+}
+EXPORT_SYMBOL_GPL(devl_port_unregister);
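
With devl_lock()/devl_unlock() from earlier in this file, the locked register/unregister pair lets a driver set up a whole array of ports under a single lock acquisition instead of paying one round-trip per devlink_port_register() call. A hedged sketch, where foo_priv, dl_ports and FOO_NUM_PORTS are made-up driver state:

    static int foo_register_ports(struct devlink *devlink,
                                  struct foo_priv *priv)
    {
            int i, err;

            devl_lock(devlink);
            for (i = 0; i < FOO_NUM_PORTS; i++) {
                    err = devl_port_register(devlink, &priv->dl_ports[i], i);
                    if (err)
                            goto err_unwind;
            }
            devl_unlock(devlink);

            return 0;

    err_unwind:
            while (--i >= 0)
                    devl_port_unregister(&priv->dl_ports[i]);
            devl_unlock(devlink);

            return err;
    }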
+
 /**
  *     devlink_port_unregister - Unregister devlink port
  *
@@ -9298,14 +9319,9 @@ void devlink_port_unregister(struct devlink_port *devlink_port)
 {
        struct devlink *devlink = devlink_port->devlink;
 
-       devlink_port_type_warn_cancel(devlink_port);
-       devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
        mutex_lock(&devlink->lock);
-       list_del(&devlink_port->list);
+       devl_port_unregister(devlink_port);
        mutex_unlock(&devlink->lock);
-       WARN_ON(!list_empty(&devlink_port->reporter_list));
-       WARN_ON(!list_empty(&devlink_port->region_list));
-       mutex_destroy(&devlink_port->reporters_lock);
 }
 EXPORT_SYMBOL_GPL(devlink_port_unregister);
 
@@ -9526,30 +9542,26 @@ void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 contro
 EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_sf_set);
 
 /**
- * devlink_rate_leaf_create - create devlink rate leaf
- *
+ * devl_rate_leaf_create - create devlink rate leaf
  * @devlink_port: devlink port object to create rate object on
  * @priv: driver private data
  *
  * Create devlink rate object of type leaf on provided @devlink_port.
- * Throws call trace if @devlink_port already has a devlink rate object.
- *
- * Context: Takes and release devlink->lock <mutex>.
- *
- * Return: -ENOMEM if failed to allocate rate object, 0 otherwise.
  */
-int
-devlink_rate_leaf_create(struct devlink_port *devlink_port, void *priv)
+int devl_rate_leaf_create(struct devlink_port *devlink_port, void *priv)
 {
        struct devlink *devlink = devlink_port->devlink;
        struct devlink_rate *devlink_rate;
 
+       devl_assert_locked(devlink_port->devlink);
+
+       if (WARN_ON(devlink_port->devlink_rate))
+               return -EBUSY;
+
        devlink_rate = kzalloc(sizeof(*devlink_rate), GFP_KERNEL);
        if (!devlink_rate)
                return -ENOMEM;
 
-       mutex_lock(&devlink->lock);
-       WARN_ON(devlink_port->devlink_rate);
        devlink_rate->type = DEVLINK_RATE_TYPE_LEAF;
        devlink_rate->devlink = devlink;
        devlink_rate->devlink_port = devlink_port;
@@ -9557,12 +9569,42 @@ devlink_rate_leaf_create(struct devlink_port *devlink_port, void *priv)
        list_add_tail(&devlink_rate->list, &devlink->rate_list);
        devlink_port->devlink_rate = devlink_rate;
        devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_NEW);
-       mutex_unlock(&devlink->lock);
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(devl_rate_leaf_create);
+
+int
+devlink_rate_leaf_create(struct devlink_port *devlink_port, void *priv)
+{
+       struct devlink *devlink = devlink_port->devlink;
+       int ret;
+
+       mutex_lock(&devlink->lock);
+       ret = devl_rate_leaf_create(devlink_port, priv);
+       mutex_unlock(&devlink->lock);
+
+       return ret;
+}
 EXPORT_SYMBOL_GPL(devlink_rate_leaf_create);
 
+void devl_rate_leaf_destroy(struct devlink_port *devlink_port)
+{
+       struct devlink_rate *devlink_rate = devlink_port->devlink_rate;
+
+       devl_assert_locked(devlink_port->devlink);
+       if (!devlink_rate)
+               return;
+
+       devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_DEL);
+       if (devlink_rate->parent)
+               refcount_dec(&devlink_rate->parent->refcnt);
+       list_del(&devlink_rate->list);
+       devlink_port->devlink_rate = NULL;
+       kfree(devlink_rate);
+}
+EXPORT_SYMBOL_GPL(devl_rate_leaf_destroy);
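
The rate-leaf constructor and destructor follow the same locking convention, so a driver already holding the instance lock can attach a rate object to a port it has just registered and unwind in the same critical section. A sketch under the same hypothetical driver state as above:

    static int foo_port_init(struct devlink *devlink, struct foo_port *p)
    {
            int err;

            devl_lock(devlink);
            err = devl_port_register(devlink, &p->dl_port, p->index);
            if (err)
                    goto out;

            err = devl_rate_leaf_create(&p->dl_port, p);
            if (err)
                    devl_port_unregister(&p->dl_port);
    out:
            devl_unlock(devlink);
            return err;
    }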
+
 /**
  * devlink_rate_leaf_destroy - destroy devlink rate leaf
  *
@@ -9579,32 +9621,25 @@ void devlink_rate_leaf_destroy(struct devlink_port *devlink_port)
                return;
 
        mutex_lock(&devlink->lock);
-       devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_DEL);
-       if (devlink_rate->parent)
-               refcount_dec(&devlink_rate->parent->refcnt);
-       list_del(&devlink_rate->list);
-       devlink_port->devlink_rate = NULL;
+       devl_rate_leaf_destroy(devlink_port);
        mutex_unlock(&devlink->lock);
-       kfree(devlink_rate);
 }
 EXPORT_SYMBOL_GPL(devlink_rate_leaf_destroy);
 
 /**
- * devlink_rate_nodes_destroy - destroy all devlink rate nodes on device
- *
+ * devl_rate_nodes_destroy - destroy all devlink rate nodes on device
  * @devlink: devlink instance
  *
  * Unset parent for all rate objects and destroy all rate nodes
  * on specified device.
- *
- * Context: Takes and release devlink->lock <mutex>.
  */
-void devlink_rate_nodes_destroy(struct devlink *devlink)
+void devl_rate_nodes_destroy(struct devlink *devlink)
 {
        struct devlink_rate *devlink_rate, *tmp;
        const struct devlink_ops *ops = devlink->ops;
 
-       mutex_lock(&devlink->lock);
+       devl_assert_locked(devlink);
+
        list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
                if (!devlink_rate->parent)
                        continue;
@@ -9625,6 +9660,23 @@ void devlink_rate_nodes_destroy(struct devlink *devlink)
                        kfree(devlink_rate);
                }
        }
+}
+EXPORT_SYMBOL_GPL(devl_rate_nodes_destroy);
+
+/**
+ * devlink_rate_nodes_destroy - destroy all devlink rate nodes on device
+ *
+ * @devlink: devlink instance
+ *
+ * Unset parent for all rate objects and destroy all rate nodes
+ * on specified device.
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ */
+void devlink_rate_nodes_destroy(struct devlink *devlink)
+{
+       mutex_lock(&devlink->lock);
+       devl_rate_nodes_destroy(devlink);
        mutex_unlock(&devlink->lock);
 }
 EXPORT_SYMBOL_GPL(devlink_rate_nodes_destroy);
index 34441a3..03b6e64 100644 (file)
@@ -1283,6 +1283,7 @@ proto_again:
                break;
        }
 
+       case htons(ETH_P_PRP):
        case htons(ETH_P_HSR): {
                struct hsr_tag *hdr, _hdr;
 
index ee5e7e8..78110ed 100644 (file)
@@ -92,6 +92,31 @@ void dev_remove_offload(struct packet_offload *po)
 }
 EXPORT_SYMBOL(dev_remove_offload);
 
+/**
+ *     skb_eth_gso_segment - segmentation handler for ethernet protocols.
+ *     @skb: buffer to segment
+ *     @features: features for the output path (see dev->features)
+ *     @type: Ethernet Protocol ID
+ */
+struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
+                                   netdev_features_t features, __be16 type)
+{
+       struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
+       struct packet_offload *ptype;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(ptype, &offload_base, list) {
+               if (ptype->type == type && ptype->callbacks.gso_segment) {
+                       segs = ptype->callbacks.gso_segment(skb, features);
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       return segs;
+}
+EXPORT_SYMBOL(skb_eth_gso_segment);
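
This helper replaces the push-the-MAC-header-then-call-skb_mac_gso_segment() dance in tunnel offload paths; the xfrm4/xfrm6 hunks later in this diff collapse to exactly this shape. A minimal sketch of a caller segmenting an inner IPv4 frame:

    static struct sk_buff *foo_tunnel_gso_segment(struct sk_buff *skb,
                                                  netdev_features_t features)
    {
            /* Look up the ETH_P_IP offload and hand the skb to its
             * gso_segment callback; no mac_len fixups needed.
             */
            return skb_eth_gso_segment(skb, features, htons(ETH_P_IP));
    }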
+
 /**
  *     skb_mac_gso_segment - mac layer segmentation handler.
  *     @skb: buffer to segment
index 8462f92..541c7a7 100644 (file)
@@ -28,7 +28,7 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
 
        if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
 drop:
-               atomic_long_inc(&dev->rx_dropped);
+               dev_core_stats_rx_dropped_inc(dev);
                kfree_skb(skb);
                res = NET_RX_DROP;
                goto unlock;
index dd4cf01..598041b 100644 (file)
@@ -137,6 +137,18 @@ struct ptp_header *ptp_parse_header(struct sk_buff *skb, unsigned int type)
 }
 EXPORT_SYMBOL_GPL(ptp_parse_header);
 
+bool ptp_msg_is_sync(struct sk_buff *skb, unsigned int type)
+{
+       struct ptp_header *hdr;
+
+       hdr = ptp_parse_header(skb, type);
+       if (!hdr)
+               return false;
+
+       return ptp_get_msgtype(hdr, type) == PTP_MSGTYPE_SYNC;
+}
+EXPORT_SYMBOL_GPL(ptp_msg_is_sync);
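
ptp_msg_is_sync() saves MAC and PHY drivers from open-coding the header parse when all they need to know is whether an outgoing frame is a PTP Sync message, e.g. to decide whether one-step TX timestamping applies. A hedged sketch (the classification step via ptp_classify_raw() is standard; the foo_ name is hypothetical):

    static bool foo_onestep_stamp_needed(struct sk_buff *skb)
    {
            unsigned int type = ptp_classify_raw(skb);

            if (type == PTP_CLASS_NONE)
                    return false;

            /* One-step hardware only rewrites Sync messages. */
            return ptp_msg_is_sync(skb, type);
    }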
+
 void __init ptp_classifier_init(void)
 {
        static struct sock_filter ptp_filter[] __initdata = {
index a66b676..159c9c6 100644 (file)
@@ -3652,13 +3652,24 @@ static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
                           bool *changed, struct netlink_ext_ack *extack)
 {
        char *alt_ifname;
+       size_t size;
        int err;
 
        err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
        if (err)
                return err;
 
-       alt_ifname = nla_strdup(attr, GFP_KERNEL);
+       if (cmd == RTM_NEWLINKPROP) {
+               size = rtnl_prop_list_size(dev);
+               size += nla_total_size(ALTIFNAMSIZ);
+               if (size >= U16_MAX) {
+                       NL_SET_ERR_MSG(extack,
+                                      "effective property list too long");
+                       return -EINVAL;
+               }
+       }
+
+       alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
        if (!alt_ifname)
                return -ENOMEM;
 
@@ -5440,12 +5451,12 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
                list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
                        if (af_ops->fill_stats_af) {
                                struct nlattr *af;
-                               int err;
 
                                af = nla_nest_start_noflag(skb,
                                                           af_ops->family);
                                if (!af) {
                                        rcu_read_unlock();
+                                       err = -EMSGSIZE;
                                        goto nla_put_failure;
                                }
                                err = af_ops->fill_stats_af(skb, dev);
index 784c92e..1180a0c 100644 (file)
@@ -1377,9 +1377,9 @@ set_sndbuf:
                        if (!(sk_is_tcp(sk) ||
                              (sk->sk_type == SOCK_DGRAM &&
                               sk->sk_protocol == IPPROTO_UDP)))
-                               ret = -ENOTSUPP;
+                               ret = -EOPNOTSUPP;
                } else if (sk->sk_family != PF_RDS) {
-                       ret = -ENOTSUPP;
+                       ret = -EOPNOTSUPP;
                }
                if (!ret) {
                        if (val < 0 || val > 1)
index b5f2d42..2442020 100644 (file)
@@ -359,7 +359,8 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
        if (IS_ERR(xdp_alloc))
                return PTR_ERR(xdp_alloc);
 
-       trace_mem_connect(xdp_alloc, xdp_rxq);
+       if (trace_mem_connect_enabled() && xdp_alloc)
+               trace_mem_connect(xdp_alloc, xdp_rxq);
        return 0;
 }
 
index 06d5de2..89c6c86 100644 (file)
@@ -471,7 +471,7 @@ int dsa_port_walk_fdbs(struct dsa_switch *ds, int port, dsa_fdb_walk_cb_t cb)
 {
        struct dsa_port *dp = dsa_to_port(ds, port);
        struct dsa_mac_addr *a;
-       int err;
+       int err = 0;
 
        mutex_lock(&dp->addr_lists_lock);
 
@@ -491,7 +491,7 @@ int dsa_port_walk_mdbs(struct dsa_switch *ds, int port, dsa_fdb_walk_cb_t cb)
 {
        struct dsa_port *dp = dsa_to_port(ds, port);
        struct dsa_mac_addr *a;
-       int err;
+       int err = 0;
 
        mutex_lock(&dp->addr_lists_lock);
 
@@ -507,6 +507,66 @@ int dsa_port_walk_mdbs(struct dsa_switch *ds, int port, dsa_fdb_walk_cb_t cb)
 }
 EXPORT_SYMBOL_GPL(dsa_port_walk_mdbs);
 
+bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b)
+{
+       if (a->type != b->type)
+               return false;
+
+       switch (a->type) {
+       case DSA_DB_PORT:
+               return a->dp == b->dp;
+       case DSA_DB_LAG:
+               return a->lag.dev == b->lag.dev;
+       case DSA_DB_BRIDGE:
+               return a->bridge.num == b->bridge.num;
+       default:
+               WARN_ON(1);
+               return false;
+       }
+}
+
+bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
+                                const unsigned char *addr, u16 vid,
+                                struct dsa_db db)
+{
+       struct dsa_port *dp = dsa_to_port(ds, port);
+       struct dsa_mac_addr *a;
+
+       lockdep_assert_held(&dp->addr_lists_lock);
+
+       list_for_each_entry(a, &dp->fdbs, list) {
+               if (!ether_addr_equal(a->addr, addr) || a->vid != vid)
+                       continue;
+
+               if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
+                       return true;
+       }
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(dsa_fdb_present_in_other_db);
+
+bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
+                                const struct switchdev_obj_port_mdb *mdb,
+                                struct dsa_db db)
+{
+       struct dsa_port *dp = dsa_to_port(ds, port);
+       struct dsa_mac_addr *a;
+
+       lockdep_assert_held(&dp->addr_lists_lock);
+
+       list_for_each_entry(a, &dp->mdbs, list) {
+               if (!ether_addr_equal(a->addr, mdb->addr) || a->vid != mdb->vid)
+                       continue;
+
+               if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
+                       return true;
+       }
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);
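
Both helpers are intended for a driver's FDB/MDB removal paths, which DSA already calls with dp->addr_lists_lock held, so the driver can keep a shared hardware entry alive while any other database (port, LAG or bridge) still references the address. A hedged sketch, with foo_hw_fdb_del() standing in for the real hardware accessor:

    static int foo_port_fdb_del(struct dsa_switch *ds, int port,
                                const unsigned char *addr, u16 vid,
                                struct dsa_db db)
    {
            /* Same address still referenced from another DB: leave
             * the hardware entry in place.
             */
            if (dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
                    return 0;

            return foo_hw_fdb_del(ds, port, addr, vid);
    }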
+
 static int __init dsa_init_module(void)
 {
        int rc;
index d5f21a7..47b3342 100644 (file)
@@ -457,12 +457,6 @@ static int dsa_port_setup(struct dsa_port *dp)
        if (dp->setup)
                return 0;
 
-       mutex_init(&dp->addr_lists_lock);
-       mutex_init(&dp->vlans_lock);
-       INIT_LIST_HEAD(&dp->fdbs);
-       INIT_LIST_HEAD(&dp->mdbs);
-       INIT_LIST_HEAD(&dp->vlans);
-
        if (ds->ops->port_setup) {
                err = ds->ops->port_setup(ds, dp->index);
                if (err)
@@ -568,9 +562,7 @@ static void dsa_port_teardown(struct dsa_port *dp)
 {
        struct devlink_port *dlp = &dp->devlink_port;
        struct dsa_switch *ds = dp->ds;
-       struct dsa_mac_addr *a, *tmp;
        struct net_device *slave;
-       struct dsa_vlan *v, *n;
 
        if (!dp->setup)
                return;
@@ -601,21 +593,6 @@ static void dsa_port_teardown(struct dsa_port *dp)
                break;
        }
 
-       list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
-               list_del(&a->list);
-               kfree(a);
-       }
-
-       list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
-               list_del(&a->list);
-               kfree(a);
-       }
-
-       list_for_each_entry_safe(v, n, &dp->vlans, list) {
-               list_del(&v->list);
-               kfree(v);
-       }
-
        dp->setup = false;
 }
 
@@ -1072,7 +1049,7 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
 static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
 {
        struct dsa_port *dp;
-       int err;
+       int err = 0;
 
        rtnl_lock();
 
@@ -1084,7 +1061,7 @@ static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
 
                        err = dsa_master_setup(master, dp);
                        if (err)
-                               return err;
+                               break;
 
                        /* Replay master state event */
                        dsa_tree_master_admin_state_change(dst, master, admin_up);
@@ -1095,7 +1072,7 @@ static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
 
        rtnl_unlock();
 
-       return 0;
+       return err;
 }
 
 static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
@@ -1374,6 +1351,11 @@ static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
        dp->ds = ds;
        dp->index = index;
 
+       mutex_init(&dp->addr_lists_lock);
+       mutex_init(&dp->vlans_lock);
+       INIT_LIST_HEAD(&dp->fdbs);
+       INIT_LIST_HEAD(&dp->mdbs);
+       INIT_LIST_HEAD(&dp->vlans);
        INIT_LIST_HEAD(&dp->list);
        list_add_tail(&dp->list, &dst->ports);
 
@@ -1515,6 +1497,7 @@ static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
                const char *user_protocol;
 
                master = of_find_net_device_by_node(ethernet);
+               of_node_put(ethernet);
                if (!master)
                        return -EPROBE_DEFER;
 
@@ -1712,6 +1695,9 @@ static void dsa_switch_release_ports(struct dsa_switch *ds)
        struct dsa_port *dp, *next;
 
        dsa_switch_for_each_port_safe(dp, next, ds) {
+               WARN_ON(!list_empty(&dp->fdbs));
+               WARN_ON(!list_empty(&dp->mdbs));
+               WARN_ON(!list_empty(&dp->vlans));
                list_del(&dp->list);
                kfree(dp);
        }
index c3c7491..5d3f4a6 100644 (file)
@@ -182,6 +182,8 @@ const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol);
 void dsa_tag_driver_put(const struct dsa_device_ops *ops);
 const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
 
+bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b);
+
 bool dsa_schedule_work(struct work_struct *work);
 const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
 
@@ -213,6 +215,9 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
 void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
                               const struct dsa_device_ops *tag_ops);
 int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age);
+int dsa_port_set_mst_state(struct dsa_port *dp,
+                          const struct switchdev_mst_state *state,
+                          struct netlink_ext_ack *extack);
 int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
 int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
 void dsa_port_disable_rt(struct dsa_port *dp);
@@ -232,6 +237,10 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
                            struct netlink_ext_ack *extack);
 bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
 int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock);
+int dsa_port_mst_enable(struct dsa_port *dp, bool on,
+                       struct netlink_ext_ack *extack);
+int dsa_port_vlan_msti(struct dsa_port *dp,
+                      const struct switchdev_vlan_msti *msti);
 int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
                        bool targeted_match);
 int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
index 58291df..32d472a 100644 (file)
@@ -30,12 +30,11 @@ static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
        return dsa_tree_notify(dp->ds->dst, e, v);
 }
 
-static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp)
+static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp, u16 vid)
 {
        struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
        struct switchdev_notifier_fdb_info info = {
-               /* flush all VLANs */
-               .vid = 0,
+               .vid = vid,
        };
 
        /* When the port becomes standalone it has already left the bridge.
@@ -57,7 +56,42 @@ static void dsa_port_fast_age(const struct dsa_port *dp)
 
        ds->ops->port_fast_age(ds, dp->index);
 
-       dsa_port_notify_bridge_fdb_flush(dp);
+       /* flush all VLANs */
+       dsa_port_notify_bridge_fdb_flush(dp, 0);
+}
+
+static int dsa_port_vlan_fast_age(const struct dsa_port *dp, u16 vid)
+{
+       struct dsa_switch *ds = dp->ds;
+       int err;
+
+       if (!ds->ops->port_vlan_fast_age)
+               return -EOPNOTSUPP;
+
+       err = ds->ops->port_vlan_fast_age(ds, dp->index, vid);
+
+       if (!err)
+               dsa_port_notify_bridge_fdb_flush(dp, vid);
+
+       return err;
+}
+
+static int dsa_port_msti_fast_age(const struct dsa_port *dp, u16 msti)
+{
+       DECLARE_BITMAP(vids, VLAN_N_VID) = { 0 };
+       int err, vid;
+
+       err = br_mst_get_info(dsa_port_bridge_dev_get(dp), msti, vids);
+       if (err)
+               return err;
+
+       for_each_set_bit(vid, vids, VLAN_N_VID) {
+               err = dsa_port_vlan_fast_age(dp, vid);
+               if (err)
+                       return err;
+       }
+
+       return 0;
 }
 
 static bool dsa_port_can_configure_learning(struct dsa_port *dp)
@@ -118,6 +152,42 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
                pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
 }
 
+int dsa_port_set_mst_state(struct dsa_port *dp,
+                          const struct switchdev_mst_state *state,
+                          struct netlink_ext_ack *extack)
+{
+       struct dsa_switch *ds = dp->ds;
+       u8 prev_state;
+       int err;
+
+       if (!ds->ops->port_mst_state_set)
+               return -EOPNOTSUPP;
+
+       err = br_mst_get_state(dsa_port_to_bridge_port(dp), state->msti,
+                              &prev_state);
+       if (err)
+               return err;
+
+       err = ds->ops->port_mst_state_set(ds, dp->index, state);
+       if (err)
+               return err;
+
+       if (!(dp->learning &&
+             (prev_state == BR_STATE_LEARNING ||
+              prev_state == BR_STATE_FORWARDING) &&
+             (state->state == BR_STATE_DISABLED ||
+              state->state == BR_STATE_BLOCKING ||
+              state->state == BR_STATE_LISTENING)))
+               return 0;
+
+       err = dsa_port_msti_fast_age(dp, state->msti);
+       if (err)
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Unable to flush associated VLANs");
+
+       return 0;
+}
+
 int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
 {
        struct dsa_switch *ds = dp->ds;
@@ -321,6 +391,16 @@ static void dsa_port_bridge_destroy(struct dsa_port *dp,
        kfree(bridge);
 }
 
+static bool dsa_port_supports_mst(struct dsa_port *dp)
+{
+       struct dsa_switch *ds = dp->ds;
+
+       return ds->ops->vlan_msti_set &&
+               ds->ops->port_mst_state_set &&
+               ds->ops->port_vlan_fast_age &&
+               dsa_port_can_configure_learning(dp);
+}
+
 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
                         struct netlink_ext_ack *extack)
 {
@@ -334,6 +414,9 @@ int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
        struct net_device *brport_dev;
        int err;
 
+       if (br_mst_enabled(br) && !dsa_port_supports_mst(dp))
+               return -EOPNOTSUPP;
+
        /* Here the interface is already bridged. Reflect the current
         * configuration so that drivers can program their chips accordingly.
         */
@@ -735,6 +818,17 @@ int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
        return 0;
 }
 
+int dsa_port_mst_enable(struct dsa_port *dp, bool on,
+                       struct netlink_ext_ack *extack)
+{
+       if (on && !dsa_port_supports_mst(dp)) {
+               NL_SET_ERR_MSG_MOD(extack, "Hardware does not support MST");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
                              struct switchdev_brport_flags flags,
                              struct netlink_ext_ack *extack)
@@ -778,6 +872,17 @@ int dsa_port_bridge_flags(struct dsa_port *dp,
        return 0;
 }
 
+int dsa_port_vlan_msti(struct dsa_port *dp,
+                      const struct switchdev_vlan_msti *msti)
+{
+       struct dsa_switch *ds = dp->ds;
+
+       if (!ds->ops->vlan_msti_set)
+               return -EOPNOTSUPP;
+
+       return ds->ops->vlan_msti_set(ds, *dp->bridge, msti);
+}
+
 int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
                        bool targeted_match)
 {
index 42436ac..d1a3be1 100644 (file)
@@ -19,6 +19,7 @@
 #include <net/tc_act/tc_mirred.h>
 #include <linux/if_bridge.h>
 #include <linux/if_hsr.h>
+#include <net/dcbnl.h>
 #include <linux/netpoll.h>
 
 #include "dsa_priv.h"
@@ -305,6 +306,12 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
+       /* If the port is down, the address isn't synced yet to hardware or
+        * to the DSA master, so there is nothing to change.
+        */
+       if (!(dev->flags & IFF_UP))
+               goto out_change_dev_addr;
+
        if (dsa_switch_supports_uc_filtering(ds)) {
                err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
                if (err)
@@ -323,6 +330,7 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
        if (dsa_switch_supports_uc_filtering(ds))
                dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
 
+out_change_dev_addr:
        eth_hw_addr_set(dev, addr->sa_data);
 
        return 0;
@@ -443,6 +451,12 @@ static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
 
                ret = dsa_port_set_state(dp, attr->u.stp_state, true);
                break;
+       case SWITCHDEV_ATTR_ID_PORT_MST_STATE:
+               if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
+                       return -EOPNOTSUPP;
+
+               ret = dsa_port_set_mst_state(dp, &attr->u.mst_state, extack);
+               break;
        case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
                if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
                        return -EOPNOTSUPP;
@@ -456,6 +470,12 @@ static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
 
                ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
                break;
+       case SWITCHDEV_ATTR_ID_BRIDGE_MST:
+               if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
+                       return -EOPNOTSUPP;
+
+               ret = dsa_port_mst_enable(dp, attr->u.mst, extack);
+               break;
        case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
                if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
                        return -EOPNOTSUPP;
@@ -469,6 +489,12 @@ static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
 
                ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
                break;
+       case SWITCHDEV_ATTR_ID_VLAN_MSTI:
+               if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
+                       return -EOPNOTSUPP;
+
+               ret = dsa_port_vlan_msti(dp, &attr->u.vlan_msti);
+               break;
        default:
                ret = -EOPNOTSUPP;
                break;
@@ -1147,6 +1173,7 @@ dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
                                  struct tc_cls_matchall_offload *cls,
                                  bool ingress)
 {
+       struct netlink_ext_ack *extack = cls->common.extack;
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct dsa_mall_mirror_tc_entry *mirror;
@@ -1184,7 +1211,7 @@ dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
        mirror->to_local_port = to_dp->index;
        mirror->ingress = ingress;
 
-       err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
+       err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress, extack);
        if (err) {
                kfree(mall_tc_entry);
                return err;
@@ -1845,6 +1872,209 @@ out_master_failed:
        return err;
 }
 
+static int __maybe_unused
+dsa_slave_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
+{
+       struct dsa_port *dp = dsa_slave_to_port(dev);
+       struct dsa_switch *ds = dp->ds;
+       unsigned long mask, new_prio;
+       int err, port = dp->index;
+
+       if (!ds->ops->port_set_default_prio)
+               return -EOPNOTSUPP;
+
+       err = dcb_ieee_setapp(dev, app);
+       if (err)
+               return err;
+
+       mask = dcb_ieee_getapp_mask(dev, app);
+       new_prio = __fls(mask);
+
+       err = ds->ops->port_set_default_prio(ds, port, new_prio);
+       if (err) {
+               dcb_ieee_delapp(dev, app);
+               return err;
+       }
+
+       return 0;
+}
+
+static int __maybe_unused
+dsa_slave_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
+{
+       struct dsa_port *dp = dsa_slave_to_port(dev);
+       struct dsa_switch *ds = dp->ds;
+       unsigned long mask, new_prio;
+       int err, port = dp->index;
+       u8 dscp = app->protocol;
+
+       if (!ds->ops->port_add_dscp_prio)
+               return -EOPNOTSUPP;
+
+       if (dscp >= 64) {
+               netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
+                          dscp);
+               return -EINVAL;
+       }
+
+       err = dcb_ieee_setapp(dev, app);
+       if (err)
+               return err;
+
+       mask = dcb_ieee_getapp_mask(dev, app);
+       new_prio = __fls(mask);
+
+       err = ds->ops->port_add_dscp_prio(ds, port, dscp, new_prio);
+       if (err) {
+               dcb_ieee_delapp(dev, app);
+               return err;
+       }
+
+       return 0;
+}
+
+static int __maybe_unused dsa_slave_dcbnl_ieee_setapp(struct net_device *dev,
+                                                     struct dcb_app *app)
+{
+       switch (app->selector) {
+       case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
+               switch (app->protocol) {
+               case 0:
+                       return dsa_slave_dcbnl_set_default_prio(dev, app);
+               default:
+                       return -EOPNOTSUPP;
+               }
+               break;
+       case IEEE_8021QAZ_APP_SEL_DSCP:
+               return dsa_slave_dcbnl_add_dscp_prio(dev, app);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int __maybe_unused
+dsa_slave_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
+{
+       struct dsa_port *dp = dsa_slave_to_port(dev);
+       struct dsa_switch *ds = dp->ds;
+       unsigned long mask, new_prio;
+       int err, port = dp->index;
+
+       if (!ds->ops->port_set_default_prio)
+               return -EOPNOTSUPP;
+
+       err = dcb_ieee_delapp(dev, app);
+       if (err)
+               return err;
+
+       mask = dcb_ieee_getapp_mask(dev, app);
+       new_prio = mask ? __fls(mask) : 0;
+
+       err = ds->ops->port_set_default_prio(ds, port, new_prio);
+       if (err) {
+               dcb_ieee_setapp(dev, app);
+               return err;
+       }
+
+       return 0;
+}
+
+static int __maybe_unused
+dsa_slave_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
+{
+       struct dsa_port *dp = dsa_slave_to_port(dev);
+       struct dsa_switch *ds = dp->ds;
+       int err, port = dp->index;
+       u8 dscp = app->protocol;
+
+       if (!ds->ops->port_del_dscp_prio)
+               return -EOPNOTSUPP;
+
+       err = dcb_ieee_delapp(dev, app);
+       if (err)
+               return err;
+
+       err = ds->ops->port_del_dscp_prio(ds, port, dscp, app->priority);
+       if (err) {
+               dcb_ieee_setapp(dev, app);
+               return err;
+       }
+
+       return 0;
+}
+
+static int __maybe_unused dsa_slave_dcbnl_ieee_delapp(struct net_device *dev,
+                                                     struct dcb_app *app)
+{
+       switch (app->selector) {
+       case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
+               switch (app->protocol) {
+               case 0:
+                       return dsa_slave_dcbnl_del_default_prio(dev, app);
+               default:
+                       return -EOPNOTSUPP;
+               }
+               break;
+       case IEEE_8021QAZ_APP_SEL_DSCP:
+               return dsa_slave_dcbnl_del_dscp_prio(dev, app);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+/* Pre-populate the DCB application priority table with the priorities
+ * configured during switch setup, which we read from hardware here.
+ */
+static int dsa_slave_dcbnl_init(struct net_device *dev)
+{
+       struct dsa_port *dp = dsa_slave_to_port(dev);
+       struct dsa_switch *ds = dp->ds;
+       int port = dp->index;
+       int err;
+
+       if (ds->ops->port_get_default_prio) {
+               int prio = ds->ops->port_get_default_prio(ds, port);
+               struct dcb_app app = {
+                       .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
+                       .protocol = 0,
+                       .priority = prio,
+               };
+
+               if (prio < 0)
+                       return prio;
+
+               err = dcb_ieee_setapp(dev, &app);
+               if (err)
+                       return err;
+       }
+
+       if (ds->ops->port_get_dscp_prio) {
+               int protocol;
+
+               for (protocol = 0; protocol < 64; protocol++) {
+                       struct dcb_app app = {
+                               .selector = IEEE_8021QAZ_APP_SEL_DSCP,
+                               .protocol = protocol,
+                       };
+                       int prio;
+
+                       prio = ds->ops->port_get_dscp_prio(ds, port, protocol);
+                       if (prio == -EOPNOTSUPP)
+                               continue;
+                       if (prio < 0)
+                               return prio;
+
+                       app.priority = prio;
+
+                       err = dcb_ieee_setapp(dev, &app);
+                       if (err)
+                               return err;
+               }
+       }
+
+       return 0;
+}
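
The init path above only reads back what the switch was configured with, so a driver opting into this DCB interface mostly needs to supply the get/set pair for each app type it supports. A hedged sketch of a minimal default-priority implementation (foo_priv and foo_hw_write_default_prio() are hypothetical; IEEE priorities are 3-bit, hence the 0-7 range check):

    static int foo_port_get_default_prio(struct dsa_switch *ds, int port)
    {
            struct foo_priv *priv = ds->priv;

            return priv->ports[port].default_prio;
    }

    static int foo_port_set_default_prio(struct dsa_switch *ds, int port,
                                         u8 prio)
    {
            struct foo_priv *priv = ds->priv;

            if (prio > 7)
                    return -ERANGE;

            priv->ports[port].default_prio = prio;

            return foo_hw_write_default_prio(priv, port, prio);
    }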
+
 static const struct ethtool_ops dsa_slave_ethtool_ops = {
        .get_drvinfo            = dsa_slave_get_drvinfo,
        .get_regs_len           = dsa_slave_get_regs_len,
@@ -1874,6 +2104,11 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
        .self_test              = dsa_slave_net_selftest,
 };
 
+static const struct dcbnl_rtnl_ops __maybe_unused dsa_slave_dcbnl_ops = {
+       .ieee_setapp            = dsa_slave_dcbnl_ieee_setapp,
+       .ieee_delapp            = dsa_slave_dcbnl_ieee_delapp,
+};
+
 static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
 {
        struct dsa_port *dp = dsa_slave_to_port(dev);
@@ -2098,6 +2333,9 @@ int dsa_slave_create(struct dsa_port *port)
                return -ENOMEM;
 
        slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
+#if IS_ENABLED(CONFIG_DCB)
+       slave_dev->dcbnl_ops = &dsa_slave_dcbnl_ops;
+#endif
        if (!is_zero_ether_addr(port->mac))
                eth_hw_addr_set(slave_dev, port->mac);
        else
@@ -2155,6 +2393,17 @@ int dsa_slave_create(struct dsa_port *port)
                goto out_phy;
        }
 
+       if (IS_ENABLED(CONFIG_DCB)) {
+               ret = dsa_slave_dcbnl_init(slave_dev);
+               if (ret) {
+                       netdev_err(slave_dev,
+                                  "failed to initialize DCB: %pe\n",
+                                  ERR_PTR(ret));
+                       rtnl_unlock();
+                       goto out_unregister;
+               }
+       }
+
        ret = netdev_upper_dev_link(master, slave_dev, NULL);
 
        rtnl_unlock();
@@ -2617,6 +2866,9 @@ static int dsa_slave_fdb_event(struct net_device *dev,
        if (ctx && ctx != dp)
                return 0;
 
+       if (!dp->bridge)
+               return 0;
+
        if (switchdev_fdb_is_dynamically_learned(fdb_info)) {
                if (dsa_port_offloads_bridge_port(dp, orig_dev))
                        return 0;
index 327d66b..d25cd1d 100644 (file)
@@ -212,24 +212,6 @@ static bool dsa_port_host_address_match(struct dsa_port *dp,
        return false;
 }
 
-static bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b)
-{
-       if (a->type != b->type)
-               return false;
-
-       switch (a->type) {
-       case DSA_DB_PORT:
-               return a->dp == b->dp;
-       case DSA_DB_LAG:
-               return a->lag.dev == b->lag.dev;
-       case DSA_DB_BRIDGE:
-               return a->bridge.num == b->bridge.num;
-       default:
-               WARN_ON(1);
-               return false;
-       }
-}
-
 static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
                                              const unsigned char *addr, u16 vid,
                                              struct dsa_db db)
index c8b4bbd..e4b6e3f 100644 (file)
@@ -127,6 +127,7 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
                                   u8 extra)
 {
        struct dsa_port *dp = dsa_slave_to_port(dev);
+       struct net_device *br_dev;
        u8 tag_dev, tag_port;
        enum dsa_cmd cmd;
        u8 *dsa_header;
@@ -149,7 +150,16 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
                tag_port = dp->index;
        }
 
-       if (skb->protocol == htons(ETH_P_8021Q)) {
+       br_dev = dsa_port_bridge_dev_get(dp);
+
+       /* If frame is already 802.1Q tagged, we can convert it to a DSA
+        * tag (avoiding a memmove), but only if the port is standalone
+        * (in which case we always send FROM_CPU) or if the port's
+        * bridge has VLAN filtering enabled (in which case the CPU port
+        * will be a member of the VLAN).
+        */
+       if (skb->protocol == htons(ETH_P_8021Q) &&
+           (!br_dev || br_vlan_enabled(br_dev))) {
                if (extra) {
                        skb_push(skb, extra);
                        dsa_alloc_etype_header(skb, extra);
@@ -166,10 +176,9 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
                        dsa_header[2] &= ~0x10;
                }
        } else {
-               struct net_device *br = dsa_port_bridge_dev_get(dp);
                u16 vid;
 
-               vid = br ? MV88E6XXX_VID_BRIDGED : MV88E6XXX_VID_STANDALONE;
+               vid = br_dev ? MV88E6XXX_VID_BRIDGED : MV88E6XXX_VID_STANDALONE;
 
                skb_push(skb, DSA_HLEN + extra);
                dsa_alloc_etype_header(skb, DSA_HLEN + extra);
index 71fec45..a593ead 100644 (file)
@@ -247,7 +247,7 @@ static const struct dsa_device_ops rtl8_4t_netdev_ops = {
 
 DSA_TAG_DRIVER(rtl8_4t_netdev_ops);
 
-MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_RTL8_4L);
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_RTL8_4T);
 
 static struct dsa_tag_driver *dsa_tag_drivers[] = {
        &DSA_TAG_DRIVER_NAME(rtl8_4_netdev_ops),
index 7f25021..6ffef47 100644 (file)
@@ -221,7 +221,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
                skb_reset_mac_len(skb);
                hsr_forward_skb(skb, master);
        } else {
-               atomic_long_inc(&dev->tx_dropped);
+               dev_core_stats_tx_dropped_inc(dev);
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
index e1b1d08..70e6c87 100644 (file)
@@ -446,6 +446,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
        struct page *page;
        struct sk_buff *trailer;
        int tailen = esp->tailen;
+       unsigned int allocsz;
 
        /* this is non-NULL only with TCP/UDP Encapsulation */
        if (x->encap) {
@@ -455,6 +456,10 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
                        return err;
        }
 
+       allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
+       if (allocsz > ESP_SKB_FRAG_MAXSIZE)
+               goto cow;
+
        if (!skb_cloned(skb)) {
                if (tailen <= skb_tailroom(skb)) {
                        nfrags = 1;
index d87f02a..935026f 100644 (file)
@@ -110,8 +110,7 @@ static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
                                                struct sk_buff *skb,
                                                netdev_features_t features)
 {
-       __skb_push(skb, skb->mac_len);
-       return skb_mac_gso_segment(skb, features);
+       return skb_eth_gso_segment(skb, features, htons(ETH_P_IP));
 }
 
 static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
@@ -160,6 +159,9 @@ static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
        }
 
+       if (proto == IPPROTO_IPV6)
+               skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
+
        __skb_pull(skb, skb_transport_offset(skb));
        ops = rcu_dereference(inet_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment))
index 7408051..af8209f 100644 (file)
@@ -291,7 +291,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
                bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev);
                struct flowi4 fl4 = {
                        .flowi4_iif = LOOPBACK_IFINDEX,
-                       .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
+                       .flowi4_l3mdev = l3mdev_master_ifindex_rcu(dev),
                        .daddr = ip_hdr(skb)->saddr,
                        .flowi4_tos = ip_hdr(skb)->tos & IPTOS_RT_MASK,
                        .flowi4_scope = scope,
@@ -353,9 +353,8 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
        bool dev_match;
 
        fl4.flowi4_oif = 0;
-       fl4.flowi4_iif = l3mdev_master_ifindex_rcu(dev);
-       if (!fl4.flowi4_iif)
-               fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
+       fl4.flowi4_l3mdev = l3mdev_master_ifindex_rcu(dev);
+       fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
        fl4.daddr = src;
        fl4.saddr = dst;
        fl4.flowi4_tos = tos;
index c5a2970..cc8e84e 100644 (file)
@@ -2234,7 +2234,7 @@ void fib_select_multipath(struct fib_result *res, int hash)
 void fib_select_path(struct net *net, struct fib_result *res,
                     struct flowi4 *fl4, const struct sk_buff *skb)
 {
-       if (fl4->flowi4_oif && !(fl4->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF))
+       if (fl4->flowi4_oif)
                goto check_saddr;
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
index 2af2b99..fb0e49c 100644 (file)
@@ -1429,11 +1429,8 @@ bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags,
            !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
                return false;
 
-       if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) {
-               if (flp->flowi4_oif &&
-                   flp->flowi4_oif != nhc->nhc_oif)
-                       return false;
-       }
+       if (flp->flowi4_oif && flp->flowi4_oif != nhc->nhc_oif)
+               return false;
 
        return true;
 }
index 3e2685c..76a411a 100644 (file)
@@ -580,7 +580,7 @@ static struct nf_ct_helper_expectfn callforwarding_nat = {
 };
 
 /****************************************************************************/
-static int __init init(void)
+static int __init nf_nat_h323_init(void)
 {
        BUG_ON(set_h245_addr_hook != NULL);
        BUG_ON(set_h225_addr_hook != NULL);
@@ -607,7 +607,7 @@ static int __init init(void)
 }
 
 /****************************************************************************/
-static void __exit fini(void)
+static void __exit nf_nat_h323_fini(void)
 {
        RCU_INIT_POINTER(set_h245_addr_hook, NULL);
        RCU_INIT_POINTER(set_h225_addr_hook, NULL);
@@ -624,8 +624,8 @@ static void __exit fini(void)
 }
 
 /****************************************************************************/
-module_init(init);
-module_exit(fini);
+module_init(nf_nat_h323_init);
+module_exit(nf_nat_h323_fini);
 
 MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>");
 MODULE_DESCRIPTION("H.323 NAT helper");
index aeb6317..0bcd6ae 100644 (file)
@@ -75,6 +75,7 @@ static const struct nft_expr_ops nft_dup_ipv4_ops = {
        .eval           = nft_dup_ipv4_eval,
        .init           = nft_dup_ipv4_init,
        .dump           = nft_dup_ipv4_dump,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static const struct nla_policy nft_dup_ipv4_policy[NFTA_DUP_MAX + 1] = {
index 03df986..4151eb1 100644 (file)
@@ -152,6 +152,7 @@ static const struct nft_expr_ops nft_fib4_type_ops = {
        .init           = nft_fib_init,
        .dump           = nft_fib_dump,
        .validate       = nft_fib_validate,
+       .reduce         = nft_fib_reduce,
 };
 
 static const struct nft_expr_ops nft_fib4_ops = {
@@ -161,6 +162,7 @@ static const struct nft_expr_ops nft_fib4_ops = {
        .init           = nft_fib_init,
        .dump           = nft_fib_dump,
        .validate       = nft_fib_validate,
+       .reduce         = nft_fib_reduce,
 };
 
 static const struct nft_expr_ops *
index 55fc23a..6cb213b 100644 (file)
@@ -45,6 +45,7 @@ static const struct nft_expr_ops nft_reject_ipv4_ops = {
        .init           = nft_reject_init,
        .dump           = nft_reject_dump,
        .validate       = nft_reject_validate,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_reject_ipv4_type __read_mostly = {
index f444f59..63f3256 100644 (file)
@@ -2263,6 +2263,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        /*
         *      Now we are ready to route packet.
         */
+       fl4.flowi4_l3mdev = 0;
        fl4.flowi4_oif = 0;
        fl4.flowi4_iif = dev->ifindex;
        fl4.flowi4_mark = skb->mark;
@@ -2738,8 +2739,7 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
                res->fi = NULL;
                res->table = NULL;
                if (fl4->flowi4_oif &&
-                   (ipv4_is_multicast(fl4->daddr) ||
-                   !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
+                   (ipv4_is_multicast(fl4->daddr) || !fl4->flowi4_l3mdev)) {
                        /* Apparently, routing tables are wrong. Assume,
                         * that the destination is on link.
                         *
index 1cae27b..ad80d18 100644 (file)
@@ -1271,6 +1271,13 @@ static struct ctl_table ipv4_net_table[] = {
                .proc_handler   = proc_dou8vec_minmax,
                .extra1         = SYSCTL_ONE,
        },
+       {
+               .procname       = "tcp_tso_rtt_log",
+               .data           = &init_net.ipv4.sysctl_tcp_tso_rtt_log,
+               .maxlen         = sizeof(u8),
+               .mode           = 0644,
+               .proc_handler   = proc_dou8vec_minmax,
+       },
        {
                .procname       = "tcp_min_rtt_wlen",
                .data           = &init_net.ipv4.sysctl_tcp_min_rtt_wlen,
index 33f2013..cf18fbc 100644 (file)
@@ -688,7 +688,8 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
        return skb->len < size_goal &&
               sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
               !tcp_rtx_queue_empty(sk) &&
-              refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
+              refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
+              tcp_skb_can_collapse_to(skb);
 }
 
 void tcp_push(struct sock *sk, int flags, int mss_now,
@@ -4434,10 +4435,10 @@ int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *ke
 EXPORT_SYMBOL(tcp_md5_hash_key);
 
 /* Called with rcu_read_lock() */
-bool tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
-                         enum skb_drop_reason *reason,
-                         const void *saddr, const void *daddr,
-                         int family, int dif, int sdif)
+enum skb_drop_reason
+tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
+                    const void *saddr, const void *daddr,
+                    int family, int dif, int sdif)
 {
        /*
         * This gets called for each TCP segment that arrives
@@ -4464,18 +4465,16 @@ bool tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
 
        /* We've parsed the options - do we have a hash? */
        if (!hash_expected && !hash_location)
-               return false;
+               return SKB_NOT_DROPPED_YET;
 
        if (hash_expected && !hash_location) {
-               *reason = SKB_DROP_REASON_TCP_MD5NOTFOUND;
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
-               return true;
+               return SKB_DROP_REASON_TCP_MD5NOTFOUND;
        }
 
        if (!hash_expected && hash_location) {
-               *reason = SKB_DROP_REASON_TCP_MD5UNEXPECTED;
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
-               return true;
+               return SKB_DROP_REASON_TCP_MD5UNEXPECTED;
        }
 
        /* check the signature */
@@ -4483,7 +4482,6 @@ bool tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
                                                 NULL, skb);
 
        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
-               *reason = SKB_DROP_REASON_TCP_MD5FAILURE;
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
                if (family == AF_INET) {
                        net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n",
@@ -4497,9 +4495,9 @@ bool tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
                                        saddr, ntohs(th->source),
                                        daddr, ntohs(th->dest), l3index);
                }
-               return true;
+               return SKB_DROP_REASON_TCP_MD5FAILURE;
        }
-       return false;
+       return SKB_NOT_DROPPED_YET;
 }
 EXPORT_SYMBOL(tcp_inbound_md5_hash);
 
index db5831e..dc95572 100644 (file)
@@ -135,7 +135,6 @@ u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca)
 
        return key;
 }
-EXPORT_SYMBOL_GPL(tcp_ca_get_key_by_name);
 
 char *tcp_ca_get_name_by_key(u32 key, char *buffer)
 {
@@ -151,7 +150,6 @@ char *tcp_ca_get_name_by_key(u32 key, char *buffer)
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(tcp_ca_get_name_by_key);
 
 /* Assign choice of congestion control. */
 void tcp_assign_congestion_control(struct sock *sk)
index 411357a..f9cec62 100644 (file)
@@ -1965,9 +1965,10 @@ process:
                struct sock *nsk;
 
                sk = req->rsk_listener;
-               if (unlikely(tcp_inbound_md5_hash(sk, skb, &drop_reason,
-                                                 &iph->saddr, &iph->daddr,
-                                                 AF_INET, dif, sdif))) {
+               drop_reason = tcp_inbound_md5_hash(sk, skb,
+                                                  &iph->saddr, &iph->daddr,
+                                                  AF_INET, dif, sdif);
+               if (unlikely(drop_reason)) {
                        sk_drops_add(sk, skb);
                        reqsk_put(req);
                        goto discard_it;
@@ -2041,8 +2042,9 @@ process:
                goto discard_and_relse;
        }
 
-       if (tcp_inbound_md5_hash(sk, skb, &drop_reason, &iph->saddr,
-                                &iph->daddr, AF_INET, dif, sdif))
+       drop_reason = tcp_inbound_md5_hash(sk, skb, &iph->saddr,
+                                          &iph->daddr, AF_INET, dif, sdif);
+       if (drop_reason)
                goto discard_and_relse;
 
        nf_reset_ct(skb);
@@ -3135,6 +3137,7 @@ static int __net_init tcp_sk_init(struct net *net)
        /* rfc5961 challenge ack rate limiting */
        net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
        net->ipv4.sysctl_tcp_min_tso_segs = 2;
+       net->ipv4.sysctl_tcp_tso_rtt_log = 9;  /* 2^9 = 512 usec */
        net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
        net->ipv4.sysctl_tcp_autocorking = 1;
        net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
index 2319531..81aaa7d 100644 (file)
@@ -1951,25 +1951,34 @@ static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
 }
 
 /* Return how many segs we'd like on a TSO packet,
- * to send one TSO packet per ms
+ * depending on current pacing rate, and how close the peer is.
+ *
+ * Rationale is:
+ * - For close peers, we'd rather send bigger packets to reduce
+ *   cpu costs, because occasional losses will be repaired fast.
+ * - For long distance/rtt flows, we would like to get ACK clocking
+ *   with 1 ACK per ms.
+ *
+ * Use min_rtt to help adapt TSO burst size, with smaller min_rtt resulting
+ * in bigger TSO bursts. We cut the RTT-based allowance in half
+ * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance
+ * is below 1500 bytes after 6 * ~500 usec = 3ms.
  */
 static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
                            int min_tso_segs)
 {
-       u32 bytes, segs;
+       unsigned long bytes;
+       u32 r;
 
-       bytes = min_t(unsigned long,
-                     sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
-                     sk->sk_gso_max_size);
+       bytes = sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift);
 
-       /* Goal is to send at least one packet per ms,
-        * not one big TSO packet every 100 ms.
-        * This preserves ACK clocking and is consistent
-        * with tcp_tso_should_defer() heuristic.
-        */
-       segs = max_t(u32, bytes / mss_now, min_tso_segs);
+       r = tcp_min_rtt(tcp_sk(sk)) >> sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log;
+       if (r < BITS_PER_TYPE(sk->sk_gso_max_size))
+               bytes += sk->sk_gso_max_size >> r;
+
+       bytes = min_t(unsigned long, bytes, sk->sk_gso_max_size);
 
-       return segs;
+       return max_t(u32, bytes / mss_now, min_tso_segs);
 }
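
To make the new scaling concrete, a stand-alone sketch of the RTT-based
allowance added to the pacing budget (assumed values: sk_gso_max_size =
65536, sysctl_tcp_tso_rtt_log = 9):

#include <stdio.h>

static unsigned long rtt_scaled_allowance(unsigned int min_rtt_us)
{
        const unsigned long gso_max_size = 65536; /* assumed sk_gso_max_size */
        unsigned int r = min_rtt_us >> 9;         /* halve per 512 us of RTT */

        if (r >= 8 * sizeof(gso_max_size))        /* mirrors BITS_PER_TYPE() guard */
                return 0;
        return gso_max_size >> r;
}

int main(void)
{
        printf("%lu\n", rtt_scaled_allowance(100));  /* 65536: LAN-scale RTT */
        printf("%lu\n", rtt_scaled_allowance(1024)); /* 16384: ~1 ms RTT */
        printf("%lu\n", rtt_scaled_allowance(3072)); /* 1024: ~3 ms, below one MTU */
        return 0;
}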
 
 /* Return the number of segments we want in the skb we are transmitting.
index 9e83bcb..6fde0b1 100644 (file)
@@ -28,13 +28,11 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
        memset(fl4, 0, sizeof(*fl4));
        fl4->daddr = daddr->a4;
        fl4->flowi4_tos = tos;
-       fl4->flowi4_oif = l3mdev_master_ifindex_by_index(net, oif);
+       fl4->flowi4_l3mdev = l3mdev_master_ifindex_by_index(net, oif);
        fl4->flowi4_mark = mark;
        if (saddr)
                fl4->saddr = saddr->a4;
 
-       fl4->flowi4_flags = FLOWI_FLAG_SKIP_NH_OIF;
-
        rt = __ip_route_output_key(net, fl4);
        if (!IS_ERR(rt))
                return &rt->dst;
index 7591160..55d604c 100644 (file)
@@ -482,6 +482,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
        struct page *page;
        struct sk_buff *trailer;
        int tailen = esp->tailen;
+       unsigned int allocsz;
 
        if (x->encap) {
                int err = esp6_output_encap(x, skb, esp);
@@ -490,6 +491,10 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
                        return err;
        }
 
+       allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
+       if (allocsz > ESP_SKB_FRAG_MAXSIZE)
+               goto cow;
+
        if (!skb_cloned(skb)) {
                if (tailen <= skb_tailroom(skb)) {
                        nfrags = 1;
@@ -807,8 +812,7 @@ int esp6_input_done2(struct sk_buff *skb, int err)
                struct tcphdr *th;
 
                offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
-
-               if (offset < 0) {
+               if (offset == -1) {
                        err = -EINVAL;
                        goto out;
                }
index ba5e81c..3a29383 100644 (file)
@@ -145,8 +145,7 @@ static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x,
                                                struct sk_buff *skb,
                                                netdev_features_t features)
 {
-       __skb_push(skb, skb->mac_len);
-       return skb_mac_gso_segment(skb, features);
+       return skb_eth_gso_segment(skb, features, htons(ETH_P_IPV6));
 }
 
 static struct sk_buff *xfrm6_transport_gso_segment(struct xfrm_state *x,
@@ -199,6 +198,9 @@ static struct sk_buff *xfrm6_beet_gso_segment(struct xfrm_state *x,
                        ipv6_skip_exthdr(skb, 0, &proto, &frag);
        }
 
+       if (proto == IPPROTO_IPIP)
+               skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
+
        __skb_pull(skb, skb_transport_offset(skb));
        ops = rcu_dereference(inet6_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment))
index e69fac5..e23f058 100644 (file)
@@ -1035,8 +1035,7 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
 #ifdef CONFIG_IPV6_SUBTREES
            ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
 #endif
-          (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
-             (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
+          (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
                dst_release(dst);
                dst = NULL;
        }
@@ -1476,8 +1475,8 @@ static int __ip6_append_data(struct sock *sk,
                      sizeof(struct frag_hdr) : 0) +
                     rt->rt6i_nfheader_len;
 
-       if (mtu < fragheaderlen ||
-           ((mtu - fragheaderlen) & ~7) + fragheaderlen < sizeof(struct frag_hdr))
+       if (mtu <= fragheaderlen ||
+           ((mtu - fragheaderlen) & ~7) + fragheaderlen <= sizeof(struct frag_hdr))
                goto emsgsize;
 
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
index 3a00d95..70a405b 100644 (file)
@@ -73,6 +73,7 @@ static const struct nft_expr_ops nft_dup_ipv6_ops = {
        .eval           = nft_dup_ipv6_eval,
        .init           = nft_dup_ipv6_init,
        .dump           = nft_dup_ipv6_dump,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static const struct nla_policy nft_dup_ipv6_policy[NFTA_DUP_MAX + 1] = {
index 92f3235..b3f163b 100644 (file)
@@ -211,6 +211,7 @@ static const struct nft_expr_ops nft_fib6_type_ops = {
        .init           = nft_fib_init,
        .dump           = nft_fib_dump,
        .validate       = nft_fib_validate,
+       .reduce         = nft_fib_reduce,
 };
 
 static const struct nft_expr_ops nft_fib6_ops = {
@@ -220,6 +221,7 @@ static const struct nft_expr_ops nft_fib6_ops = {
        .init           = nft_fib_init,
        .dump           = nft_fib_dump,
        .validate       = nft_fib_validate,
+       .reduce         = nft_fib_reduce,
 };
 
 static const struct nft_expr_ops *
index ed69c76..5c61294 100644 (file)
@@ -46,6 +46,7 @@ static const struct nft_expr_ops nft_reject_ipv6_ops = {
        .init           = nft_reject_init,
        .dump           = nft_reject_dump,
        .validate       = nft_reject_validate,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_reject_ipv6_type __read_mostly = {
index 6188712..2fa10e6 100644 (file)
@@ -1209,9 +1209,6 @@ INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_lookup(struct net *net,
        struct fib6_node *fn;
        struct rt6_info *rt;
 
-       if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
-               flags &= ~RT6_LOOKUP_F_IFACE;
-
        rcu_read_lock();
        fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
 restart:
@@ -2181,9 +2178,6 @@ int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
        fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
        saved_fn = fn;
 
-       if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
-               oif = 0;
-
 redo_rt6_select:
        rt6_select(net, fn, oif, res, strict);
        if (res->f6i == net->ipv6.fib6_null_entry) {
@@ -3058,12 +3052,6 @@ INDIRECT_CALLABLE_SCOPE struct rt6_info *__ip6_route_redirect(struct net *net,
        struct fib6_info *rt;
        struct fib6_node *fn;
 
-       /* l3mdev_update_flow overrides oif if the device is enslaved; in
-        * this case we must match on the real ingress device, so reset it
-        */
-       if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
-               fl6->flowi6_oif = skb->dev->ifindex;
-
        /* Get the "current" route for this destination and
         * check if the redirect has come from appropriate router.
         *
index cb2bb7d..13678d3 100644 (file)
@@ -1632,8 +1632,10 @@ process:
                struct sock *nsk;
 
                sk = req->rsk_listener;
-               if (tcp_inbound_md5_hash(sk, skb, &drop_reason, &hdr->saddr,
-                                        &hdr->daddr, AF_INET6, dif, sdif)) {
+               drop_reason = tcp_inbound_md5_hash(sk, skb,
+                                                  &hdr->saddr, &hdr->daddr,
+                                                  AF_INET6, dif, sdif);
+               if (drop_reason) {
                        sk_drops_add(sk, skb);
                        reqsk_put(req);
                        goto discard_it;
@@ -1704,8 +1706,9 @@ process:
                goto discard_and_relse;
        }
 
-       if (tcp_inbound_md5_hash(sk, skb, &drop_reason, &hdr->saddr,
-                                &hdr->daddr, AF_INET6, dif, sdif))
+       drop_reason = tcp_inbound_md5_hash(sk, skb, &hdr->saddr, &hdr->daddr,
+                                          AF_INET6, dif, sdif);
+       if (drop_reason)
                goto discard_and_relse;
 
        if (tcp_filter(sk, skb)) {
index d0d2800..ad07904 100644 (file)
@@ -45,6 +45,19 @@ static int __xfrm6_output_finish(struct net *net, struct sock *sk, struct sk_buf
        return xfrm_output(sk, skb);
 }
 
+static int xfrm6_noneed_fragment(struct sk_buff *skb)
+{
+       struct frag_hdr *fh;
+       u8 prevhdr = ipv6_hdr(skb)->nexthdr;
+
+       if (prevhdr != NEXTHDR_FRAGMENT)
+               return 0;
+       fh = (struct frag_hdr *)(skb->data + sizeof(struct ipv6hdr));
+       if (fh->nexthdr == NEXTHDR_ESP || fh->nexthdr == NEXTHDR_AUTH)
+               return 1;
+       return 0;
+}
+
 static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
@@ -73,6 +86,9 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                xfrm6_local_rxpmtu(skb, mtu);
                kfree_skb(skb);
                return -EMSGSIZE;
+       } else if (toobig && xfrm6_noneed_fragment(skb)) {
+               skb->ignore_df = 1;
+               goto skip_frag;
        } else if (!skb->ignore_df && toobig && skb->sk) {
                xfrm_local_error(skb, mtu);
                kfree_skb(skb);
index 55bb2cb..e64e427 100644 (file)
@@ -33,8 +33,7 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
        int err;
 
        memset(&fl6, 0, sizeof(fl6));
-       fl6.flowi6_oif = l3mdev_master_ifindex_by_index(net, oif);
-       fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
+       fl6.flowi6_l3mdev = l3mdev_master_ifindex_by_index(net, oif);
        fl6.flowi6_mark = mark;
        memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
        if (saddr)
index 9bf52a0..fd51db3 100644 (file)
@@ -1699,7 +1699,7 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
 
        xfrm_probe_algs();
 
-       supp_skb = compose_sadb_supported(hdr, GFP_KERNEL);
+       supp_skb = compose_sadb_supported(hdr, GFP_KERNEL | __GFP_ZERO);
        if (!supp_skb) {
                if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC)
                        pfk->registered &= ~(1<<hdr->sadb_msg_satype);
index 1792796..4eb8892 100644 (file)
@@ -250,25 +250,19 @@ int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
        struct net_device *dev;
        int rc = 0;
 
-       rcu_read_lock();
+       /* update flow ensures flowi_l3mdev is set when relevant */
+       if (!fl->flowi_l3mdev)
+               return 0;
 
-       dev = dev_get_by_index_rcu(net, fl->flowi_oif);
-       if (dev && netif_is_l3_master(dev) &&
-           dev->l3mdev_ops->l3mdev_fib_table) {
-               arg->table = dev->l3mdev_ops->l3mdev_fib_table(dev);
-               rc = 1;
-               goto out;
-       }
+       rcu_read_lock();
 
-       dev = dev_get_by_index_rcu(net, fl->flowi_iif);
+       dev = dev_get_by_index_rcu(net, fl->flowi_l3mdev);
        if (dev && netif_is_l3_master(dev) &&
            dev->l3mdev_ops->l3mdev_fib_table) {
                arg->table = dev->l3mdev_ops->l3mdev_fib_table(dev);
                rc = 1;
-               goto out;
        }
 
-out:
        rcu_read_unlock();
 
        return rc;
@@ -277,31 +271,28 @@ out:
 void l3mdev_update_flow(struct net *net, struct flowi *fl)
 {
        struct net_device *dev;
-       int ifindex;
 
        rcu_read_lock();
 
        if (fl->flowi_oif) {
                dev = dev_get_by_index_rcu(net, fl->flowi_oif);
                if (dev) {
-                       ifindex = l3mdev_master_ifindex_rcu(dev);
-                       if (ifindex) {
-                               fl->flowi_oif = ifindex;
-                               fl->flowi_flags |= FLOWI_FLAG_SKIP_NH_OIF;
-                               goto out;
-                       }
+                       if (!fl->flowi_l3mdev)
+                               fl->flowi_l3mdev = l3mdev_master_ifindex_rcu(dev);
+
+                       /* oif set to L3mdev directs lookup to its table;
+                        * reset to avoid oif match in fib_lookup
+                        */
+                       if (netif_is_l3_master(dev))
+                               fl->flowi_oif = 0;
+                       goto out;
                }
        }
 
-       if (fl->flowi_iif) {
+       if (fl->flowi_iif > LOOPBACK_IFINDEX && !fl->flowi_l3mdev) {
                dev = dev_get_by_index_rcu(net, fl->flowi_iif);
-               if (dev) {
-                       ifindex = l3mdev_master_ifindex_rcu(dev);
-                       if (ifindex) {
-                               fl->flowi_iif = ifindex;
-                               fl->flowi_flags |= FLOWI_FLAG_SKIP_NH_OIF;
-                       }
-               }
+               if (dev)
+                       fl->flowi_l3mdev = l3mdev_master_ifindex_rcu(dev);
        }
 
 out:
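
Pulling the flowi changes together, a hypothetical caller (names and
context assumed, not from this patch) showing the new convention used by
the xfrm4/xfrm6 hunks above; the VRF master index travels in
flowi4_l3mdev, so l3mdev_fib_rule_match() only needs to test that one field:

static struct rtable *vrf_route_lookup(struct net *net, __be32 daddr,
                                       __u32 mark, int oif)
{
        struct flowi4 fl4 = {};

        fl4.daddr = daddr;
        fl4.flowi4_mark = mark;
        /* directs the lookup to the VRF table, without oif rewriting
         * or FLOWI_FLAG_SKIP_NH_OIF
         */
        fl4.flowi4_l3mdev = l3mdev_master_ifindex_by_index(net, oif);

        return __ip_route_output_key(net, &fl4);
}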
index 23d25e8..af1df3a 100644 (file)
@@ -34,7 +34,8 @@ mac80211-y := \
        trace.o mlme.o \
        tdls.o \
        ocb.o \
-       airtime.o
+       airtime.o \
+       eht.o
 
 mac80211-$(CONFIG_MAC80211_LEDS) += led.o
 mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
index 7d2925b..218cdc5 100644 (file)
@@ -180,7 +180,8 @@ static void sta_rx_agg_reorder_timer_expired(struct timer_list *t)
 
 static void ieee80211_add_addbaext(struct ieee80211_sub_if_data *sdata,
                                   struct sk_buff *skb,
-                                  const struct ieee80211_addba_ext_ie *req)
+                                  const struct ieee80211_addba_ext_ie *req,
+                                  u16 buf_size)
 {
        struct ieee80211_supported_band *sband;
        struct ieee80211_addba_ext_ie *resp;
@@ -210,6 +211,8 @@ static void ieee80211_add_addbaext(struct ieee80211_sub_if_data *sdata,
                frag_level = cap_frag_level;
        resp->data |= u8_encode_bits(frag_level,
                                     IEEE80211_ADDBA_EXT_FRAG_LEVEL_MASK);
+       resp->data |= u8_encode_bits(buf_size >> IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT,
+                                    IEEE80211_ADDBA_EXT_BUF_SIZE_MASK);
 }
 
 static void ieee80211_send_addba_resp(struct sta_info *sta, u8 *da, u16 tid,
@@ -261,7 +264,7 @@ static void ieee80211_send_addba_resp(struct sta_info *sta, u8 *da, u16 tid,
        mgmt->u.action.u.addba_resp.status = cpu_to_le16(status);
 
        if (sta->sta.he_cap.has_he && addbaext)
-               ieee80211_add_addbaext(sdata, skb, addbaext);
+               ieee80211_add_addbaext(sdata, skb, addbaext, buf_size);
 
        ieee80211_tx_skb(sdata, skb);
 }
@@ -309,8 +312,10 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
                goto end;
        }
 
-       if (sta->sta.he_cap.has_he)
-               max_buf_size = IEEE80211_MAX_AMPDU_BUF;
+       if (sta->sta.eht_cap.has_eht)
+               max_buf_size = IEEE80211_MAX_AMPDU_BUF_EHT;
+       else if (sta->sta.he_cap.has_he)
+               max_buf_size = IEEE80211_MAX_AMPDU_BUF_HE;
        else
                max_buf_size = IEEE80211_MAX_AMPDU_BUF_HT;
 
@@ -502,6 +507,13 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
                        goto free;
        }
 
+       if (sta->sta.eht_cap.has_eht && elems && elems->addba_ext_ie) {
+               u8 buf_size_1k = u8_get_bits(elems->addba_ext_ie->data,
+                                            IEEE80211_ADDBA_EXT_BUF_SIZE_MASK);
+
+               buf_size |= buf_size_1k << IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT;
+       }
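
A round-trip sketch of the split encoding above (field widths assumed:
the legacy ADDBA parameter carries the low 10 bits of the buffer size,
the extension element the bits above them):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint16_t buf_size = 1024;           /* EHT-sized reorder window */
        uint16_t low  = buf_size & 0x3ff;   /* classic 10-bit ADDBA field */
        uint8_t  high = buf_size >> 10;     /* carried in the ADDBA ext IE */

        assert((low | (uint16_t)(high << 10)) == buf_size);
        return 0;
}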
+
        __ieee80211_start_rx_ba_session(sta, dialog_token, timeout,
                                        start_seq_num, ba_policy, tid,
                                        buf_size, true, false,
index 64e8ce9..2619e12 100644 (file)
@@ -472,7 +472,9 @@ u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
                bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE;
                bool cck;
 
-               if (WARN_ON_ONCE(status->band > NL80211_BAND_5GHZ))
+               /* on 60GHz or sub-1GHz band, there are no legacy rates */
+               if (WARN_ON_ONCE(status->band == NL80211_BAND_60GHZ ||
+                                status->band == NL80211_BAND_S1GHZ))
                        return 0;
 
                sband = hw->wiphy->bands[status->band];
index 87a2080..ba75253 100644 (file)
@@ -989,11 +989,29 @@ static int ieee80211_set_ftm_responder_params(
        return 0;
 }
 
+static int
+ieee80211_copy_mbssid_beacon(u8 *pos, struct cfg80211_mbssid_elems *dst,
+                            struct cfg80211_mbssid_elems *src)
+{
+       int i, offset = 0;
+
+       for (i = 0; i < src->cnt; i++) {
+               memcpy(pos + offset, src->elem[i].data, src->elem[i].len);
+               dst->elem[i].len = src->elem[i].len;
+               dst->elem[i].data = pos + offset;
+               offset += dst->elem[i].len;
+       }
+       dst->cnt = src->cnt;
+
+       return offset;
+}
+
 static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
                                   struct cfg80211_beacon_data *params,
                                   const struct ieee80211_csa_settings *csa,
                                   const struct ieee80211_color_change_settings *cca)
 {
+       struct cfg80211_mbssid_elems *mbssid = NULL;
        struct beacon_data *new, *old;
        int new_head_len, new_tail_len;
        int size, err;
@@ -1021,6 +1039,17 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
 
        size = sizeof(*new) + new_head_len + new_tail_len;
 
+       /* new or old multiple BSSID elements? */
+       if (params->mbssid_ies) {
+               mbssid = params->mbssid_ies;
+               size += struct_size(new->mbssid_ies, elem, mbssid->cnt);
+               size += ieee80211_get_mbssid_beacon_len(mbssid);
+       } else if (old && old->mbssid_ies) {
+               mbssid = old->mbssid_ies;
+               size += struct_size(new->mbssid_ies, elem, mbssid->cnt);
+               size += ieee80211_get_mbssid_beacon_len(mbssid);
+       }
+
        new = kzalloc(size, GFP_KERNEL);
        if (!new)
                return -ENOMEM;
@@ -1029,12 +1058,23 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
 
        /*
         * pointers go into the block we allocated,
-        * memory is | beacon_data | head | tail |
+        * memory is | beacon_data | head | tail | mbssid_ies
         */
        new->head = ((u8 *) new) + sizeof(*new);
        new->tail = new->head + new_head_len;
        new->head_len = new_head_len;
        new->tail_len = new_tail_len;
+       /* copy in optional mbssid_ies */
+       if (mbssid) {
+               u8 *pos = new->tail + new->tail_len;
+
+               new->mbssid_ies = (void *)pos;
+               pos += struct_size(new->mbssid_ies, elem, mbssid->cnt);
+               ieee80211_copy_mbssid_beacon(pos, new->mbssid_ies, mbssid);
+               /* update bssid_indicator */
+               sdata->vif.bss_conf.bssid_indicator =
+                       ilog2(__roundup_pow_of_two(mbssid->cnt + 1));
+       }
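
Worked check of the bssid_indicator computation above:
ilog2(__roundup_pow_of_two(cnt + 1)) is the smallest n with
2^n >= cnt + 1, i.e. enough BSSID address space for the transmitting
BSS plus cnt nontransmitted ones (stand-alone sketch, not kernel code):

#include <assert.h>

static unsigned int bssid_indicator(unsigned int cnt)
{
        unsigned int n = 0;

        while ((1u << n) < cnt + 1)   /* smallest n with 2^n >= cnt + 1 */
                n++;
        return n;
}

int main(void)
{
        assert(bssid_indicator(3) == 2);   /* 4 BSSIDs fit in 2 bits */
        assert(bssid_indicator(4) == 3);   /* 5 BSSIDs need 3 bits */
        return 0;
}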
 
        if (csa) {
                new->cntdwn_current_counter = csa->count;
@@ -1332,8 +1372,11 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
 
        mutex_unlock(&local->mtx);
 
-       kfree(sdata->u.ap.next_beacon);
-       sdata->u.ap.next_beacon = NULL;
+       if (sdata->u.ap.next_beacon) {
+               kfree(sdata->u.ap.next_beacon->mbssid_ies);
+               kfree(sdata->u.ap.next_beacon);
+               sdata->u.ap.next_beacon = NULL;
+       }
 
        /* turn off carrier for this interface and dependent VLANs */
        list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
@@ -1716,6 +1759,14 @@ static int sta_apply_parameters(struct ieee80211_local *local,
                                                  (void *)params->he_6ghz_capa,
                                                  sta);
 
+       if (params->eht_capa)
+               ieee80211_eht_cap_ie_to_sta_eht_cap(sdata, sband,
+                                                   (u8 *)params->he_capa,
+                                                   params->he_capa_len,
+                                                   params->eht_capa,
+                                                   params->eht_capa_len,
+                                                   sta);
+
        if (params->opmode_notif_used) {
                /* returned value is only needed for rc update, but the
                 * rc isn't initialized here yet, so ignore it
@@ -2148,14 +2199,12 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
                const struct mesh_setup *setup)
 {
        u8 *new_ie;
-       const u8 *old_ie;
        struct ieee80211_sub_if_data *sdata = container_of(ifmsh,
                                        struct ieee80211_sub_if_data, u.mesh);
        int i;
 
        /* allocate information elements */
        new_ie = NULL;
-       old_ie = ifmsh->ie;
 
        if (setup->ie_len) {
                new_ie = kmemdup(setup->ie, setup->ie_len,
@@ -2165,7 +2214,6 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
        }
        ifmsh->ie_len = setup->ie_len;
        ifmsh->ie = new_ie;
-       kfree(old_ie);
 
        /* now copy the rest of the setup parameters */
        ifmsh->mesh_id_len = setup->mesh_id_len;
@@ -3130,12 +3178,24 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
 
        len = beacon->head_len + beacon->tail_len + beacon->beacon_ies_len +
              beacon->proberesp_ies_len + beacon->assocresp_ies_len +
-             beacon->probe_resp_len + beacon->lci_len + beacon->civicloc_len;
+             beacon->probe_resp_len + beacon->lci_len + beacon->civicloc_len +
+             ieee80211_get_mbssid_beacon_len(beacon->mbssid_ies);
 
        new_beacon = kzalloc(sizeof(*new_beacon) + len, GFP_KERNEL);
        if (!new_beacon)
                return NULL;
 
+       if (beacon->mbssid_ies && beacon->mbssid_ies->cnt) {
+               new_beacon->mbssid_ies =
+                       kzalloc(struct_size(new_beacon->mbssid_ies,
+                                           elem, beacon->mbssid_ies->cnt),
+                               GFP_KERNEL);
+               if (!new_beacon->mbssid_ies) {
+                       kfree(new_beacon);
+                       return NULL;
+               }
+       }
+
        pos = (u8 *)(new_beacon + 1);
        if (beacon->head_len) {
                new_beacon->head_len = beacon->head_len;
@@ -3173,6 +3233,10 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
                memcpy(pos, beacon->probe_resp, beacon->probe_resp_len);
                pos += beacon->probe_resp_len;
        }
+       if (beacon->mbssid_ies && beacon->mbssid_ies->cnt)
+               pos += ieee80211_copy_mbssid_beacon(pos,
+                                                   new_beacon->mbssid_ies,
+                                                   beacon->mbssid_ies);
 
        /* might copy -1, meaning no changes requested */
        new_beacon->ftm_responder = beacon->ftm_responder;
@@ -3195,9 +3259,31 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
 void ieee80211_csa_finish(struct ieee80211_vif *vif)
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+       struct ieee80211_local *local = sdata->local;
 
-       ieee80211_queue_work(&sdata->local->hw,
-                            &sdata->csa_finalize_work);
+       rcu_read_lock();
+
+       if (vif->mbssid_tx_vif == vif) {
+               /* Trigger ieee80211_csa_finish() on the non-transmitting
+                * interfaces when channel switch is received on
+                * transmitting interface
+                */
+               struct ieee80211_sub_if_data *iter;
+
+               list_for_each_entry_rcu(iter, &local->interfaces, list) {
+                       if (!ieee80211_sdata_running(iter))
+                               continue;
+
+                       if (iter == sdata || iter->vif.mbssid_tx_vif != vif)
+                               continue;
+
+                       ieee80211_queue_work(&iter->local->hw,
+                                            &iter->csa_finalize_work);
+               }
+       }
+       ieee80211_queue_work(&local->hw, &sdata->csa_finalize_work);
+
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_csa_finish);
 
@@ -3222,8 +3308,11 @@ static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata,
        case NL80211_IFTYPE_AP:
                err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon,
                                              NULL, NULL);
-               kfree(sdata->u.ap.next_beacon);
-               sdata->u.ap.next_beacon = NULL;
+               if (sdata->u.ap.next_beacon) {
+                       kfree(sdata->u.ap.next_beacon->mbssid_ies);
+                       kfree(sdata->u.ap.next_beacon);
+                       sdata->u.ap.next_beacon = NULL;
+               }
 
                if (err < 0)
                        return err;
@@ -3378,8 +3467,12 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
                if ((params->n_counter_offsets_beacon >
                     IEEE80211_MAX_CNTDWN_COUNTERS_NUM) ||
                    (params->n_counter_offsets_presp >
-                    IEEE80211_MAX_CNTDWN_COUNTERS_NUM))
+                    IEEE80211_MAX_CNTDWN_COUNTERS_NUM)) {
+                       kfree(sdata->u.ap.next_beacon->mbssid_ies);
+                       kfree(sdata->u.ap.next_beacon);
+                       sdata->u.ap.next_beacon = NULL;
                        return -EINVAL;
+               }
 
                csa.counter_offsets_beacon = params->counter_offsets_beacon;
                csa.counter_offsets_presp = params->counter_offsets_presp;
@@ -3389,7 +3482,9 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
 
                err = ieee80211_assign_beacon(sdata, &params->beacon_csa, &csa, NULL);
                if (err < 0) {
+                       kfree(sdata->u.ap.next_beacon->mbssid_ies);
                        kfree(sdata->u.ap.next_beacon);
+                       sdata->u.ap.next_beacon = NULL;
                        return err;
                }
                *changed |= err;
@@ -3479,8 +3574,11 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
 static void ieee80211_color_change_abort(struct ieee80211_sub_if_data  *sdata)
 {
        sdata->vif.color_change_active = false;
-       kfree(sdata->u.ap.next_beacon);
-       sdata->u.ap.next_beacon = NULL;
+       if (sdata->u.ap.next_beacon) {
+               kfree(sdata->u.ap.next_beacon->mbssid_ies);
+               kfree(sdata->u.ap.next_beacon);
+               sdata->u.ap.next_beacon = NULL;
+       }
 
        cfg80211_color_change_aborted_notify(sdata->dev);
 }
@@ -4218,8 +4316,11 @@ ieee80211_set_after_color_change_beacon(struct ieee80211_sub_if_data *sdata,
 
                ret = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon,
                                              NULL, NULL);
-               kfree(sdata->u.ap.next_beacon);
-               sdata->u.ap.next_beacon = NULL;
+               if (sdata->u.ap.next_beacon) {
+                       kfree(sdata->u.ap.next_beacon->mbssid_ies);
+                       kfree(sdata->u.ap.next_beacon);
+                       sdata->u.ap.next_beacon = NULL;
+               }
 
                if (ret < 0)
                        return ret;
@@ -4262,7 +4363,11 @@ ieee80211_set_color_change_beacon(struct ieee80211_sub_if_data *sdata,
                err = ieee80211_assign_beacon(sdata, &params->beacon_color_change,
                                              NULL, &color_change);
                if (err < 0) {
-                       kfree(sdata->u.ap.next_beacon);
+                       if (sdata->u.ap.next_beacon) {
+                               kfree(sdata->u.ap.next_beacon->mbssid_ies);
+                               kfree(sdata->u.ap.next_beacon);
+                               sdata->u.ap.next_beacon = NULL;
+                       }
                        return err;
                }
                *changed |= err;
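
The free-mbssid_ies / free-next_beacon / clear-pointer sequence now
recurs in several teardown and error paths above; a hypothetical helper
(not part of this patch) could centralize it:

static void ieee80211_free_next_beacon(struct ieee80211_sub_if_data *sdata)
{
        if (!sdata->u.ap.next_beacon)
                return;

        kfree(sdata->u.ap.next_beacon->mbssid_ies);
        kfree(sdata->u.ap.next_beacon);
        sdata->u.ap.next_beacon = NULL;
}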
index 76fc36a..e26d42d 100644 (file)
@@ -218,6 +218,8 @@ static enum nl80211_chan_width ieee80211_get_sta_bw(struct sta_info *sta)
                 * might be smaller than the configured bw (160).
                 */
                return NL80211_CHAN_WIDTH_160;
+       case IEEE80211_STA_RX_BW_320:
+               return NL80211_CHAN_WIDTH_320;
        default:
                WARN_ON(1);
                return NL80211_CHAN_WIDTH_20;
@@ -417,7 +419,7 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
 {
        u32 changed;
 
-       /* expected to handle only 20/40/80/160 channel widths */
+       /* expected to handle only 20/40/80/160/320 channel widths */
        switch (chandef->width) {
        case NL80211_CHAN_WIDTH_20_NOHT:
        case NL80211_CHAN_WIDTH_20:
@@ -425,6 +427,7 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
        case NL80211_CHAN_WIDTH_80:
        case NL80211_CHAN_WIDTH_80P80:
        case NL80211_CHAN_WIDTH_160:
+       case NL80211_CHAN_WIDTH_320:
                break;
        default:
                WARN_ON(1);
diff --git a/net/mac80211/eht.c b/net/mac80211/eht.c
new file mode 100644 (file)
index 0000000..364ad0e
--- /dev/null
+++ b/net/mac80211/eht.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * EHT handling
+ *
+ * Copyright(c) 2021-2022 Intel Corporation
+ */
+
+#include "ieee80211_i.h"
+
+void
+ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata,
+                                   struct ieee80211_supported_band *sband,
+                                   const u8 *he_cap_ie, u8 he_cap_len,
+                                   const struct ieee80211_eht_cap_elem *eht_cap_ie_elem,
+                                   u8 eht_cap_len, struct sta_info *sta)
+{
+       struct ieee80211_sta_eht_cap *eht_cap = &sta->sta.eht_cap;
+       struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie;
+       u8 eht_ppe_size = 0;
+       u8 mcs_nss_size;
+       u8 eht_total_size = sizeof(eht_cap->eht_cap_elem);
+       u8 *pos = (u8 *)eht_cap_ie_elem;
+
+       memset(eht_cap, 0, sizeof(*eht_cap));
+
+       if (!eht_cap_ie_elem ||
+           !ieee80211_get_eht_iftype_cap(sband,
+                                        ieee80211_vif_type_p2p(&sdata->vif)))
+               return;
+
+       mcs_nss_size = ieee80211_eht_mcs_nss_size(he_cap_ie_elem,
+                                                 &eht_cap_ie_elem->fixed);
+
+       eht_total_size += mcs_nss_size;
+
+       /* Calculate the PPE thresholds length only if the header is present */
+       if (eht_cap_ie_elem->fixed.phy_cap_info[5] &
+                       IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
+               u16 eht_ppe_hdr;
+
+               if (eht_cap_len < eht_total_size + sizeof(u16))
+                       return;
+
+               eht_ppe_hdr = get_unaligned_le16(eht_cap_ie_elem->optional + mcs_nss_size);
+               eht_ppe_size =
+                       ieee80211_eht_ppe_size(eht_ppe_hdr,
+                                              eht_cap_ie_elem->fixed.phy_cap_info);
+               eht_total_size += eht_ppe_size;
+
+               /* we calculate as if NSS > 8 are valid, but don't handle that */
+               if (eht_ppe_size > sizeof(eht_cap->eht_ppe_thres))
+                       return;
+       }
+
+       if (eht_cap_len < eht_total_size)
+               return;
+
+       /* Copy the static portion of the EHT capabilities */
+       memcpy(&eht_cap->eht_cap_elem, pos, sizeof(eht_cap->eht_cap_elem));
+       pos += sizeof(eht_cap->eht_cap_elem);
+
+       /* Copy MCS/NSS which depends on the peer capabilities */
+       memset(&eht_cap->eht_mcs_nss_supp, 0,
+              sizeof(eht_cap->eht_mcs_nss_supp));
+       memcpy(&eht_cap->eht_mcs_nss_supp, pos, mcs_nss_size);
+
+       if (eht_ppe_size)
+               memcpy(eht_cap->eht_ppe_thres,
+                      &eht_cap_ie_elem->optional[mcs_nss_size],
+                      eht_ppe_size);
+
+       eht_cap->has_eht = true;
+
+       sta->cur_max_bandwidth = ieee80211_sta_cap_rx_bw(sta);
+       sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta);
+}
index 95aaf00..d4a7ba4 100644 (file)
@@ -257,6 +257,7 @@ struct beacon_data {
        struct ieee80211_meshconf_ie *meshconf;
        u16 cntdwn_counter_offsets[IEEE80211_MAX_CNTDWN_COUNTERS_NUM];
        u8 cntdwn_current_counter;
+       struct cfg80211_mbssid_elems *mbssid_ies;
        struct rcu_head rcu_head;
 };
 
@@ -366,6 +367,8 @@ enum ieee80211_sta_flags {
        IEEE80211_STA_DISABLE_WMM       = BIT(14),
        IEEE80211_STA_ENABLE_RRM        = BIT(15),
        IEEE80211_STA_DISABLE_HE        = BIT(16),
+       IEEE80211_STA_DISABLE_EHT       = BIT(17),
+       IEEE80211_STA_DISABLE_320MHZ    = BIT(18),
 };
 
 struct ieee80211_mgd_auth_data {
@@ -765,6 +768,8 @@ struct ieee80211_if_mesh {
  *     back to wireless media and to the local net stack.
  * @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume.
  * @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver
+ * @IEEE80211_SDATA_DISCONNECT_HW_RESTART: Disconnect after hardware restart
+ *  recovery
  */
 enum ieee80211_sub_if_data_flags {
        IEEE80211_SDATA_ALLMULTI                = BIT(0),
@@ -772,6 +777,7 @@ enum ieee80211_sub_if_data_flags {
        IEEE80211_SDATA_DONT_BRIDGE_PACKETS     = BIT(3),
        IEEE80211_SDATA_DISCONNECT_RESUME       = BIT(4),
        IEEE80211_SDATA_IN_DRIVER               = BIT(5),
+       IEEE80211_SDATA_DISCONNECT_HW_RESTART   = BIT(6),
 };
 
 /**
@@ -1078,6 +1084,20 @@ ieee80211_vif_get_shift(struct ieee80211_vif *vif)
        return shift;
 }
 
+static inline int
+ieee80211_get_mbssid_beacon_len(struct cfg80211_mbssid_elems *elems)
+{
+       int i, len = 0;
+
+       if (!elems)
+               return 0;
+
+       for (i = 0; i < elems->cnt; i++)
+               len += elems->elem[i].len;
+
+       return len;
+}
+
 enum {
        IEEE80211_RX_MSG        = 1,
        IEEE80211_TX_STATUS_MSG = 2,
@@ -1587,6 +1607,8 @@ struct ieee802_11_elems {
        const struct ieee80211_s1g_oper_ie *s1g_oper;
        const struct ieee80211_s1g_bcn_compat_ie *s1g_bcn_compat;
        const struct ieee80211_aid_response_ie *aid_resp;
+       const struct ieee80211_eht_cap_elem *eht_cap;
+       const struct ieee80211_eht_operation *eht_operation;
 
        /* length of them, respectively */
        u8 ext_capab_len;
@@ -1608,6 +1630,7 @@ struct ieee802_11_elems {
        u8 bssid_index_len;
        u8 tx_pwr_env_len[IEEE80211_TPE_MAX_IE_COUNT];
        u8 tx_pwr_env_num;
+       u8 eht_cap_len;
 
        /* whether a parse error occurred while retrieving these elements */
        bool parse_error;
@@ -2411,6 +2434,7 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, u32 vht_cap_info,
                                struct cfg80211_chan_def *chandef);
 bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
                                    const struct ieee80211_he_operation *he_oper,
+                                   const struct ieee80211_eht_operation *eht_oper,
                                    struct cfg80211_chan_def *chandef);
 bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper,
                                struct cfg80211_chan_def *chandef);
@@ -2514,4 +2538,16 @@ u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw,
 void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache);
 void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache);
 
+u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype);
+u8 *ieee80211_ie_build_eht_cap(u8 *pos,
+                              const struct ieee80211_sta_he_cap *he_cap,
+                              const struct ieee80211_sta_eht_cap *eht_cap,
+                              u8 *end);
+
+void
+ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata,
+                                   struct ieee80211_supported_band *sband,
+                                   const u8 *he_cap_ie, u8 he_cap_len,
+                                   const struct ieee80211_eht_cap_elem *eht_cap_ie_elem,
+                                   u8 eht_cap_len, struct sta_info *sta);
 #endif /* IEEE80211_I_H */
index 5311c3c..a48a32f 100644 (file)
@@ -909,7 +909,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
        int result, i;
        enum nl80211_band band;
        int channels, max_bitrates;
-       bool supp_ht, supp_vht, supp_he;
+       bool supp_ht, supp_vht, supp_he, supp_eht;
        struct cfg80211_chan_def dflt_chandef = {};
 
        if (ieee80211_hw_check(hw, QUEUE_CONTROL) &&
@@ -978,6 +978,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
        supp_ht = false;
        supp_vht = false;
        supp_he = false;
+       supp_eht = false;
        for (band = 0; band < NUM_NL80211_BANDS; band++) {
                struct ieee80211_supported_band *sband;
 
@@ -1021,6 +1022,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                        iftd = &sband->iftype_data[i];
 
                        supp_he = supp_he || iftd->he_cap.has_he;
+                       supp_eht = supp_eht || iftd->eht_cap.has_eht;
                }
 
                /* HT, VHT, HE require QoS, thus >= 4 queues */
@@ -1028,6 +1030,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                            (supp_ht || supp_vht || supp_he)))
                        return -EINVAL;
 
+               /* EHT requires HE support */
+               if (WARN_ON(supp_eht && !supp_he))
+                       return -EINVAL;
+
                if (!sband->ht_cap.ht_supported)
                        continue;
 
@@ -1138,6 +1144,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                        3 + sizeof(struct ieee80211_he_cap_elem) +
                        sizeof(struct ieee80211_he_mcs_nss_supp) +
                        IEEE80211_HE_PPE_THRES_MAX_LEN;
+
+               if (supp_eht)
+                       local->scan_ies_len +=
+                               3 + sizeof(struct ieee80211_eht_cap_elem) +
+                               sizeof(struct ieee80211_eht_mcs_nss_supp) +
+                               IEEE80211_EHT_PPE_THRES_MAX_LEN;
        }
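
The leading "3 +" in the scan_ies_len sums accounts for the header of
an extension element; a sketch of that layout (spelled out here as an
assumption, not stated in the patch):

struct element_hdr_sketch {
        u8 id;       /* WLAN_EID_EXTENSION (255) */
        u8 datalen;  /* covers ext_id plus the payload */
        u8 ext_id;   /* e.g. the EHT capabilities extension ID */
} __packed;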
 
        if (!local->ops->hw_scan) {
index 6847fdf..5275f4f 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2018 - 2020 Intel Corporation
+ * Copyright (C) 2018 - 2021 Intel Corporation
  * Authors:    Luis Carlos Cobo <luisca@cozybit.com>
  *            Javier Cardona <javier@cozybit.com>
  */
@@ -104,7 +104,8 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
        ieee80211_chandef_vht_oper(&sdata->local->hw, vht_cap_info,
                                   ie->vht_operation, ie->ht_operation,
                                   &sta_chan_def);
-       ieee80211_chandef_he_6ghz_oper(sdata, ie->he_operation, &sta_chan_def);
+       ieee80211_chandef_he_6ghz_oper(sdata, ie->he_operation, NULL,
+                                      &sta_chan_def);
 
        if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef,
                                         &sta_chan_def))
@@ -852,7 +853,7 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
 
        bcn = kzalloc(sizeof(*bcn) + head_len + tail_len, GFP_KERNEL);
        /* need an skb for IE builders to operate on */
-       skb = dev_alloc_skb(max(head_len, tail_len));
+       skb = __dev_alloc_skb(max(head_len, tail_len), GFP_KERNEL);
 
        if (!bcn || !skb)
                goto out_free;
index 950be0f..1b30c72 100644 (file)
@@ -150,6 +150,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
                             const struct ieee80211_ht_operation *ht_oper,
                             const struct ieee80211_vht_operation *vht_oper,
                             const struct ieee80211_he_operation *he_oper,
+                            const struct ieee80211_eht_operation *eht_oper,
                             const struct ieee80211_s1g_oper_ie *s1g_oper,
                             struct cfg80211_chan_def *chandef, bool tracking)
 {
@@ -165,12 +166,14 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
        chandef->freq1_offset = channel->freq_offset;
 
        if (channel->band == NL80211_BAND_6GHZ) {
-               if (!ieee80211_chandef_he_6ghz_oper(sdata, he_oper, chandef)) {
+               if (!ieee80211_chandef_he_6ghz_oper(sdata, he_oper, eht_oper,
+                                                   chandef)) {
                        mlme_dbg(sdata,
-                                "bad 6 GHz operation, disabling HT/VHT/HE\n");
+                                "bad 6 GHz operation, disabling HT/VHT/HE/EHT\n");
                        ret = IEEE80211_STA_DISABLE_HT |
                              IEEE80211_STA_DISABLE_VHT |
-                             IEEE80211_STA_DISABLE_HE;
+                             IEEE80211_STA_DISABLE_HE |
+                             IEEE80211_STA_DISABLE_EHT;
                } else {
                        ret = 0;
                }
@@ -197,7 +200,8 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
                mlme_dbg(sdata, "HT operation missing / HT not supported\n");
                ret = IEEE80211_STA_DISABLE_HT |
                      IEEE80211_STA_DISABLE_VHT |
-                     IEEE80211_STA_DISABLE_HE;
+                     IEEE80211_STA_DISABLE_HE |
+                     IEEE80211_STA_DISABLE_EHT;
                goto out;
        }
 
@@ -220,7 +224,8 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
                           ht_oper->primary_chan, channel->band);
                ret = IEEE80211_STA_DISABLE_HT |
                      IEEE80211_STA_DISABLE_VHT |
-                     IEEE80211_STA_DISABLE_HE;
+                     IEEE80211_STA_DISABLE_HE |
+                     IEEE80211_STA_DISABLE_EHT;
                goto out;
        }
 
@@ -261,7 +266,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
                        if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE))
                                sdata_info(sdata,
                                           "HE AP VHT information is invalid, disabling HE\n");
-                       ret = IEEE80211_STA_DISABLE_HE;
+                       ret = IEEE80211_STA_DISABLE_HE | IEEE80211_STA_DISABLE_EHT;
                        goto out;
                }
        } else if (!ieee80211_chandef_vht_oper(&sdata->local->hw,
@@ -341,7 +346,8 @@ out:
                if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
                        ret = IEEE80211_STA_DISABLE_HT |
                              IEEE80211_STA_DISABLE_VHT |
-                             IEEE80211_STA_DISABLE_HE;
+                             IEEE80211_STA_DISABLE_HE |
+                             IEEE80211_STA_DISABLE_EHT;
                        break;
                }
 
@@ -350,7 +356,11 @@ out:
 
        if (!he_oper || !cfg80211_chandef_usable(sdata->wdev.wiphy, chandef,
                                                 IEEE80211_CHAN_NO_HE))
-               ret |= IEEE80211_STA_DISABLE_HE;
+               ret |= IEEE80211_STA_DISABLE_HE | IEEE80211_STA_DISABLE_EHT;
+
+       if (!eht_oper || !cfg80211_chandef_usable(sdata->wdev.wiphy, chandef,
+                                                 IEEE80211_CHAN_NO_EHT))
+               ret |= IEEE80211_STA_DISABLE_EHT;
 
        if (chandef->width != vht_chandef.width && !tracking)
                sdata_info(sdata,
@@ -367,6 +377,7 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
                               const struct ieee80211_ht_operation *ht_oper,
                               const struct ieee80211_vht_operation *vht_oper,
                               const struct ieee80211_he_operation *he_oper,
+                              const struct ieee80211_eht_operation *eht_oper,
                               const struct ieee80211_s1g_oper_ie *s1g_oper,
                               const u8 *bssid, u32 *changed)
 {
@@ -392,9 +403,16 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
        /* don't check HE if we associated as non-HE station */
        if (ifmgd->flags & IEEE80211_STA_DISABLE_HE ||
            !ieee80211_get_he_iftype_cap(sband,
-                                        ieee80211_vif_type_p2p(&sdata->vif)))
-
+                                        ieee80211_vif_type_p2p(&sdata->vif))) {
                he_oper = NULL;
+               eht_oper = NULL;
+       }
+
+       /* don't check EHT if we associated as non-EHT station */
+       if (ifmgd->flags & IEEE80211_STA_DISABLE_EHT ||
+           !ieee80211_get_eht_iftype_cap(sband,
+                                        ieee80211_vif_type_p2p(&sdata->vif)))
+               eht_oper = NULL;
 
        if (WARN_ON_ONCE(!sta))
                return -EINVAL;
@@ -414,7 +432,8 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
 
        /* calculate new channel (type) based on HT/VHT/HE operation IEs */
        flags = ieee80211_determine_chantype(sdata, sband, chan, vht_cap_info,
-                                            ht_oper, vht_oper, he_oper,
+                                            ht_oper, vht_oper,
+                                            he_oper, eht_oper,
                                             s1g_oper, &chandef, true);
 
        /*
@@ -448,9 +467,11 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
        if (flags != (ifmgd->flags & (IEEE80211_STA_DISABLE_HT |
                                      IEEE80211_STA_DISABLE_VHT |
                                      IEEE80211_STA_DISABLE_HE |
+                                     IEEE80211_STA_DISABLE_EHT |
                                      IEEE80211_STA_DISABLE_40MHZ |
                                      IEEE80211_STA_DISABLE_80P80MHZ |
-                                     IEEE80211_STA_DISABLE_160MHZ)) ||
+                                     IEEE80211_STA_DISABLE_160MHZ |
+                                     IEEE80211_STA_DISABLE_320MHZ)) ||
            !cfg80211_chandef_valid(&chandef)) {
                sdata_info(sdata,
                           "AP %pM changed caps/bw in a way we can't support (0x%x/0x%x) - disconnect\n",
@@ -672,6 +693,48 @@ static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
        ieee80211_ie_build_he_6ghz_cap(sdata, skb);
 }
 
+static void ieee80211_add_eht_ie(struct ieee80211_sub_if_data *sdata,
+                                struct sk_buff *skb,
+                                struct ieee80211_supported_band *sband)
+{
+       u8 *pos;
+       const struct ieee80211_sta_he_cap *he_cap;
+       const struct ieee80211_sta_eht_cap *eht_cap;
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       u8 eht_cap_size;
+       bool reg_cap = false;
+
+       rcu_read_lock();
+       chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+       if (!WARN_ON_ONCE(!chanctx_conf))
+               reg_cap = cfg80211_chandef_usable(sdata->wdev.wiphy,
+                                                 &chanctx_conf->def,
+                                                 IEEE80211_CHAN_NO_HE |
+                                                 IEEE80211_CHAN_NO_EHT);
+       rcu_read_unlock();
+
+       he_cap = ieee80211_get_he_iftype_cap(sband,
+                                            ieee80211_vif_type_p2p(&sdata->vif));
+       eht_cap = ieee80211_get_eht_iftype_cap(sband,
+                                              ieee80211_vif_type_p2p(&sdata->vif));
+
+       /*
+        * The EHT capabilities element is only added if the HE capabilities
+        * element was added, so assume that 'he_cap' is valid and don't check it.
+        */
+       if (WARN_ON(!he_cap || !eht_cap || !reg_cap))
+               return;
+
+       eht_cap_size =
+               2 + 1 + sizeof(eht_cap->eht_cap_elem) +
+               ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
+                                          &eht_cap->eht_cap_elem) +
+               ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
+                                      eht_cap->eht_cap_elem.phy_cap_info);
+       pos = skb_put(skb, eht_cap_size);
+       ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + eht_cap_size);
+}
+
 static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_local *local = sdata->local;
@@ -992,17 +1055,22 @@ skip_rates:
                                     &assoc_data->ap_vht_cap);
 
        /*
-        * If AP doesn't support HT, mark HE as disabled.
+        * If AP doesn't support HT, mark HE and EHT as disabled.
         * If on the 5GHz band, make sure it supports VHT.
         */
        if (ifmgd->flags & IEEE80211_STA_DISABLE_HT ||
            (sband->band == NL80211_BAND_5GHZ &&
             ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
-               ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+               ifmgd->flags |= IEEE80211_STA_DISABLE_HE |
+                               IEEE80211_STA_DISABLE_EHT;
 
-       if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE))
+       if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE)) {
                ieee80211_add_he_ie(sdata, skb, sband);
 
+               if (!(ifmgd->flags & IEEE80211_STA_DISABLE_EHT))
+                       ieee80211_add_eht_ie(sdata, skb, sband);
+       }
+
        /* if present, add any custom non-vendor IEs that go after HE */
        if (assoc_data->ie_len) {
                noffset = ieee80211_ie_split_vendor(assoc_data->ie,
@@ -3529,10 +3597,25 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
                        bss_conf->twt_protected = false;
 
                changed |= ieee80211_recalc_twt_req(sdata, sta, elems);
+
+               if (elems->eht_operation && elems->eht_cap &&
+                   !(ifmgd->flags & IEEE80211_STA_DISABLE_EHT)) {
+                       ieee80211_eht_cap_ie_to_sta_eht_cap(sdata, sband,
+                                                           elems->he_cap,
+                                                           elems->he_cap_len,
+                                                           elems->eht_cap,
+                                                           elems->eht_cap_len,
+                                                           sta);
+
+                       bss_conf->eht_support = sta->sta.eht_cap.has_eht;
+               } else {
+                       bss_conf->eht_support = false;
+               }
        } else {
                bss_conf->he_support = false;
                bss_conf->twt_requester = false;
                bss_conf->twt_protected = false;
+               bss_conf->eht_support = false;
        }
 
        bss_conf->twt_broadcast =
@@ -4276,6 +4359,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
        if (ieee80211_config_bw(sdata, sta, elems->ht_cap_elem,
                                elems->vht_cap_elem, elems->ht_operation,
                                elems->vht_operation, elems->he_operation,
+                               elems->eht_operation,
                                elems->s1g_oper, bssid, &changed)) {
                mutex_unlock(&local->sta_mtx);
                sdata_info(sdata,
@@ -4850,6 +4934,7 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
 
        sdata_unlock(sdata);
 }
+#endif
 
 void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
 {
@@ -4871,9 +4956,20 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
                sdata_unlock(sdata);
                return;
        }
+
+       if (sdata->flags & IEEE80211_SDATA_DISCONNECT_HW_RESTART) {
+               sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_HW_RESTART;
+               mlme_dbg(sdata, "driver requested disconnect after hardware restart\n");
+               ieee80211_sta_connection_lost(sdata,
+                                             ifmgd->associated->bssid,
+                                             WLAN_REASON_UNSPECIFIED,
+                                             true);
+               sdata_unlock(sdata);
+               return;
+       }
+
        sdata_unlock(sdata);
 }
-#endif
 
 /* interface setup */
 void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
@@ -5206,6 +5302,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
        const struct ieee80211_ht_operation *ht_oper = NULL;
        const struct ieee80211_vht_operation *vht_oper = NULL;
        const struct ieee80211_he_operation *he_oper = NULL;
+       const struct ieee80211_eht_operation *eht_oper = NULL;
        const struct ieee80211_s1g_oper_ie *s1g_oper = NULL;
        struct ieee80211_supported_band *sband;
        struct cfg80211_chan_def chandef;
@@ -5236,22 +5333,31 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
 
        /* disable HT/VHT/HE if we don't support them */
        if (!sband->ht_cap.ht_supported && !is_6ghz) {
-               mlme_dbg(sdata, "HT not supported, disabling HT/VHT/HE\n");
+               mlme_dbg(sdata, "HT not supported, disabling HT/VHT/HE/EHT\n");
                ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
                ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
                ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+               ifmgd->flags |= IEEE80211_STA_DISABLE_EHT;
        }
 
        if (!sband->vht_cap.vht_supported && is_5ghz) {
-               mlme_dbg(sdata, "VHT not supported, disabling VHT/HE\n");
+               mlme_dbg(sdata, "VHT not supported, disabling VHT/HE/EHT\n");
                ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
                ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+               ifmgd->flags |= IEEE80211_STA_DISABLE_EHT;
        }
 
        if (!ieee80211_get_he_iftype_cap(sband,
                                         ieee80211_vif_type_p2p(&sdata->vif))) {
-               mlme_dbg(sdata, "HE not supported, disabling it\n");
+               mlme_dbg(sdata, "HE not supported, disabling HE and EHT\n");
                ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+               ifmgd->flags |= IEEE80211_STA_DISABLE_EHT;
+       }
+
+       if (!ieee80211_get_eht_iftype_cap(sband,
+                                         ieee80211_vif_type_p2p(&sdata->vif))) {
+               mlme_dbg(sdata, "EHT not supported, disabling EHT\n");
+               ifmgd->flags |= IEEE80211_STA_DISABLE_EHT;
        }
 
        if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && !is_6ghz) {
@@ -5273,6 +5379,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
                        ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
                        ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
                        ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+                       ifmgd->flags |= IEEE80211_STA_DISABLE_EHT;
                }
 
                if (!elems->vht_cap_elem) {
@@ -5312,7 +5419,29 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
 
                if (!ieee80211_verify_peer_he_mcs_support(sdata, ies, he_oper) ||
                    !ieee80211_verify_sta_he_mcs_support(sdata, sband, he_oper))
-                       ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+                       ifmgd->flags |= IEEE80211_STA_DISABLE_HE |
+                                       IEEE80211_STA_DISABLE_EHT;
+       }
+
+       /*
+        * EHT requires HE to be supported as well. Specifically for 6 GHz
+        * channels, the operation channel information can only be deduced from
+        * both the 6 GHz operation information (from the HE operation IE) and
+        * EHT operation.
+        */
+       if (!(ifmgd->flags & (IEEE80211_STA_DISABLE_HE |
+                             IEEE80211_STA_DISABLE_EHT)) && he_oper) {
+               const struct cfg80211_bss_ies *ies;
+               const u8 *eht_oper_ie;
+
+               ies = rcu_dereference(cbss->ies);
+               eht_oper_ie = cfg80211_find_ext_ie(WLAN_EID_EXT_EHT_OPERATION,
+                                                  ies->data, ies->len);
+               if (eht_oper_ie && eht_oper_ie[1] >=
+                   1 + sizeof(struct ieee80211_eht_operation))
+                       eht_oper = (void *)(eht_oper_ie + 3);
+               else
+                       eht_oper = NULL;
        }
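For reference, cfg80211_find_ext_ie() returns a pointer to the element header, which explains the offsets used above:

	/*
	 * eht_oper_ie[0]   WLAN_EID_EXTENSION
	 * eht_oper_ie[1]   element length: 1 (extension ID octet) + payload
	 * eht_oper_ie[2]   WLAN_EID_EXT_EHT_OPERATION
	 * eht_oper_ie + 3  start of struct ieee80211_eht_operation
	 *
	 * hence the ">= 1 + sizeof(...)" length check and the "+ 3"
	 * payload offset.
	 */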
 
        /* Allow VHT if at least one channel on the sband supports 80 MHz */
@@ -5341,7 +5470,8 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
        ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
                                                     cbss->channel,
                                                     bss->vht_cap_info,
-                                                    ht_oper, vht_oper, he_oper,
+                                                    ht_oper, vht_oper,
+                                                    he_oper, eht_oper,
                                                     s1g_oper,
                                                     &chandef, false);
 
@@ -5832,6 +5962,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                        ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
                        ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
                        ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+                       ifmgd->flags |= IEEE80211_STA_DISABLE_EHT;
                        netdev_info(sdata->dev,
                                    "disabling HT/VHT/HE due to WEP/TKIP use\n");
                }
@@ -5839,11 +5970,12 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 
        sband = local->hw.wiphy->bands[req->bss->channel->band];
 
-       /* also disable HT/VHT/HE if the AP doesn't use WMM */
+       /* also disable HT/VHT/HE/EHT if the AP doesn't use WMM */
        if (!bss->wmm_used) {
                ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
                ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
                ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+               ifmgd->flags |= IEEE80211_STA_DISABLE_EHT;
                netdev_info(sdata->dev,
                            "disabling HT/VHT/HE as WMM/QoS is not supported by the AP\n");
        }
@@ -5897,9 +6029,11 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                memcpy(&assoc_data->ap_vht_cap, vht_elem->data,
                       sizeof(struct ieee80211_vht_cap));
        } else if (is_5ghz) {
-               sdata_info(sdata, "VHT capa missing/short, disabling VHT/HE\n");
+               sdata_info(sdata,
+                          "VHT capa missing/short, disabling VHT/HE/EHT\n");
                ifmgd->flags |= IEEE80211_STA_DISABLE_VHT |
-                               IEEE80211_STA_DISABLE_HE;
+                               IEEE80211_STA_DISABLE_HE |
+                               IEEE80211_STA_DISABLE_EHT;
        }
        rcu_read_unlock();
 
@@ -5978,6 +6112,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
                ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
                ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+               ifmgd->flags |= IEEE80211_STA_DISABLE_EHT;
        }
 
        if (req->flags & ASSOC_REQ_DISABLE_VHT) {
@@ -5986,8 +6121,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        }
 
        if (req->flags & ASSOC_REQ_DISABLE_HE) {
-               mlme_dbg(sdata, "HE disabled by flag, disabling VHT\n");
+               mlme_dbg(sdata, "HE disabled by flag, disabling HE/EHT\n");
                ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+               ifmgd->flags |= IEEE80211_STA_DISABLE_EHT;
        }
 
        err = ieee80211_prep_connection(sdata, req->bss, true, override);
index 6d054fe..b6b20f3 100644 (file)
@@ -5042,6 +5042,19 @@ ieee80211_beacon_get_finish(struct ieee80211_hw *hw,
                       IEEE80211_TX_CTL_FIRST_FRAGMENT;
 }
 
+static void
+ieee80211_beacon_add_mbssid(struct sk_buff *skb, struct beacon_data *beacon)
+{
+       int i;
+
+       if (!beacon->mbssid_ies)
+               return;
+
+       for (i = 0; i < beacon->mbssid_ies->cnt; i++)
+               skb_put_data(skb, beacon->mbssid_ies->elem[i].data,
+                            beacon->mbssid_ies->elem[i].len);
+}
+
 static struct sk_buff *
 ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
                        struct ieee80211_vif *vif,
@@ -5055,6 +5068,7 @@ ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
        struct ieee80211_if_ap *ap = &sdata->u.ap;
        struct sk_buff *skb = NULL;
        u16 csa_off_base = 0;
+       int mbssid_len;
 
        if (beacon->cntdwn_counter_offsets[0]) {
                if (!is_template)
@@ -5064,11 +5078,12 @@ ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
        }
 
        /* headroom, head length,
-        * tail length and maximum TIM length
+        * tail length, maximum TIM length and multiple BSSID length
         */
+       mbssid_len = ieee80211_get_mbssid_beacon_len(beacon->mbssid_ies);
        skb = dev_alloc_skb(local->tx_headroom + beacon->head_len +
                            beacon->tail_len + 256 +
-                           local->hw.extra_beacon_tailroom);
+                           local->hw.extra_beacon_tailroom + mbssid_len);
        if (!skb)
                return NULL;
 
@@ -5082,6 +5097,11 @@ ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
                offs->tim_length = skb->len - beacon->head_len;
                offs->cntdwn_counter_offs[0] = beacon->cntdwn_counter_offsets[0];
 
+               if (mbssid_len) {
+                       ieee80211_beacon_add_mbssid(skb, beacon);
+                       offs->mbssid_off = skb->len - mbssid_len;
+               }
+
                /* for AP the csa offsets are from tail */
                csa_off_base = skb->len;
        }
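ieee80211_get_mbssid_beacon_len() itself is not shown in this hunk; a plausible shape for it, consistent with how ieee80211_beacon_add_mbssid() walks the stored elements (treat this as a sketch, not the exact helper):

	static inline int
	ieee80211_get_mbssid_beacon_len(struct cfg80211_mbssid_elems *elems)
	{
		int i, len = 0;

		if (!elems)
			return 0;

		/* sum of all multiple-BSSID elements carried in the beacon */
		for (i = 0; i < elems->cnt; i++)
			len += elems->elem[i].len;

		return len;
	}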
index abc29df..682a164 100644 (file)
@@ -973,8 +973,10 @@ static void ieee80211_parse_extension_element(u32 *crc,
                }
                break;
        case WLAN_EID_EXT_HE_CAPABILITY:
-               elems->he_cap = data;
-               elems->he_cap_len = len;
+               if (ieee80211_he_capa_size_ok(data, len)) {
+                       elems->he_cap = data;
+                       elems->he_cap_len = len;
+               }
                break;
        case WLAN_EID_EXT_HE_OPERATION:
                if (len >= sizeof(*elems->he_operation) &&
@@ -1006,6 +1008,17 @@ static void ieee80211_parse_extension_element(u32 *crc,
                if (len >= sizeof(*elems->he_6ghz_capa))
                        elems->he_6ghz_capa = data;
                break;
+       case WLAN_EID_EXT_EHT_CAPABILITY:
+               if (ieee80211_eht_capa_size_ok(elems->he_cap,
+                                              data, len)) {
+                       elems->eht_cap = data;
+                       elems->eht_cap_len = len;
+               }
+               break;
+       case WLAN_EID_EXT_EHT_OPERATION:
+               if (ieee80211_eht_oper_size_ok(data, len))
+                       elems->eht_operation = data;
+               break;
        }
 }
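Note the ordering dependency introduced for EHT: ieee80211_eht_capa_size_ok() takes the already-parsed HE capability because the expected size of the EHT MCS/NSS set depends on the bandwidths advertised in the peer's HE PHY capabilities; without a valid HE capability element the size check cannot pass, so the EHT capability is ignored as well.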
 
@@ -1799,6 +1812,7 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
        const struct ieee80211_sta_he_cap *he_cap;
+       const struct ieee80211_sta_eht_cap *eht_cap;
        u8 *pos = buffer, *end = buffer + buffer_len;
        size_t noffset;
        int supp_rates_len, i;
@@ -1979,6 +1993,18 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
                        goto out_err;
        }
 
+       eht_cap = ieee80211_get_eht_iftype_cap(sband,
+                                              ieee80211_vif_type_p2p(&sdata->vif));
+
+       if (eht_cap &&
+           cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
+                                        IEEE80211_CHAN_NO_HE |
+                                        IEEE80211_CHAN_NO_EHT)) {
+               pos = ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, end);
+               if (!pos)
+                       goto out_err;
+       }
+
        if (cfg80211_any_usable_channels(local->hw.wiphy,
                                         BIT(NL80211_BAND_6GHZ),
                                         IEEE80211_CHAN_NO_HE)) {
@@ -2321,6 +2347,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        struct cfg80211_sched_scan_request *sched_scan_req;
        bool sched_scan_stopped = false;
        bool suspended = local->suspended;
+       bool in_reconfig = false;
 
        /* nothing to do if HW shouldn't run */
        if (!local->open_count)
@@ -2672,6 +2699,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
 
        if (local->in_reconfig) {
+               in_reconfig = local->in_reconfig;
                local->in_reconfig = false;
                barrier();
 
@@ -2689,6 +2717,15 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                                        IEEE80211_QUEUE_STOP_REASON_SUSPEND,
                                        false);
 
+       if (in_reconfig) {
+               list_for_each_entry(sdata, &local->interfaces, list) {
+                       if (!ieee80211_sdata_running(sdata))
+                               continue;
+                       if (sdata->vif.type == NL80211_IFTYPE_STATION)
+                               ieee80211_sta_restart(sdata);
+               }
+       }
+
        if (!suspended)
                return 0;
 
@@ -2718,7 +2755,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        return 0;
 }
 
-void ieee80211_resume_disconnect(struct ieee80211_vif *vif)
+static void ieee80211_reconfig_disconnect(struct ieee80211_vif *vif, u8 flag)
 {
        struct ieee80211_sub_if_data *sdata;
        struct ieee80211_local *local;
@@ -2730,19 +2767,35 @@ void ieee80211_resume_disconnect(struct ieee80211_vif *vif)
        sdata = vif_to_sdata(vif);
        local = sdata->local;
 
-       if (WARN_ON(!local->resuming))
+       if (WARN_ON(flag & IEEE80211_SDATA_DISCONNECT_RESUME &&
+                   !local->resuming))
+               return;
+
+       if (WARN_ON(flag & IEEE80211_SDATA_DISCONNECT_HW_RESTART &&
+                   !local->in_reconfig))
                return;
 
        if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
                return;
 
-       sdata->flags |= IEEE80211_SDATA_DISCONNECT_RESUME;
+       sdata->flags |= flag;
 
        mutex_lock(&local->key_mtx);
        list_for_each_entry(key, &sdata->key_list, list)
                key->flags |= KEY_FLAG_TAINTED;
        mutex_unlock(&local->key_mtx);
 }
+
+void ieee80211_hw_restart_disconnect(struct ieee80211_vif *vif)
+{
+       ieee80211_reconfig_disconnect(vif, IEEE80211_SDATA_DISCONNECT_HW_RESTART);
+}
+EXPORT_SYMBOL_GPL(ieee80211_hw_restart_disconnect);
+
+void ieee80211_resume_disconnect(struct ieee80211_vif *vif)
+{
+       ieee80211_reconfig_disconnect(vif, IEEE80211_SDATA_DISCONNECT_RESUME);
+}
 EXPORT_SYMBOL_GPL(ieee80211_resume_disconnect);
 
 void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata)
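Both exported wrappers funnel into ieee80211_reconfig_disconnect(), with WARN_ON()s ensuring each is only used on its matching path (resume vs. in-progress HW restart). A hypothetical driver-side use of the new helper, all names invented for illustration:

	/* called from the driver's restart handling, i.e. while
	 * ieee80211_reconfig() is in progress; outside that window the
	 * WARN_ON() above would trigger
	 */
	static void drv_restart_iter(void *data, u8 *mac,
				     struct ieee80211_vif *vif)
	{
		struct drv_priv *priv = data;

		/* association state could not be recovered from firmware */
		if (vif->type == NL80211_IFTYPE_STATION && priv->assoc_lost)
			ieee80211_hw_restart_disconnect(vif);
	}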
@@ -3073,6 +3126,10 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
                else
                        ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
                break;
+       case NL80211_CHAN_WIDTH_320:
+               /* HT information element should not be included on 6GHz */
+               WARN_ON(1);
+               return pos;
        default:
                ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE;
                break;
@@ -3112,6 +3169,10 @@ void ieee80211_ie_build_wide_bw_cs(u8 *pos,
        case NL80211_CHAN_WIDTH_80P80:
                *pos++ = IEEE80211_VHT_CHANWIDTH_80P80MHZ;
                break;
+       case NL80211_CHAN_WIDTH_320:
+               /* The behavior is not defined for 320 MHz channels */
+               WARN_ON(1);
+               fallthrough;
        default:
                *pos++ = IEEE80211_VHT_CHANWIDTH_USE_HT;
        }
@@ -3164,6 +3225,10 @@ u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
        case NL80211_CHAN_WIDTH_80:
                vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
                break;
+       case NL80211_CHAN_WIDTH_320:
+               /* VHT information element should not be included on 6GHz */
+               WARN_ON(1);
+               return pos;
        default:
                vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT;
                break;
@@ -3224,6 +3289,13 @@ u8 *ieee80211_ie_build_he_oper(u8 *pos, struct cfg80211_chan_def *chandef)
                he_6ghz_op->ccfs1 = 0;
 
        switch (chandef->width) {
+       case NL80211_CHAN_WIDTH_320:
+               /*
+                * TODO: mesh operation is not defined over 6GHz 320 MHz
+                * channels.
+                */
+               WARN_ON(1);
+               break;
        case NL80211_CHAN_WIDTH_160:
                /* Convert 160 MHz channel width to new style as interop
                 * workaround.
@@ -3412,17 +3484,19 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, u32 vht_cap_info,
 
 bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
                                    const struct ieee80211_he_operation *he_oper,
+                                   const struct ieee80211_eht_operation *eht_oper,
                                    struct cfg80211_chan_def *chandef)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
        enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
        const struct ieee80211_sta_he_cap *he_cap;
+       const struct ieee80211_sta_eht_cap *eht_cap;
        struct cfg80211_chan_def he_chandef = *chandef;
        const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
        struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
-       bool support_80_80, support_160;
-       u8 he_phy_cap;
+       bool support_80_80, support_160, support_320;
+       u8 he_phy_cap, eht_phy_cap;
        u32 freq;
 
        if (chandef->chan->band != NL80211_BAND_6GHZ)
@@ -3451,6 +3525,12 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
                return false;
        }
 
+       eht_cap = ieee80211_get_eht_iftype_cap(sband, iftype);
+       if (!eht_cap) {
+               sdata_info(sdata, "Missing iftype sband data/EHT cap");
+               eht_oper = NULL;
+       }
+
        he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
 
        if (!he_6ghz_oper) {
@@ -3460,6 +3540,11 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
                return false;
        }
 
+       /*
+        * The EHT operation IE does not contain the primary channel so the
+        * primary channel frequency should be taken from the 6 GHz operation
+        * information.
+        */
        freq = ieee80211_channel_to_frequency(he_6ghz_oper->primary,
                                              NL80211_BAND_6GHZ);
        he_chandef.chan = ieee80211_get_channel(sdata->local->hw.wiphy, freq);
@@ -3477,43 +3562,80 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
                break;
        }
 
-       switch (u8_get_bits(he_6ghz_oper->control,
-                           IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH)) {
-       case IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ:
-               he_chandef.width = NL80211_CHAN_WIDTH_20;
-               break;
-       case IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ:
-               he_chandef.width = NL80211_CHAN_WIDTH_40;
-               break;
-       case IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ:
-               he_chandef.width = NL80211_CHAN_WIDTH_80;
-               break;
-       case IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ:
-               he_chandef.width = NL80211_CHAN_WIDTH_80;
-               if (!he_6ghz_oper->ccfs1)
+       if (!eht_oper) {
+               switch (u8_get_bits(he_6ghz_oper->control,
+                                   IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH)) {
+               case IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ:
+                       he_chandef.width = NL80211_CHAN_WIDTH_20;
                        break;
-               if (abs(he_6ghz_oper->ccfs1 - he_6ghz_oper->ccfs0) == 8) {
+               case IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ:
+                       he_chandef.width = NL80211_CHAN_WIDTH_40;
+                       break;
+               case IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ:
+                       he_chandef.width = NL80211_CHAN_WIDTH_80;
+                       break;
+               case IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ:
+                       he_chandef.width = NL80211_CHAN_WIDTH_80;
+                       if (!he_6ghz_oper->ccfs1)
+                               break;
+                       if (abs(he_6ghz_oper->ccfs1 - he_6ghz_oper->ccfs0) == 8) {
+                               if (support_160)
+                                       he_chandef.width = NL80211_CHAN_WIDTH_160;
+                       } else {
+                               if (support_80_80)
+                                       he_chandef.width = NL80211_CHAN_WIDTH_80P80;
+                       }
+                       break;
+               }
+
+               if (he_chandef.width == NL80211_CHAN_WIDTH_160) {
+                       he_chandef.center_freq1 =
+                               ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1,
+                                                              NL80211_BAND_6GHZ);
+               } else {
+                       he_chandef.center_freq1 =
+                               ieee80211_channel_to_frequency(he_6ghz_oper->ccfs0,
+                                                              NL80211_BAND_6GHZ);
+                       if (support_80_80 || support_160)
+                               he_chandef.center_freq2 =
+                                       ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1,
+                                                                      NL80211_BAND_6GHZ);
+               }
+       } else {
+               eht_phy_cap = eht_cap->eht_cap_elem.phy_cap_info[0];
+               support_320 =
+                       eht_phy_cap & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
+
+               switch (u8_get_bits(eht_oper->chan_width,
+                                   IEEE80211_EHT_OPER_CHAN_WIDTH)) {
+               case IEEE80211_EHT_OPER_CHAN_WIDTH_20MHZ:
+                       he_chandef.width = NL80211_CHAN_WIDTH_20;
+                       break;
+               case IEEE80211_EHT_OPER_CHAN_WIDTH_40MHZ:
+                       he_chandef.width = NL80211_CHAN_WIDTH_40;
+                       break;
+               case IEEE80211_EHT_OPER_CHAN_WIDTH_80MHZ:
+                       he_chandef.width = NL80211_CHAN_WIDTH_80;
+                       break;
+               case IEEE80211_EHT_OPER_CHAN_WIDTH_160MHZ:
                        if (support_160)
                                he_chandef.width = NL80211_CHAN_WIDTH_160;
-               } else {
-                       if (support_80_80)
-                               he_chandef.width = NL80211_CHAN_WIDTH_80P80;
+                       else
+                               he_chandef.width = NL80211_CHAN_WIDTH_80;
+                       break;
+               case IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ:
+                       if (support_320)
+                               he_chandef.width = NL80211_CHAN_WIDTH_320;
+                       else if (support_160)
+                               he_chandef.width = NL80211_CHAN_WIDTH_160;
+                       else
+                               he_chandef.width = NL80211_CHAN_WIDTH_80;
+                       break;
                }
-               break;
-       }
 
-       if (he_chandef.width == NL80211_CHAN_WIDTH_160) {
-               he_chandef.center_freq1 =
-                       ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1,
-                                                      NL80211_BAND_6GHZ);
-       } else {
                he_chandef.center_freq1 =
-                       ieee80211_channel_to_frequency(he_6ghz_oper->ccfs0,
+                       ieee80211_channel_to_frequency(eht_oper->ccfs,
                                                       NL80211_BAND_6GHZ);
-               if (support_80_80 || support_160)
-                       he_chandef.center_freq2 =
-                               ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1,
-                                                              NL80211_BAND_6GHZ);
        }
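Summarizing the selection logic above as an illustrative decision table (no new code):

	/*
	 * EHT operation element present?
	 *   no  -> legacy HE path: width from he_6ghz_oper->control,
	 *          center_freq1/center_freq2 from ccfs0/ccfs1
	 *   yes -> width from eht_oper->chan_width, capped by the local
	 *          support_160/support_320 PHY capability bits,
	 *          center_freq1 from eht_oper->ccfs
	 *
	 * In both cases the primary channel itself comes from
	 * he_6ghz_oper->primary, as noted earlier.
	 */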
 
        if (!cfg80211_chandef_valid(&he_chandef)) {
@@ -3985,6 +4107,15 @@ u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c)
                ret = IEEE80211_STA_DISABLE_80P80MHZ |
                      IEEE80211_STA_DISABLE_160MHZ;
                break;
+       case NL80211_CHAN_WIDTH_320:
+               /* n_P20 */
+               tmp = (150 + c->chan->center_freq - c->center_freq1) / 20;
+               /* n_P160 */
+               tmp /= 8;
+               c->center_freq1 = c->center_freq1 - 80 + 160 * tmp;
+               c->width = NL80211_CHAN_WIDTH_160;
+               ret = IEEE80211_STA_DISABLE_320MHZ;
+               break;
        default:
        case NL80211_CHAN_WIDTH_20_NOHT:
                WARN_ON_ONCE(1);
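The 320 MHz downgrade arithmetic mirrors the existing narrower cases; a worked example with illustrative numbers:

	/*
	 * A 320 MHz chandef with center_freq1 = 6265 MHz spans
	 * 6105..6425 MHz.  For a primary 20 MHz channel centered at
	 * 6135 MHz:
	 *
	 *   n_P20  = (150 + 6135 - 6265) / 20 = 1
	 *   n_P160 = 1 / 8 = 0   (primary lies in the lower 160 MHz half)
	 *   center_freq1 = 6265 - 80 + 160 * 0 = 6185 MHz
	 *
	 * so the downgraded 160 MHz chandef spans 6105..6265 MHz and still
	 * contains the primary channel.
	 */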
@@ -4649,3 +4780,69 @@ u16 ieee80211_encode_usf(int listen_interval)
 
        return (u16) listen_interval;
 }
+
+u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
+{
+       const struct ieee80211_sta_he_cap *he_cap;
+       const struct ieee80211_sta_eht_cap *eht_cap;
+       struct ieee80211_supported_band *sband;
+       u8 n;
+
+       sband = ieee80211_get_sband(sdata);
+       if (!sband)
+               return 0;
+
+       he_cap = ieee80211_get_he_iftype_cap(sband, iftype);
+       eht_cap = ieee80211_get_eht_iftype_cap(sband, iftype);
+       if (!he_cap || !eht_cap)
+               return 0;
+
+       n = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
+                                      &eht_cap->eht_cap_elem);
+       return 2 + 1 +
+              sizeof(eht_cap->eht_cap_elem) + n +
+              ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
+                                     eht_cap->eht_cap_elem.phy_cap_info);
+}
+
+u8 *ieee80211_ie_build_eht_cap(u8 *pos,
+                              const struct ieee80211_sta_he_cap *he_cap,
+                              const struct ieee80211_sta_eht_cap *eht_cap,
+                              u8 *end)
+{
+       u8 mcs_nss_len, ppet_len;
+       u8 ie_len;
+       u8 *orig_pos = pos;
+
+       /* Make sure we have room for the IE */
+       if (!he_cap || !eht_cap)
+               return orig_pos;
+
+       mcs_nss_len = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
+                                                &eht_cap->eht_cap_elem);
+       ppet_len = ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
+                                         eht_cap->eht_cap_elem.phy_cap_info);
+
+       ie_len = 2 + 1 + sizeof(eht_cap->eht_cap_elem) + mcs_nss_len + ppet_len;
+       if ((end - pos) < ie_len)
+               return orig_pos;
+
+       *pos++ = WLAN_EID_EXTENSION;
+       *pos++ = ie_len - 2;
+       *pos++ = WLAN_EID_EXT_EHT_CAPABILITY;
+
+       /* Fixed data */
+       memcpy(pos, &eht_cap->eht_cap_elem, sizeof(eht_cap->eht_cap_elem));
+       pos += sizeof(eht_cap->eht_cap_elem);
+
+       memcpy(pos, &eht_cap->eht_mcs_nss_supp, mcs_nss_len);
+       pos += mcs_nss_len;
+
+       if (ppet_len) {
+               memcpy(pos, &eht_cap->eht_ppe_thres, ppet_len);
+               pos += ppet_len;
+       }
+
+       return pos;
+}
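The element emitted above (and sized by ieee80211_ie_len_eht_cap()) is laid out as follows, which is where the shared "2 + 1 + ..." length computation comes from:

	/*
	 * u8 WLAN_EID_EXTENSION
	 * u8 length			(ie_len - 2)
	 * u8 WLAN_EID_EXT_EHT_CAPABILITY
	 * struct ieee80211_eht_cap_elem_fixed	(fixed MAC/PHY caps)
	 * MCS/NSS set			(mcs_nss_len, depends on the HE caps)
	 * PPE thresholds		(ppet_len, optional)
	 */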
index a45dacd..8f16aa9 100644 (file)
@@ -4,7 +4,7 @@
  *
  * Portions of this file
  * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2020 Intel Corporation
+ * Copyright (C) 2018 - 2021 Intel Corporation
  */
 
 #include <linux/ieee80211.h>
@@ -329,15 +329,27 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
        }
 }
 
-/* FIXME: move this to some better location - parses HE now */
+/* FIXME: move this to some better location - parses HE/EHT now */
 enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta)
 {
        struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
        struct ieee80211_sta_he_cap *he_cap = &sta->sta.he_cap;
+       struct ieee80211_sta_eht_cap *eht_cap = &sta->sta.eht_cap;
        u32 cap_width;
 
        if (he_cap->has_he) {
-               u8 info = he_cap->he_cap_elem.phy_cap_info[0];
+               u8 info;
+
+               if (eht_cap->has_eht &&
+                   sta->sdata->vif.bss_conf.chandef.chan->band ==
+                   NL80211_BAND_6GHZ) {
+                       info = eht_cap->eht_cap_elem.phy_cap_info[0];
+
+                       if (info & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
+                               return IEEE80211_STA_RX_BW_320;
+               }
+
+               info = he_cap->he_cap_elem.phy_cap_info[0];
 
                if (sta->sdata->vif.bss_conf.chandef.chan->band ==
                                NL80211_BAND_2GHZ) {
@@ -445,6 +457,8 @@ ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width)
        case NL80211_CHAN_WIDTH_160:
        case NL80211_CHAN_WIDTH_80P80:
                return IEEE80211_STA_RX_BW_160;
+       case NL80211_CHAN_WIDTH_320:
+               return IEEE80211_STA_RX_BW_320;
        default:
                WARN_ON_ONCE(1);
                return IEEE80211_STA_RX_BW_20;
@@ -483,13 +497,24 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
 
 void ieee80211_sta_set_rx_nss(struct sta_info *sta)
 {
-       u8 ht_rx_nss = 0, vht_rx_nss = 0, he_rx_nss = 0, rx_nss;
+       u8 ht_rx_nss = 0, vht_rx_nss = 0, he_rx_nss = 0, eht_rx_nss = 0, rx_nss;
        bool support_160;
 
        /* if we received a notification already don't overwrite it */
        if (sta->sta.rx_nss)
                return;
 
+       if (sta->sta.eht_cap.has_eht) {
+               int i;
+               const u8 *rx_nss_mcs = (void *)&sta->sta.eht_cap.eht_mcs_nss_supp;
+
+               /* get the max nss for EHT over all possible bandwidths and mcs */
+               for (i = 0; i < sizeof(struct ieee80211_eht_mcs_nss_supp); i++)
+                       eht_rx_nss = max_t(u8, eht_rx_nss,
+                                          u8_get_bits(rx_nss_mcs[i],
+                                                      IEEE80211_EHT_MCS_NSS_RX));
+       }
+
        if (sta->sta.he_cap.has_he) {
                int i;
                u8 rx_mcs_80 = 0, rx_mcs_160 = 0;
@@ -555,6 +580,7 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta)
 
        rx_nss = max(vht_rx_nss, ht_rx_nss);
        rx_nss = max(he_rx_nss, rx_nss);
+       rx_nss = max(eht_rx_nss, rx_nss);
        sta->sta.rx_nss = max_t(u8, 1, rx_nss);
 }
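The EHT branch treats struct ieee80211_eht_mcs_nss_supp as a flat byte array: each byte packs an RX NSS field (IEEE80211_EHT_MCS_NSS_RX) alongside a TX one for a given MCS range and bandwidth, so taking the maximum RX field over all bytes yields the peak RX NSS the peer supports in any configuration.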
 
index 75a0a27..b5e8de6 100644 (file)
@@ -83,16 +83,6 @@ static bool addresses_equal(const struct mptcp_addr_info *a,
        return a->port == b->port;
 }
 
-static bool address_zero(const struct mptcp_addr_info *addr)
-{
-       struct mptcp_addr_info zero;
-
-       memset(&zero, 0, sizeof(zero));
-       zero.family = addr->family;
-
-       return addresses_equal(addr, &zero, true);
-}
-
 static void local_address(const struct sock_common *skc,
                          struct mptcp_addr_info *addr)
 {
@@ -660,7 +650,6 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
        unsigned int add_addr_accept_max;
        struct mptcp_addr_info remote;
        unsigned int subflows_max;
-       bool reset_port = false;
        int i, nr;
 
        add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
@@ -671,14 +660,15 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
                 msk->pm.remote.family);
 
        remote = msk->pm.remote;
+       mptcp_pm_announce_addr(msk, &remote, true);
+       mptcp_pm_nl_addr_send_ack(msk);
+
        if (lookup_subflow_by_daddr(&msk->conn_list, &remote))
-               goto add_addr_echo;
+               return;
 
        /* if the peer supplied no port, reuse the remote port of the initial subflow */
-       if (!remote.port) {
-               reset_port = true;
+       if (!remote.port)
                remote.port = sk->sk_dport;
-       }
 
        /* connect to the specified remote address, using whatever
         * local address the routing configuration will pick.
@@ -694,14 +684,6 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
        for (i = 0; i < nr; i++)
                __mptcp_subflow_connect(sk, &addrs[i], &remote);
        spin_lock_bh(&msk->pm.lock);
-
-       /* be sure to echo exactly the received address */
-       if (reset_port)
-               remote.port = 0;
-
-add_addr_echo:
-       mptcp_pm_announce_addr(msk, &remote, true);
-       mptcp_pm_nl_addr_send_ack(msk);
 }
 
 void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
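Two behavioral changes are folded into the mptcp_pm_nl_add_addr_received() hunk: the ADD_ADDR echo (mptcp_pm_announce_addr() with the echo flag set, plus the ack) is now sent up front, before any subflow creation, and it always carries the address exactly as received, which is why the reset_port bookkeeping could be dropped; the sk_dport fallback now only affects the outgoing connect attempts.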
@@ -877,10 +859,18 @@ static bool address_use_port(struct mptcp_pm_addr_entry *entry)
                MPTCP_PM_ADDR_FLAG_SIGNAL;
 }
 
+/* caller must ensure the RCU grace period has already elapsed */
+static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
+{
+       if (entry->lsk)
+               sock_release(entry->lsk);
+       kfree(entry);
+}
+
 static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
                                             struct mptcp_pm_addr_entry *entry)
 {
-       struct mptcp_pm_addr_entry *cur;
+       struct mptcp_pm_addr_entry *cur, *del_entry = NULL;
        unsigned int addr_max;
        int ret = -EINVAL;
 
@@ -901,8 +891,22 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
        list_for_each_entry(cur, &pernet->local_addr_list, list) {
                if (addresses_equal(&cur->addr, &entry->addr,
                                    address_use_port(entry) &&
-                                   address_use_port(cur)))
-                       goto out;
+                                   address_use_port(cur))) {
+                       /* allow replacing the existing endpoint only if such
+                        * endpoint is an implicit one and user-space
+                        * did not provide an endpoint id
+                        */
+                       if (!(cur->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT))
+                               goto out;
+                       if (entry->addr.id)
+                               goto out;
+
+                       pernet->addrs--;
+                       entry->addr.id = cur->addr.id;
+                       list_del_rcu(&cur->list);
+                       del_entry = cur;
+                       break;
+               }
        }
 
        if (!entry->addr.id) {
@@ -938,6 +942,12 @@ find_next:
 
 out:
        spin_unlock_bh(&pernet->lock);
+
+       /* just replaced an existing entry, free it */
+       if (del_entry) {
+               synchronize_rcu();
+               __mptcp_pm_release_addr_entry(del_entry);
+       }
        return ret;
 }
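The endpoint-replace path follows the usual RCU discipline: unlink under the pernet spinlock, free only after a grace period and outside the lock. The general shape, with hypothetical names:

	spin_lock_bh(&pernet->lock);
	list_del_rcu(&old->list);	/* unlink; writers are excluded */
	list_add_rcu(&new->list, &pernet->local_addr_list);
	spin_unlock_bh(&pernet->lock);

	synchronize_rcu();		/* wait out concurrent readers */
	__mptcp_pm_release_addr_entry(old);	/* now safe to free */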
 
@@ -1011,9 +1021,6 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
        if (addresses_equal(&msk_local, &skc_local, false))
                return 0;
 
-       if (address_zero(&skc_local))
-               return 0;
-
        pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id);
 
        rcu_read_lock();
@@ -1036,7 +1043,7 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
        entry->addr.id = 0;
        entry->addr.port = 0;
        entry->ifindex = 0;
-       entry->flags = 0;
+       entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;
        entry->lsk = NULL;
        ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
        if (ret < 0)
@@ -1249,6 +1256,17 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
                return -EINVAL;
        }
 
+       if (addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL &&
+           addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) {
+               GENL_SET_ERR_MSG(info, "flags mustn't have both signal and fullmesh");
+               return -EINVAL;
+       }
+
+       if (addr.flags & MPTCP_PM_ADDR_FLAG_IMPLICIT) {
+               GENL_SET_ERR_MSG(info, "can't create IMPLICIT endpoint");
+               return -EINVAL;
+       }
+
        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                GENL_SET_ERR_MSG(info, "can't allocate addr");
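MPTCP_PM_ADDR_FLAG_IMPLICIT marks endpoints the kernel creates on its own (see the mptcp_pm_nl_get_local_id() hunk above), so the second new check keeps that flag kernel-internal. For the first check: assuming current iproute2 syntax, a command like "ip mptcp endpoint add 10.0.0.2 signal fullmesh" would now be rejected with the quoted message.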
@@ -1333,11 +1351,12 @@ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
 }
 
 static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
-                                                  struct mptcp_addr_info *addr)
+                                                  const struct mptcp_pm_addr_entry *entry)
 {
-       struct mptcp_sock *msk;
-       long s_slot = 0, s_num = 0;
+       const struct mptcp_addr_info *addr = &entry->addr;
        struct mptcp_rm_list list = { .nr = 0 };
+       long s_slot = 0, s_num = 0;
+       struct mptcp_sock *msk;
 
        pr_debug("remove_id=%d", addr->id);
 
@@ -1354,7 +1373,8 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
 
                lock_sock(sk);
                remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr);
-               mptcp_pm_remove_anno_addr(msk, addr, remove_subflow);
+               mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
+                                         !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT));
                if (remove_subflow)
                        mptcp_pm_remove_subflow(msk, &list);
                release_sock(sk);
@@ -1367,14 +1387,6 @@ next:
        return 0;
 }
 
-/* caller must ensure the RCU grace period is already elapsed */
-static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
-{
-       if (entry->lsk)
-               sock_release(entry->lsk);
-       kfree(entry);
-}
-
 static int mptcp_nl_remove_id_zero_address(struct net *net,
                                           struct mptcp_addr_info *addr)
 {
@@ -1451,7 +1463,7 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
        __clear_bit(entry->addr.id, pernet->id_bitmap);
        spin_unlock_bh(&pernet->lock);
 
-       mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), &entry->addr);
+       mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), entry);
        synchronize_rcu();
        __mptcp_pm_release_addr_entry(entry);
 
@@ -1466,14 +1478,12 @@ static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
 
        list_for_each_entry(entry, rm_list, list) {
                if (lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) &&
-                   alist.nr < MPTCP_RM_IDS_MAX &&
-                   slist.nr < MPTCP_RM_IDS_MAX) {
-                       alist.ids[alist.nr++] = entry->addr.id;
+                   slist.nr < MPTCP_RM_IDS_MAX)
                        slist.ids[slist.nr++] = entry->addr.id;
-               } else if (remove_anno_list_by_saddr(msk, &entry->addr) &&
-                        alist.nr < MPTCP_RM_IDS_MAX) {
+
+               if (remove_anno_list_by_saddr(msk, &entry->addr) &&
+                   alist.nr < MPTCP_RM_IDS_MAX)
                        alist.ids[alist.nr++] = entry->addr.id;
-               }
        }
 
        if (alist.nr) {
index 1c72f25..101aeeb 100644 (file)
@@ -117,6 +117,9 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
        list_add(&subflow->node, &msk->conn_list);
        sock_hold(ssock->sk);
        subflow->request_mptcp = 1;
+
+       /* This is the first subflow, always with id 0 */
+       subflow->local_id_valid = 1;
        mptcp_sock_graft(msk->first, sk->sk_socket);
 
        return 0;
@@ -1356,6 +1359,7 @@ alloc_skb:
 out:
        if (READ_ONCE(msk->csum_enabled))
                mptcp_update_data_checksum(skb, copy);
+       trace_mptcp_sendmsg_frag(mpext);
        mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
        return copy;
 }
index 9d0ee6c..3c1a303 100644 (file)
@@ -442,7 +442,8 @@ struct mptcp_subflow_context {
                rx_eof : 1,
                can_ack : 1,        /* only after processing the remote key */
                disposable : 1,     /* ctx can be free at ulp release time */
-               stale : 1;          /* unable to snd/rcv data, do not use for xmit */
+               stale : 1,          /* unable to snd/rcv data, do not use for xmit */
+               local_id_valid : 1; /* local_id is correctly initialized */
        enum mptcp_data_avail data_avail;
        u32     remote_nonce;
        u64     thmac;
index 45c004f..aba260f 100644 (file)
@@ -481,6 +481,51 @@ do_reset:
        mptcp_subflow_reset(sk);
 }
 
+static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
+{
+       subflow->local_id = local_id;
+       subflow->local_id_valid = 1;
+}
+
+static int subflow_chk_local_id(struct sock *sk)
+{
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+       struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+       int err;
+
+       if (likely(subflow->local_id_valid))
+               return 0;
+
+       err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
+       if (err < 0)
+               return err;
+
+       subflow_set_local_id(subflow, err);
+       return 0;
+}
+
+static int subflow_rebuild_header(struct sock *sk)
+{
+       int err = subflow_chk_local_id(sk);
+
+       if (unlikely(err < 0))
+               return err;
+
+       return inet_sk_rebuild_header(sk);
+}
+
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+static int subflow_v6_rebuild_header(struct sock *sk)
+{
+       int err = subflow_chk_local_id(sk);
+
+       if (unlikely(err < 0))
+               return err;
+
+       return inet6_sk_rebuild_header(sk);
+}
+#endif
+
 struct request_sock_ops mptcp_subflow_request_sock_ops;
 static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;
 
@@ -1104,7 +1149,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
        struct sk_buff *skb;
 
        if (!skb_peek(&ssk->sk_receive_queue))
-               WRITE_ONCE(subflow->data_avail, 0);
+               WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
        if (subflow->data_avail)
                return true;
 
@@ -1169,7 +1214,7 @@ fallback:
                subflow->reset_transient = 0;
                subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
                tcp_send_active_reset(ssk, GFP_ATOMIC);
-               WRITE_ONCE(subflow->data_avail, 0);
+               WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
                return true;
        }
 
@@ -1182,7 +1227,7 @@ fallback:
                subflow->reset_transient = 0;
                subflow->reset_reason = MPTCP_RST_EMPTCP;
                tcp_send_active_reset(ssk, GFP_ATOMIC);
-               WRITE_ONCE(subflow->data_avail, 0);
+               WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
                return false;
        }
 
@@ -1204,7 +1249,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
        if (subflow->map_valid &&
            mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
                subflow->map_valid = 0;
-               WRITE_ONCE(subflow->data_avail, 0);
+               WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
 
                pr_debug("Done with mapping: seq=%u data_len=%u",
                         subflow->map_subflow_seq,
@@ -1398,13 +1443,8 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
                get_random_bytes(&subflow->local_nonce, sizeof(u32));
        } while (!subflow->local_nonce);
 
-       if (!local_id) {
-               err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk);
-               if (err < 0)
-                       goto failed;
-
-               local_id = err;
-       }
+       if (local_id)
+               subflow_set_local_id(subflow, local_id);
 
        mptcp_pm_get_flags_and_ifindex_by_id(sock_net(sk), local_id,
                                             &flags, &ifindex);
@@ -1429,7 +1469,6 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
        pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
                 remote_token, local_id, remote_id);
        subflow->remote_token = remote_token;
-       subflow->local_id = local_id;
        subflow->remote_id = remote_id;
        subflow->request_join = 1;
        subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
@@ -1728,15 +1767,22 @@ static void subflow_ulp_clone(const struct request_sock *req,
                new_ctx->token = subflow_req->token;
                new_ctx->ssn_offset = subflow_req->ssn_offset;
                new_ctx->idsn = subflow_req->idsn;
+
+               /* this is the first subflow, id is always 0 */
+               new_ctx->local_id_valid = 1;
        } else if (subflow_req->mp_join) {
                new_ctx->ssn_offset = subflow_req->ssn_offset;
                new_ctx->mp_join = 1;
                new_ctx->fully_established = 1;
                new_ctx->backup = subflow_req->backup;
-               new_ctx->local_id = subflow_req->local_id;
                new_ctx->remote_id = subflow_req->remote_id;
                new_ctx->token = subflow_req->token;
                new_ctx->thmac = subflow_req->thmac;
+
+               /* the subflow req id is valid, fetched via subflow_check_req()
+                * and subflow_token_join_request()
+                */
+               subflow_set_local_id(new_ctx, subflow_req->local_id);
        }
 }
 
@@ -1789,6 +1835,7 @@ void __init mptcp_subflow_init(void)
        subflow_specific.conn_request = subflow_v4_conn_request;
        subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
        subflow_specific.sk_rx_dst_set = subflow_finish_connect;
+       subflow_specific.rebuild_header = subflow_rebuild_header;
 
        tcp_prot_override = tcp_prot;
        tcp_prot_override.release_cb = tcp_release_cb_override;
@@ -1801,6 +1848,7 @@ void __init mptcp_subflow_init(void)
        subflow_v6_specific.conn_request = subflow_v6_conn_request;
        subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
        subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
+       subflow_v6_specific.rebuild_header = subflow_v6_rebuild_header;
 
        subflow_v6m_specific = subflow_v6_specific;
        subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
@@ -1808,6 +1856,7 @@ void __init mptcp_subflow_init(void)
        subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
        subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
        subflow_v6m_specific.net_frag_header_len = 0;
+       subflow_v6m_specific.rebuild_header = subflow_rebuild_header;
 
        tcpv6_prot_override = tcpv6_prot;
        tcpv6_prot_override.release_cb = tcp_release_cb_override;
index 9b7f9c9..0164e5f 100644 (file)
@@ -66,6 +66,8 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash);
 struct conntrack_gc_work {
        struct delayed_work     dwork;
        u32                     next_bucket;
+       u32                     avg_timeout;
+       u32                     start_time;
        bool                    exiting;
        bool                    early_drop;
 };
@@ -77,8 +79,19 @@ static __read_mostly bool nf_conntrack_locks_all;
 /* serialize hash resizes and nf_ct_iterate_cleanup */
 static DEFINE_MUTEX(nf_conntrack_mutex);
 
-#define GC_SCAN_INTERVAL       (120u * HZ)
+#define GC_SCAN_INTERVAL_MAX   (60ul * HZ)
+#define GC_SCAN_INTERVAL_MIN   (1ul * HZ)
+
+/* clamp timeouts to this value (TCP unacked) */
+#define GC_SCAN_INTERVAL_CLAMP (300ul * HZ)
+
+/* large initial bias so that we don't scan often just because we have
+ * three entries with a 1s timeout.
+ */
+#define GC_SCAN_INTERVAL_INIT  INT_MAX
+
 #define GC_SCAN_MAX_DURATION   msecs_to_jiffies(10)
+#define GC_SCAN_EXPIRED_MAX    (64000u / HZ)
 
 #define MIN_CHAINLEN   8u
 #define MAX_CHAINLEN   (32u - MIN_CHAINLEN)
@@ -1420,16 +1433,28 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct)
 
 static void gc_worker(struct work_struct *work)
 {
-       unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
        unsigned int i, hashsz, nf_conntrack_max95 = 0;
-       unsigned long next_run = GC_SCAN_INTERVAL;
+       u32 end_time, start_time = nfct_time_stamp;
        struct conntrack_gc_work *gc_work;
+       unsigned int expired_count = 0;
+       unsigned long next_run;
+       s32 delta_time;
+
        gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
 
        i = gc_work->next_bucket;
        if (gc_work->early_drop)
                nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
 
+       if (i == 0) {
+               gc_work->avg_timeout = GC_SCAN_INTERVAL_INIT;
+               gc_work->start_time = start_time;
+       }
+
+       next_run = gc_work->avg_timeout;
+
+       end_time = start_time + GC_SCAN_MAX_DURATION;
+
        do {
                struct nf_conntrack_tuple_hash *h;
                struct hlist_nulls_head *ct_hash;
@@ -1446,6 +1471,7 @@ static void gc_worker(struct work_struct *work)
 
                hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
                        struct nf_conntrack_net *cnet;
+                       unsigned long expires;
                        struct net *net;
 
                        tmp = nf_ct_tuplehash_to_ctrack(h);
@@ -1455,11 +1481,29 @@ static void gc_worker(struct work_struct *work)
                                continue;
                        }
 
+                       if (expired_count > GC_SCAN_EXPIRED_MAX) {
+                               rcu_read_unlock();
+
+                               gc_work->next_bucket = i;
+                               gc_work->avg_timeout = next_run;
+
+                               delta_time = nfct_time_stamp - gc_work->start_time;
+
+                               /* re-sched immediately if total cycle time is exceeded */
+                               next_run = delta_time < (s32)GC_SCAN_INTERVAL_MAX;
+                               goto early_exit;
+                       }
+
                        if (nf_ct_is_expired(tmp)) {
                                nf_ct_gc_expired(tmp);
+                               expired_count++;
                                continue;
                        }
 
+                       expires = clamp(nf_ct_expires(tmp), GC_SCAN_INTERVAL_MIN, GC_SCAN_INTERVAL_CLAMP);
+                       next_run += expires;
+                       next_run /= 2u;
+
                        if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
                                continue;
 
@@ -1477,8 +1521,10 @@ static void gc_worker(struct work_struct *work)
                                continue;
                        }
 
-                       if (gc_worker_can_early_drop(tmp))
+                       if (gc_worker_can_early_drop(tmp)) {
                                nf_ct_kill(tmp);
+                               expired_count++;
+                       }
 
                        nf_ct_put(tmp);
                }
@@ -1491,33 +1537,38 @@ static void gc_worker(struct work_struct *work)
                cond_resched();
                i++;
 
-               if (time_after(jiffies, end_time) && i < hashsz) {
+               delta_time = nfct_time_stamp - end_time;
+               if (delta_time > 0 && i < hashsz) {
+                       gc_work->avg_timeout = next_run;
                        gc_work->next_bucket = i;
                        next_run = 0;
-                       break;
+                       goto early_exit;
                }
        } while (i < hashsz);
 
+       gc_work->next_bucket = 0;
+
+       next_run = clamp(next_run, GC_SCAN_INTERVAL_MIN, GC_SCAN_INTERVAL_MAX);
+
+       delta_time = max_t(s32, nfct_time_stamp - gc_work->start_time, 1);
+       if (next_run > (unsigned long)delta_time)
+               next_run -= delta_time;
+       else
+               next_run = 1;
+
+early_exit:
        if (gc_work->exiting)
                return;
 
-       /*
-        * Eviction will normally happen from the packet path, and not
-        * from this gc worker.
-        *
-        * This worker is only here to reap expired entries when system went
-        * idle after a busy period.
-        */
-       if (next_run) {
+       if (next_run)
                gc_work->early_drop = false;
-               gc_work->next_bucket = 0;
-       }
+
        queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
 }
 
 static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
 {
-       INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
+       INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
        gc_work->exiting = false;
 }
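The rescan interval is now a running average of the remaining lifetimes seen while scanning, seeded very high so a handful of short-lived entries cannot by itself force frequent rescans. A standalone restatement of the averaging:

	u32 avg = GC_SCAN_INTERVAL_INIT;	/* INT_MAX seed */

	/* for every live entry visited during the scan: */
	avg = (avg + clamp(nf_ct_expires(ct), GC_SCAN_INTERVAL_MIN,
			   GC_SCAN_INTERVAL_CLAMP)) / 2;

	/* after a full cycle, minus the time the cycle itself took: */
	next_run = clamp(avg, GC_SCAN_INTERVAL_MIN, GC_SCAN_INTERVAL_MAX);

The switch from INIT_DEFERRABLE_WORK to INIT_DELAYED_WORK pairs with this: the worker must now fire even on an idle system, since reaping expired entries is no longer left to the packet path alone.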
 
@@ -1757,9 +1808,6 @@ resolve_normal_ct(struct nf_conn *tmpl,
                        return 0;
                if (IS_ERR(h))
                        return PTR_ERR(h);
-
-               ct = nf_ct_tuplehash_to_ctrack(h);
-               ct->local_origin = state->hook == NF_INET_LOCAL_OUT;
        }
        ct = nf_ct_tuplehash_to_ctrack(h);
 
index a97ddb1..8dec42e 100644 (file)
@@ -550,6 +550,12 @@ void nf_nat_helper_unregister(struct nf_conntrack_nat_helper *nat)
 }
 EXPORT_SYMBOL_GPL(nf_nat_helper_unregister);
 
+void nf_ct_set_auto_assign_helper_warned(struct net *net)
+{
+       nf_ct_pernet(net)->auto_assign_helper_warned = true;
+}
+EXPORT_SYMBOL_GPL(nf_ct_set_auto_assign_helper_warned);
+
 void nf_conntrack_helper_pernet_init(struct net *net)
 {
        struct nf_conntrack_net *cnet = nf_ct_pernet(net);
index 12f793d..3b516cf 100644 (file)
@@ -63,10 +63,8 @@ static bool udp_error(struct sk_buff *skb,
        }
 
        /* Packet with no checksum */
-       if (!hdr->check) {
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       if (!hdr->check)
                return false;
-       }
 
        /* Checksum invalid? Ignore.
         * We skip checking packets on the outgoing path
index b90eca7..3db256d 100644 (file)
@@ -39,8 +39,14 @@ flow_offload_fill_dir(struct flow_offload *flow,
 
        ft->l3proto = ctt->src.l3num;
        ft->l4proto = ctt->dst.protonum;
-       ft->src_port = ctt->src.u.tcp.port;
-       ft->dst_port = ctt->dst.u.tcp.port;
+
+       switch (ctt->dst.protonum) {
+       case IPPROTO_TCP:
+       case IPPROTO_UDP:
+               ft->src_port = ctt->src.u.tcp.port;
+               ft->dst_port = ctt->dst.u.tcp.port;
+               break;
+       }
 }
 
 struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
@@ -399,7 +405,8 @@ EXPORT_SYMBOL_GPL(flow_offload_lookup);
 
 static int
 nf_flow_table_iterate(struct nf_flowtable *flow_table,
-                     void (*iter)(struct flow_offload *flow, void *data),
+                     void (*iter)(struct nf_flowtable *flowtable,
+                                  struct flow_offload *flow, void *data),
                      void *data)
 {
        struct flow_offload_tuple_rhash *tuplehash;
@@ -423,7 +430,7 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
 
                flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
 
-               iter(flow, data);
+               iter(flow_table, flow, data);
        }
        rhashtable_walk_stop(&hti);
        rhashtable_walk_exit(&hti);
@@ -451,10 +458,9 @@ static bool nf_flow_has_stale_dst(struct flow_offload *flow)
               flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple);
 }
 
-static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
+static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
+                                   struct flow_offload *flow, void *data)
 {
-       struct nf_flowtable *flow_table = data;
-
        if (nf_flow_has_expired(flow) ||
            nf_ct_is_dying(flow->ct) ||
            nf_flow_has_stale_dst(flow))
@@ -479,7 +485,7 @@ static void nf_flow_offload_work_gc(struct work_struct *work)
        struct nf_flowtable *flow_table;
 
        flow_table = container_of(work, struct nf_flowtable, gc_work.work);
-       nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
+       nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
        queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
 }
 
@@ -595,7 +601,8 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_init);
 
-static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
+static void nf_flow_table_do_cleanup(struct nf_flowtable *flow_table,
+                                    struct flow_offload *flow, void *data)
 {
        struct net_device *dev = data;
 
@@ -637,11 +644,10 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
 
        cancel_delayed_work_sync(&flow_table->gc_work);
        nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
-       nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
+       nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
        nf_flow_table_offload_flush(flow_table);
        if (nf_flowtable_hw_offload(flow_table))
-               nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
-                                     flow_table);
+               nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
        rhashtable_destroy(&flow_table->rhashtable);
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_free);
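The iterator callback now receives the flowtable explicitly instead of smuggling it through the opaque data pointer; that frees data for genuine per-callback state (the net_device in nf_flow_table_do_cleanup()) and lets every gc-step caller simply pass NULL.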
index f1d3871..010b0b4 100644 (file)
@@ -172,6 +172,7 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
        struct flow_ports *ports;
        unsigned int thoff;
        struct iphdr *iph;
+       u8 ipproto;
 
        if (!pskb_may_pull(skb, sizeof(*iph) + offset))
                return -1;
@@ -185,13 +186,19 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
 
        thoff += offset;
 
-       switch (iph->protocol) {
+       ipproto = iph->protocol;
+       switch (ipproto) {
        case IPPROTO_TCP:
                *hdrsize = sizeof(struct tcphdr);
                break;
        case IPPROTO_UDP:
                *hdrsize = sizeof(struct udphdr);
                break;
+#ifdef CONFIG_NF_CT_PROTO_GRE
+       case IPPROTO_GRE:
+               *hdrsize = sizeof(struct gre_base_hdr);
+               break;
+#endif
        default:
                return -1;
        }
@@ -202,15 +209,29 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
        if (!pskb_may_pull(skb, thoff + *hdrsize))
                return -1;
 
+       switch (ipproto) {
+       case IPPROTO_TCP:
+       case IPPROTO_UDP:
+               ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
+               tuple->src_port         = ports->source;
+               tuple->dst_port         = ports->dest;
+               break;
+       case IPPROTO_GRE: {
+               struct gre_base_hdr *greh;
+
+               greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
+               if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
+                       return -1;
+               break;
+       }
+       }
+
        iph = (struct iphdr *)(skb_network_header(skb) + offset);
-       ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
 
        tuple->src_v4.s_addr    = iph->saddr;
        tuple->dst_v4.s_addr    = iph->daddr;
-       tuple->src_port         = ports->source;
-       tuple->dst_port         = ports->dest;
        tuple->l3proto          = AF_INET;
-       tuple->l4proto          = iph->protocol;
+       tuple->l4proto          = ipproto;
        tuple->iifidx           = dev->ifindex;
        nf_flow_tuple_encap(skb, tuple);
 
@@ -521,6 +542,7 @@ static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
        struct flow_ports *ports;
        struct ipv6hdr *ip6h;
        unsigned int thoff;
+       u8 nexthdr;
 
        thoff = sizeof(*ip6h) + offset;
        if (!pskb_may_pull(skb, thoff))
@@ -528,13 +550,19 @@ static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
 
        ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
 
-       switch (ip6h->nexthdr) {
+       nexthdr = ip6h->nexthdr;
+       switch (nexthdr) {
        case IPPROTO_TCP:
                *hdrsize = sizeof(struct tcphdr);
                break;
        case IPPROTO_UDP:
                *hdrsize = sizeof(struct udphdr);
                break;
+#ifdef CONFIG_NF_CT_PROTO_GRE
+       case IPPROTO_GRE:
+               *hdrsize = sizeof(struct gre_base_hdr);
+               break;
+#endif
        default:
                return -1;
        }
@@ -545,15 +573,29 @@ static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
        if (!pskb_may_pull(skb, thoff + *hdrsize))
                return -1;
 
+       switch (nexthdr) {
+       case IPPROTO_TCP:
+       case IPPROTO_UDP:
+               ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
+               tuple->src_port         = ports->source;
+               tuple->dst_port         = ports->dest;
+               break;
+       case IPPROTO_GRE: {
+               struct gre_base_hdr *greh;
+
+               greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
+               if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
+                       return -1;
+               break;
+       }
+       }
+
        ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
-       ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
 
        tuple->src_v6           = ip6h->saddr;
        tuple->dst_v6           = ip6h->daddr;
-       tuple->src_port         = ports->source;
-       tuple->dst_port         = ports->dest;
        tuple->l3proto          = AF_INET6;
-       tuple->l4proto          = ip6h->nexthdr;
+       tuple->l4proto          = nexthdr;
        tuple->iifidx           = dev->ifindex;
        nf_flow_tuple_encap(skb, tuple);
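
Only version-0 GRE, the keyless RFC 2784 base encapsulation, is admitted to the software fast path; GREv1 (PPTP) carries a call ID and falls back to the classic path. The tuple's port fields stay zero for GRE, so the lookup effectively keys on addresses and protocol alone. A hedged restatement of the header check (flow_gre_v0() is an invented name; struct gre_base_hdr and the GRE_VERSION mask come from <net/gre.h> and the if_tunnel UAPI header):

    /* struct gre_base_hdr is the fixed part of a GRE header:
     *   __be16 flags; __be16 protocol;
     */
    static bool flow_gre_v0(const struct gre_base_hdr *greh)
    {
            /* GRE_VERSION masks the version bits of 'flags' */
            return (greh->flags & GRE_VERSION) == GRE_VERSION_0;
    }
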
 
index fc4265a..11b6e19 100644 (file)
@@ -20,7 +20,6 @@ static struct workqueue_struct *nf_flow_offload_stats_wq;
 struct flow_offload_work {
        struct list_head        list;
        enum flow_cls_command   cmd;
-       int                     priority;
        struct nf_flowtable     *flowtable;
        struct flow_offload     *flow;
        struct work_struct      work;
@@ -174,6 +173,7 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
                match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
                break;
        case IPPROTO_UDP:
+       case IPPROTO_GRE:
                break;
        default:
                return -EOPNOTSUPP;
@@ -182,15 +182,22 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
        key->basic.ip_proto = tuple->l4proto;
        mask->basic.ip_proto = 0xff;
 
-       key->tp.src = tuple->src_port;
-       mask->tp.src = 0xffff;
-       key->tp.dst = tuple->dst_port;
-       mask->tp.dst = 0xffff;
-
        match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_META) |
                                      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
-                                     BIT(FLOW_DISSECTOR_KEY_BASIC) |
-                                     BIT(FLOW_DISSECTOR_KEY_PORTS);
+                                     BIT(FLOW_DISSECTOR_KEY_BASIC);
+
+       switch (tuple->l4proto) {
+       case IPPROTO_TCP:
+       case IPPROTO_UDP:
+               key->tp.src = tuple->src_port;
+               mask->tp.src = 0xffff;
+               key->tp.dst = tuple->dst_port;
+               mask->tp.dst = 0xffff;
+
+               match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);
+               break;
+       }
+
        return 0;
 }
 
@@ -866,7 +873,8 @@ static int flow_offload_tuple_add(struct flow_offload_work *offload,
                                  enum flow_offload_tuple_dir dir)
 {
        return nf_flow_offload_tuple(offload->flowtable, offload->flow,
-                                    flow_rule, dir, offload->priority,
+                                    flow_rule, dir,
+                                    offload->flowtable->priority,
                                     FLOW_CLS_REPLACE, NULL,
                                     &offload->flowtable->flow_block.cb_list);
 }
@@ -875,7 +883,8 @@ static void flow_offload_tuple_del(struct flow_offload_work *offload,
                                   enum flow_offload_tuple_dir dir)
 {
        nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir,
-                             offload->priority, FLOW_CLS_DESTROY, NULL,
+                             offload->flowtable->priority,
+                             FLOW_CLS_DESTROY, NULL,
                              &offload->flowtable->flow_block.cb_list);
 }
 
@@ -926,7 +935,8 @@ static void flow_offload_tuple_stats(struct flow_offload_work *offload,
                                     struct flow_stats *stats)
 {
        nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir,
-                             offload->priority, FLOW_CLS_STATS, stats,
+                             offload->flowtable->priority,
+                             FLOW_CLS_STATS, stats,
                              &offload->flowtable->flow_block.cb_list);
 }
 
@@ -1004,7 +1014,6 @@ nf_flow_offload_work_alloc(struct nf_flowtable *flowtable,
 
        offload->cmd = cmd;
        offload->flow = flow;
-       offload->priority = flowtable->priority;
        offload->flowtable = flowtable;
        INIT_WORK(&offload->work, flow_offload_work_handler);
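
Dropping the cached priority means add, delete and stats requests read flowtable->priority at execution time instead of a snapshot taken at allocation, shrinking struct flow_offload_work. Together with the dissector change above, a GRE tuple now produces a match on L3 addresses plus ip_proto only; no FLOW_DISSECTOR_KEY_PORTS key is advertised. Restating the resulting GRE match from the hunk above:

    key->basic.ip_proto = IPPROTO_GRE;
    mask->basic.ip_proto = 0xff;
    match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_META) |
                                  BIT(FLOW_DISSECTOR_KEY_CONTROL) |
                                  BIT(FLOW_DISSECTOR_KEY_BASIC);
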
 
index 58c06ac..7981be5 100644 (file)
@@ -494,38 +494,6 @@ another_round:
        goto another_round;
 }
 
-static bool tuple_force_port_remap(const struct nf_conntrack_tuple *tuple)
-{
-       u16 sp, dp;
-
-       switch (tuple->dst.protonum) {
-       case IPPROTO_TCP:
-               sp = ntohs(tuple->src.u.tcp.port);
-               dp = ntohs(tuple->dst.u.tcp.port);
-               break;
-       case IPPROTO_UDP:
-       case IPPROTO_UDPLITE:
-               sp = ntohs(tuple->src.u.udp.port);
-               dp = ntohs(tuple->dst.u.udp.port);
-               break;
-       default:
-               return false;
-       }
-
-       /* IANA: System port range: 1-1023,
-        *         user port range: 1024-49151,
-        *      private port range: 49152-65535.
-        *
-        * Linux default ephemeral port range is 32768-60999.
-        *
-        * Enforce port remapping if sport is significantly lower
-        * than dport to prevent NAT port shadowing, i.e.
-        * accidental match of 'new' inbound connection vs.
-        * existing outbound one.
-        */
-       return sp < 16384 && dp >= 32768;
-}
-
 /* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
  * we change the source to map into the range. For NF_INET_PRE_ROUTING
  * and NF_INET_LOCAL_OUT, we change the destination to map into the
@@ -539,17 +507,11 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
                 struct nf_conn *ct,
                 enum nf_nat_manip_type maniptype)
 {
-       bool random_port = range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL;
        const struct nf_conntrack_zone *zone;
        struct net *net = nf_ct_net(ct);
 
        zone = nf_ct_zone(ct);
 
-       if (maniptype == NF_NAT_MANIP_SRC &&
-           !random_port &&
-           !ct->local_origin)
-               random_port = tuple_force_port_remap(orig_tuple);
-
        /* 1) If this srcip/proto/src-proto-part is currently mapped,
         * and that same mapping gives a unique tuple within the given
         * range, use that.
@@ -558,7 +520,8 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
         * So far, we don't do local source mappings, so multiple
         * manips not an issue.
         */
-       if (maniptype == NF_NAT_MANIP_SRC && !random_port) {
+       if (maniptype == NF_NAT_MANIP_SRC &&
+           !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
                /* try the original tuple first */
                if (in_range(orig_tuple, range)) {
                        if (!nf_nat_used_tuple(orig_tuple, ct)) {
@@ -582,7 +545,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
         */
 
        /* Only bother mapping if it's not already in range and unique */
-       if (!random_port) {
+       if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
                if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
                        if (!(range->flags & NF_NAT_RANGE_PROTO_OFFSET) &&
                            l4proto_in_range(tuple, maniptype,
index c86748b..a97e112 100644 (file)
@@ -550,6 +550,58 @@ static int nft_delflowtable(struct nft_ctx *ctx,
        return err;
 }
 
+static void __nft_reg_track_clobber(struct nft_regs_track *track, u8 dreg)
+{
+       int i;
+
+       for (i = track->regs[dreg].num_reg; i > 0; i--)
+               __nft_reg_track_cancel(track, dreg - i);
+}
+
+static void __nft_reg_track_update(struct nft_regs_track *track,
+                                  const struct nft_expr *expr,
+                                  u8 dreg, u8 num_reg)
+{
+       track->regs[dreg].selector = expr;
+       track->regs[dreg].bitwise = NULL;
+       track->regs[dreg].num_reg = num_reg;
+}
+
+void nft_reg_track_update(struct nft_regs_track *track,
+                         const struct nft_expr *expr, u8 dreg, u8 len)
+{
+       unsigned int regcount;
+       int i;
+
+       __nft_reg_track_clobber(track, dreg);
+
+       regcount = DIV_ROUND_UP(len, NFT_REG32_SIZE);
+       for (i = 0; i < regcount; i++, dreg++)
+               __nft_reg_track_update(track, expr, dreg, i);
+}
+EXPORT_SYMBOL_GPL(nft_reg_track_update);
+
+void nft_reg_track_cancel(struct nft_regs_track *track, u8 dreg, u8 len)
+{
+       unsigned int regcount;
+       int i;
+
+       __nft_reg_track_clobber(track, dreg);
+
+       regcount = DIV_ROUND_UP(len, NFT_REG32_SIZE);
+       for (i = 0; i < regcount; i++, dreg++)
+               __nft_reg_track_cancel(track, dreg);
+}
+EXPORT_SYMBOL_GPL(nft_reg_track_cancel);
+
+void __nft_reg_track_cancel(struct nft_regs_track *track, u8 dreg)
+{
+       track->regs[dreg].selector = NULL;
+       track->regs[dreg].bitwise = NULL;
+       track->regs[dreg].num_reg = 0;
+}
+EXPORT_SYMBOL_GPL(__nft_reg_track_cancel);
+
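
Each tracked 32-bit register records the selector expression that produced it, any pending bitwise modifier, and num_reg, its position inside a multi-register store. That last field is what lets a partial overwrite invalidate a whole span. A worked example with invented register numbers:

    /* A 16-byte selector stored at dreg = 8 occupies four slots: */
    nft_reg_track_update(track, expr, 8, 16);
    /* => regs[8..11].selector = expr, regs[8..11].num_reg = 0,1,2,3 */

    /* A later 4-byte store to register 10 must cancel the whole
     * 16-byte span, not just slot 10: */
    nft_reg_track_cancel(track, 10, NFT_REG32_SIZE);
    /* __nft_reg_track_clobber() sees regs[10].num_reg == 2 and walks
     * back to cancel slots 8 and 9 before slot 10 itself. */
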
 /*
  * Tables
  */
@@ -1072,6 +1124,30 @@ static int nft_objname_hash_cmp(struct rhashtable_compare_arg *arg,
        return strcmp(obj->key.name, k->name);
 }
 
+static bool nft_supported_family(u8 family)
+{
+       return false
+#ifdef CONFIG_NF_TABLES_INET
+               || family == NFPROTO_INET
+#endif
+#ifdef CONFIG_NF_TABLES_IPV4
+               || family == NFPROTO_IPV4
+#endif
+#ifdef CONFIG_NF_TABLES_ARP
+               || family == NFPROTO_ARP
+#endif
+#ifdef CONFIG_NF_TABLES_NETDEV
+               || family == NFPROTO_NETDEV
+#endif
+#if IS_ENABLED(CONFIG_NF_TABLES_BRIDGE)
+               || family == NFPROTO_BRIDGE
+#endif
+#ifdef CONFIG_NF_TABLES_IPV6
+               || family == NFPROTO_IPV6
+#endif
+               ;
+}
+
 static int nf_tables_newtable(struct sk_buff *skb, const struct nfnl_info *info,
                              const struct nlattr * const nla[])
 {
@@ -1086,6 +1162,9 @@ static int nf_tables_newtable(struct sk_buff *skb, const struct nfnl_info *info,
        u32 flags = 0;
        int err;
 
+       if (!nft_supported_family(family))
+               return -EOPNOTSUPP;
+
        lockdep_assert_held(&nft_net->commit_mutex);
        attr = nla[NFTA_TABLE_NAME];
        table = nft_table_lookup(net, attr, family, genmask,
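
Rather than relying on each family module to reject foreign tables, table creation is now gated up front: the `false || ...` chain compiles down to exactly the comparisons for the families built into the kernel, with no #ifdef buried inside an expression. The same idiom works anywhere compile-time feature flags select a set of accepted values; a generic sketch with invented names:

    /* hypothetical example of the same gating idiom */
    static bool codec_supported(int codec)
    {
            return false
    #ifdef HAVE_OPUS
                    || codec == CODEC_OPUS
    #endif
    #ifdef HAVE_VORBIS
                    || codec == CODEC_VORBIS
    #endif
                    ;
    }
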
@@ -8260,6 +8339,21 @@ void nf_tables_trans_destroy_flush_work(void)
 }
 EXPORT_SYMBOL_GPL(nf_tables_trans_destroy_flush_work);
 
+static bool nft_expr_reduce(struct nft_regs_track *track,
+                           const struct nft_expr *expr)
+{
+       if (!expr->ops->reduce) {
+               pr_warn_once("missing reduce for expression %s ",
+                            expr->ops->type->name);
+               return false;
+       }
+
+       if (nft_reduce_is_readonly(expr))
+               return false;
+
+       return expr->ops->reduce(track, expr);
+}
+
 static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *chain)
 {
        const struct nft_expr *expr, *last;
@@ -8307,8 +8401,7 @@ static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *cha
                nft_rule_for_each_expr(expr, last, rule) {
                        track.cur = expr;
 
-                       if (expr->ops->reduce &&
-                           expr->ops->reduce(&track, expr)) {
+                       if (nft_expr_reduce(&track, expr)) {
                                expr = track.cur;
                                continue;
                        }
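
nft_expr_reduce() centralizes the policy: a missing .reduce implementation now triggers a one-time warning rather than silently skipping tracking, and expressions that never write a data register opt out via the NFT_REDUCE_READONLY marker used throughout the expression files below. The definitions live on the header side, outside this diff; a sketch consistent with these call sites, offered as an assumption:

    #define NFT_REDUCE_READONLY     ((void *)1)

    static inline bool nft_reduce_is_readonly(const struct nft_expr *expr)
    {
            return expr->ops->reduce == NFT_REDUCE_READONLY;
    }
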
index 7b727d3..38caa66 100644 (file)
@@ -283,12 +283,16 @@ static bool nft_bitwise_reduce(struct nft_regs_track *track,
 {
        const struct nft_bitwise *priv = nft_expr_priv(expr);
        const struct nft_bitwise *bitwise;
+       unsigned int regcount;
+       u8 dreg;
+       int i;
 
        if (!track->regs[priv->sreg].selector)
                return false;
 
        bitwise = nft_expr_priv(expr);
        if (track->regs[priv->sreg].selector == track->regs[priv->dreg].selector &&
+           track->regs[priv->sreg].num_reg == 0 &&
            track->regs[priv->dreg].bitwise &&
            track->regs[priv->dreg].bitwise->ops == expr->ops &&
            priv->sreg == bitwise->sreg &&
@@ -302,17 +306,21 @@ static bool nft_bitwise_reduce(struct nft_regs_track *track,
                return true;
        }
 
-       if (track->regs[priv->sreg].bitwise) {
-               track->regs[priv->dreg].selector = NULL;
-               track->regs[priv->dreg].bitwise = NULL;
+       if (track->regs[priv->sreg].bitwise ||
+           track->regs[priv->sreg].num_reg != 0) {
+               nft_reg_track_cancel(track, priv->dreg, priv->len);
                return false;
        }
 
        if (priv->sreg != priv->dreg) {
-               track->regs[priv->dreg].selector =
-                       track->regs[priv->sreg].selector;
+               nft_reg_track_update(track, track->regs[priv->sreg].selector,
+                                    priv->dreg, priv->len);
        }
-       track->regs[priv->dreg].bitwise = expr;
+
+       dreg = priv->dreg;
+       regcount = DIV_ROUND_UP(priv->len, NFT_REG32_SIZE);
+       for (i = 0; i < regcount; i++, dreg++)
+               track->regs[dreg].bitwise = expr;
 
        return false;
 }
@@ -447,8 +455,7 @@ static bool nft_bitwise_fast_reduce(struct nft_regs_track *track,
        }
 
        if (track->regs[priv->sreg].bitwise) {
-               track->regs[priv->dreg].selector = NULL;
-               track->regs[priv->dreg].bitwise = NULL;
+               nft_reg_track_cancel(track, priv->dreg, NFT_REG32_SIZE);
                return false;
        }
 
@@ -522,3 +529,4 @@ bool nft_expr_reduce_bitwise(struct nft_regs_track *track,
 
        return false;
 }
+EXPORT_SYMBOL_GPL(nft_expr_reduce_bitwise);
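
Since a bitwise operation can span several 32-bit registers, the pending operation is recorded on every slot of the destination span; a reader of, say, the third slot would otherwise miss it. Illustration with invented numbers, matching the loop above:

    /* A 16-byte bitwise op writing to dreg = 8 marks slots 8..11: */
    dreg = 8;
    regcount = DIV_ROUND_UP(16, NFT_REG32_SIZE);    /* 4 */
    for (i = 0; i < regcount; i++, dreg++)
            track->regs[dreg].bitwise = expr;
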
index e646e9e..d776091 100644 (file)
@@ -172,8 +172,7 @@ static bool nft_byteorder_reduce(struct nft_regs_track *track,
 {
        struct nft_byteorder *priv = nft_expr_priv(expr);
 
-       track->regs[priv->dreg].selector = NULL;
-       track->regs[priv->dreg].bitwise = NULL;
+       nft_reg_track_cancel(track, priv->dreg, priv->len);
 
        return false;
 }
index 917072a..6528f76 100644 (file)
@@ -193,6 +193,7 @@ static const struct nft_expr_ops nft_cmp_ops = {
        .eval           = nft_cmp_eval,
        .init           = nft_cmp_init,
        .dump           = nft_cmp_dump,
+       .reduce         = NFT_REDUCE_READONLY,
        .offload        = nft_cmp_offload,
 };
 
@@ -269,6 +270,7 @@ const struct nft_expr_ops nft_cmp_fast_ops = {
        .eval           = NULL, /* inlined */
        .init           = nft_cmp_fast_init,
        .dump           = nft_cmp_fast_dump,
+       .reduce         = NFT_REDUCE_READONLY,
        .offload        = nft_cmp_fast_offload,
 };
 
@@ -359,6 +361,7 @@ const struct nft_expr_ops nft_cmp16_fast_ops = {
        .eval           = NULL, /* inlined */
        .init           = nft_cmp16_fast_init,
        .dump           = nft_cmp16_fast_dump,
+       .reduce         = NFT_REDUCE_READONLY,
        .offload        = nft_cmp16_fast_offload,
 };
 
index 5a46d82..c161724 100644 (file)
@@ -871,6 +871,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
        ops->dump = nft_target_dump;
        ops->validate = nft_target_validate;
        ops->data = target;
+       ops->reduce = NFT_REDUCE_READONLY;
 
        if (family == NFPROTO_BRIDGE)
                ops->eval = nft_target_eval_bridge;
index 3362417..9de1462 100644 (file)
@@ -257,6 +257,7 @@ static const struct nft_expr_ops nft_connlimit_ops = {
        .destroy_clone  = nft_connlimit_destroy_clone,
        .dump           = nft_connlimit_dump,
        .gc             = nft_connlimit_gc,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_connlimit_type __read_mostly = {
index f179e8c..da90836 100644 (file)
@@ -293,6 +293,7 @@ static const struct nft_expr_ops nft_counter_ops = {
        .destroy_clone  = nft_counter_destroy,
        .dump           = nft_counter_dump,
        .clone          = nft_counter_clone,
+       .reduce         = NFT_REDUCE_READONLY,
        .offload        = nft_counter_offload,
        .offload_stats  = nft_counter_offload_stats,
 };
index 5adf8bb..d8e1614 100644 (file)
@@ -26,6 +26,7 @@
 struct nft_ct {
        enum nft_ct_keys        key:8;
        enum ip_conntrack_dir   dir:8;
+       u8                      len;
        union {
                u8              dreg;
                u8              sreg;
@@ -500,6 +501,7 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
                }
        }
 
+       priv->len = len;
        err = nft_parse_register_store(ctx, tb[NFTA_CT_DREG], &priv->dreg, NULL,
                                       NFT_DATA_VALUE, len);
        if (err < 0)
@@ -608,6 +610,7 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
                }
        }
 
+       priv->len = len;
        err = nft_parse_register_load(tb[NFTA_CT_SREG], &priv->sreg, len);
        if (err < 0)
                goto err1;
@@ -677,6 +680,29 @@ nla_put_failure:
        return -1;
 }
 
+static bool nft_ct_get_reduce(struct nft_regs_track *track,
+                             const struct nft_expr *expr)
+{
+       const struct nft_ct *priv = nft_expr_priv(expr);
+       const struct nft_ct *ct;
+
+       if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
+               nft_reg_track_update(track, expr, priv->dreg, priv->len);
+               return false;
+       }
+
+       ct = nft_expr_priv(track->regs[priv->dreg].selector);
+       if (priv->key != ct->key) {
+               nft_reg_track_update(track, expr, priv->dreg, priv->len);
+               return false;
+       }
+
+       if (!track->regs[priv->dreg].bitwise)
+               return true;
+
+       return nft_expr_reduce_bitwise(track, expr);
+}
+
 static int nft_ct_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
        const struct nft_ct *priv = nft_expr_priv(expr);
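
nft_ct_get_reduce() is the template the other get-style expressions in this series follow: if the tracked selector differs, re-record the register and keep the expression; if it matches key for key, the fetch is redundant and can be elided (return true), unless a pending bitwise operation still has to be folded. The comparison helper is defined in the header rather than here; a sketch of what these call sites assume it does:

    static inline bool nft_reg_track_cmp(struct nft_regs_track *track,
                                         const struct nft_expr *expr, u8 dreg)
    {
            /* comparable only if the slot holds the base register
             * (num_reg == 0) of a selector from the same ops */
            return track->regs[dreg].selector &&
                   track->regs[dreg].selector->ops == expr->ops &&
                   track->regs[dreg].num_reg == 0;
    }
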
@@ -710,8 +736,27 @@ static const struct nft_expr_ops nft_ct_get_ops = {
        .init           = nft_ct_get_init,
        .destroy        = nft_ct_get_destroy,
        .dump           = nft_ct_get_dump,
+       .reduce         = nft_ct_get_reduce,
 };
 
+static bool nft_ct_set_reduce(struct nft_regs_track *track,
+                             const struct nft_expr *expr)
+{
+       int i;
+
+       for (i = 0; i < NFT_REG32_NUM; i++) {
+               if (!track->regs[i].selector)
+                       continue;
+
+               if (track->regs[i].selector->ops != &nft_ct_get_ops)
+                       continue;
+
+               __nft_reg_track_cancel(track, i);
+       }
+
+       return false;
+}
+
 static const struct nft_expr_ops nft_ct_set_ops = {
        .type           = &nft_ct_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
@@ -719,6 +764,7 @@ static const struct nft_expr_ops nft_ct_set_ops = {
        .init           = nft_ct_set_init,
        .destroy        = nft_ct_set_destroy,
        .dump           = nft_ct_set_dump,
+       .reduce         = nft_ct_set_reduce,
 };
 
 #ifdef CONFIG_NF_CONNTRACK_ZONES
@@ -729,6 +775,7 @@ static const struct nft_expr_ops nft_ct_set_zone_ops = {
        .init           = nft_ct_set_init,
        .destroy        = nft_ct_set_destroy,
        .dump           = nft_ct_set_dump,
+       .reduce         = nft_ct_set_reduce,
 };
 #endif
 
@@ -785,6 +832,7 @@ static const struct nft_expr_ops nft_notrack_ops = {
        .type           = &nft_notrack_type,
        .size           = NFT_EXPR_SIZE(0),
        .eval           = nft_notrack_eval,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_notrack_type __read_mostly = {
@@ -1041,6 +1089,9 @@ static int nft_ct_helper_obj_init(const struct nft_ctx *ctx,
        if (err < 0)
                goto err_put_helper;
 
+       /* Avoid the bogus warning; the helper will be assigned after CT init */
+       nf_ct_set_auto_assign_helper_warned(ctx->net);
+
        return 0;
 
 err_put_helper:
index 5b5c607..6350740 100644 (file)
@@ -79,6 +79,7 @@ static const struct nft_expr_ops nft_dup_netdev_ops = {
        .eval           = nft_dup_netdev_eval,
        .init           = nft_dup_netdev_init,
        .dump           = nft_dup_netdev_dump,
+       .reduce         = NFT_REDUCE_READONLY,
        .offload        = nft_dup_netdev_offload,
        .offload_action = nft_dup_netdev_offload_action,
 };
index 87f3af4..22f70b5 100644 (file)
@@ -413,6 +413,7 @@ static const struct nft_expr_ops nft_dynset_ops = {
        .activate       = nft_dynset_activate,
        .deactivate     = nft_dynset_deactivate,
        .dump           = nft_dynset_dump,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 struct nft_expr_type nft_dynset_type __read_mostly = {
index d2b9378..22c3e05 100644 (file)
@@ -603,12 +603,40 @@ static int nft_exthdr_dump_strip(struct sk_buff *skb, const struct nft_expr *exp
        return nft_exthdr_dump_common(skb, priv);
 }
 
+static bool nft_exthdr_reduce(struct nft_regs_track *track,
+                              const struct nft_expr *expr)
+{
+       const struct nft_exthdr *priv = nft_expr_priv(expr);
+       const struct nft_exthdr *exthdr;
+
+       if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
+               nft_reg_track_update(track, expr, priv->dreg, priv->len);
+               return false;
+       }
+
+       exthdr = nft_expr_priv(track->regs[priv->dreg].selector);
+       if (priv->type != exthdr->type ||
+           priv->op != exthdr->op ||
+           priv->flags != exthdr->flags ||
+           priv->offset != exthdr->offset ||
+           priv->len != exthdr->len) {
+               nft_reg_track_update(track, expr, priv->dreg, priv->len);
+               return false;
+       }
+
+       if (!track->regs[priv->dreg].bitwise)
+               return true;
+
+       return nft_expr_reduce_bitwise(track, expr);
+}
+
 static const struct nft_expr_ops nft_exthdr_ipv6_ops = {
        .type           = &nft_exthdr_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
        .eval           = nft_exthdr_ipv6_eval,
        .init           = nft_exthdr_init,
        .dump           = nft_exthdr_dump,
+       .reduce         = nft_exthdr_reduce,
 };
 
 static const struct nft_expr_ops nft_exthdr_ipv4_ops = {
@@ -617,6 +645,7 @@ static const struct nft_expr_ops nft_exthdr_ipv4_ops = {
        .eval           = nft_exthdr_ipv4_eval,
        .init           = nft_exthdr_ipv4_init,
        .dump           = nft_exthdr_dump,
+       .reduce         = nft_exthdr_reduce,
 };
 
 static const struct nft_expr_ops nft_exthdr_tcp_ops = {
@@ -625,6 +654,7 @@ static const struct nft_expr_ops nft_exthdr_tcp_ops = {
        .eval           = nft_exthdr_tcp_eval,
        .init           = nft_exthdr_init,
        .dump           = nft_exthdr_dump,
+       .reduce         = nft_exthdr_reduce,
 };
 
 static const struct nft_expr_ops nft_exthdr_tcp_set_ops = {
@@ -633,6 +663,7 @@ static const struct nft_expr_ops nft_exthdr_tcp_set_ops = {
        .eval           = nft_exthdr_tcp_set_eval,
        .init           = nft_exthdr_tcp_set_init,
        .dump           = nft_exthdr_dump_set,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static const struct nft_expr_ops nft_exthdr_tcp_strip_ops = {
@@ -641,6 +672,7 @@ static const struct nft_expr_ops nft_exthdr_tcp_strip_ops = {
        .eval           = nft_exthdr_tcp_strip_eval,
        .init           = nft_exthdr_tcp_strip_init,
        .dump           = nft_exthdr_dump_strip,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static const struct nft_expr_ops nft_exthdr_sctp_ops = {
@@ -649,6 +681,7 @@ static const struct nft_expr_ops nft_exthdr_sctp_ops = {
        .eval           = nft_exthdr_sctp_eval,
        .init           = nft_exthdr_init,
        .dump           = nft_exthdr_dump,
+       .reduce         = nft_exthdr_reduce,
 };
 
 static const struct nft_expr_ops *
index b10ce73..f198f2d 100644 (file)
@@ -156,5 +156,47 @@ void nft_fib_store_result(void *reg, const struct nft_fib *priv,
 }
 EXPORT_SYMBOL_GPL(nft_fib_store_result);
 
+bool nft_fib_reduce(struct nft_regs_track *track,
+                   const struct nft_expr *expr)
+{
+       const struct nft_fib *priv = nft_expr_priv(expr);
+       unsigned int len = NFT_REG32_SIZE;
+       const struct nft_fib *fib;
+
+       switch (priv->result) {
+       case NFT_FIB_RESULT_OIF:
+               break;
+       case NFT_FIB_RESULT_OIFNAME:
+               if (priv->flags & NFTA_FIB_F_PRESENT)
+                       len = NFT_REG32_SIZE;
+               else
+                       len = IFNAMSIZ;
+               break;
+       case NFT_FIB_RESULT_ADDRTYPE:
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               break;
+       }
+
+       if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
+               nft_reg_track_update(track, expr, priv->dreg, len);
+               return false;
+       }
+
+       fib = nft_expr_priv(track->regs[priv->dreg].selector);
+       if (priv->result != fib->result ||
+           priv->flags != fib->flags) {
+               nft_reg_track_update(track, expr, priv->dreg, len);
+               return false;
+       }
+
+       if (!track->regs[priv->dreg].bitwise)
+               return true;
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(nft_fib_reduce);
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
index a88d44e..666a374 100644 (file)
@@ -49,6 +49,7 @@ static const struct nft_expr_ops nft_fib_inet_ops = {
        .init           = nft_fib_init,
        .dump           = nft_fib_dump,
        .validate       = nft_fib_validate,
+       .reduce         = nft_fib_reduce,
 };
 
 static struct nft_expr_type nft_fib_inet_type __read_mostly = {
index 3f3478a..9121ec6 100644 (file)
@@ -58,6 +58,7 @@ static const struct nft_expr_ops nft_fib_netdev_ops = {
        .init           = nft_fib_init,
        .dump           = nft_fib_dump,
        .validate       = nft_fib_validate,
+       .reduce         = nft_fib_reduce,
 };
 
 static struct nft_expr_type nft_fib_netdev_type __read_mostly = {
index 0af34ad..900d48c 100644 (file)
@@ -298,6 +298,19 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
                break;
        case IPPROTO_UDP:
                break;
+#ifdef CONFIG_NF_CT_PROTO_GRE
+       case IPPROTO_GRE: {
+               struct nf_conntrack_tuple *tuple;
+
+               if (ct->status & IPS_NAT_MASK)
+                       goto out;
+               tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+               /* No support for GRE v1 */
+               if (tuple->src.u.gre.key || tuple->dst.u.gre.key)
+                       goto out;
+               break;
+       }
+#endif
        default:
                goto out;
        }
@@ -428,6 +441,7 @@ static const struct nft_expr_ops nft_flow_offload_ops = {
        .destroy        = nft_flow_offload_destroy,
        .validate       = nft_flow_offload_validate,
        .dump           = nft_flow_offload_dump,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_flow_offload_type __read_mostly = {
index 08e7a28..7c5876d 100644 (file)
@@ -217,6 +217,7 @@ static const struct nft_expr_ops nft_fwd_neigh_netdev_ops = {
        .init           = nft_fwd_neigh_init,
        .dump           = nft_fwd_neigh_dump,
        .validate       = nft_fwd_validate,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static const struct nft_expr_ops nft_fwd_netdev_ops = {
@@ -226,6 +227,7 @@ static const struct nft_expr_ops nft_fwd_netdev_ops = {
        .init           = nft_fwd_netdev_init,
        .dump           = nft_fwd_netdev_dump,
        .validate       = nft_fwd_validate,
+       .reduce         = NFT_REDUCE_READONLY,
        .offload        = nft_fwd_netdev_offload,
        .offload_action = nft_fwd_netdev_offload_action,
 };
index f829f52..e5631e8 100644 (file)
@@ -165,6 +165,16 @@ nla_put_failure:
        return -1;
 }
 
+static bool nft_jhash_reduce(struct nft_regs_track *track,
+                            const struct nft_expr *expr)
+{
+       const struct nft_jhash *priv = nft_expr_priv(expr);
+
+       nft_reg_track_cancel(track, priv->dreg, sizeof(u32));
+
+       return false;
+}
+
 static int nft_symhash_dump(struct sk_buff *skb,
                            const struct nft_expr *expr)
 {
@@ -185,6 +195,30 @@ nla_put_failure:
        return -1;
 }
 
+static bool nft_symhash_reduce(struct nft_regs_track *track,
+                              const struct nft_expr *expr)
+{
+       struct nft_symhash *priv = nft_expr_priv(expr);
+       struct nft_symhash *symhash;
+
+       if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
+               nft_reg_track_update(track, expr, priv->dreg, sizeof(u32));
+               return false;
+       }
+
+       symhash = nft_expr_priv(track->regs[priv->dreg].selector);
+       if (priv->offset != symhash->offset ||
+           priv->modulus != symhash->modulus) {
+               nft_reg_track_update(track, expr, priv->dreg, sizeof(u32));
+               return false;
+       }
+
+       if (!track->regs[priv->dreg].bitwise)
+               return true;
+
+       return false;
+}
+
 static struct nft_expr_type nft_hash_type;
 static const struct nft_expr_ops nft_jhash_ops = {
        .type           = &nft_hash_type,
@@ -192,6 +226,7 @@ static const struct nft_expr_ops nft_jhash_ops = {
        .eval           = nft_jhash_eval,
        .init           = nft_jhash_init,
        .dump           = nft_jhash_dump,
+       .reduce         = nft_jhash_reduce,
 };
 
 static const struct nft_expr_ops nft_symhash_ops = {
@@ -200,6 +235,7 @@ static const struct nft_expr_ops nft_symhash_ops = {
        .eval           = nft_symhash_eval,
        .init           = nft_symhash_init,
        .dump           = nft_symhash_dump,
+       .reduce         = nft_symhash_reduce,
 };
 
 static const struct nft_expr_ops *
index d0f67d3..b80f7b5 100644 (file)
@@ -223,6 +223,17 @@ static bool nft_immediate_offload_action(const struct nft_expr *expr)
        return false;
 }
 
+static bool nft_immediate_reduce(struct nft_regs_track *track,
+                                const struct nft_expr *expr)
+{
+       const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+
+       if (priv->dreg != NFT_REG_VERDICT)
+               nft_reg_track_cancel(track, priv->dreg, priv->dlen);
+
+       return false;
+}
+
 static const struct nft_expr_ops nft_imm_ops = {
        .type           = &nft_imm_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)),
@@ -233,6 +244,7 @@ static const struct nft_expr_ops nft_imm_ops = {
        .destroy        = nft_immediate_destroy,
        .dump           = nft_immediate_dump,
        .validate       = nft_immediate_validate,
+       .reduce         = nft_immediate_reduce,
        .offload        = nft_immediate_offload,
        .offload_action = nft_immediate_offload_action,
 };
index 4f745a4..43d0d4a 100644 (file)
@@ -120,6 +120,7 @@ static const struct nft_expr_ops nft_last_ops = {
        .destroy        = nft_last_destroy,
        .clone          = nft_last_clone,
        .dump           = nft_last_dump,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 struct nft_expr_type nft_last_type __read_mostly = {
index a726b62..d4a6cf3 100644 (file)
@@ -225,6 +225,7 @@ static const struct nft_expr_ops nft_limit_pkts_ops = {
        .destroy        = nft_limit_pkts_destroy,
        .clone          = nft_limit_pkts_clone,
        .dump           = nft_limit_pkts_dump,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static void nft_limit_bytes_eval(const struct nft_expr *expr,
@@ -279,6 +280,7 @@ static const struct nft_expr_ops nft_limit_bytes_ops = {
        .dump           = nft_limit_bytes_dump,
        .clone          = nft_limit_bytes_clone,
        .destroy        = nft_limit_bytes_destroy,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static const struct nft_expr_ops *
index 54f6c20..0e13c00 100644 (file)
@@ -290,6 +290,7 @@ static const struct nft_expr_ops nft_log_ops = {
        .init           = nft_log_init,
        .destroy        = nft_log_destroy,
        .dump           = nft_log_dump,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_log_type __read_mostly = {
index 90becbf..dfae127 100644 (file)
@@ -253,6 +253,17 @@ static int nft_lookup_validate(const struct nft_ctx *ctx,
        return 0;
 }
 
+static bool nft_lookup_reduce(struct nft_regs_track *track,
+                             const struct nft_expr *expr)
+{
+       const struct nft_lookup *priv = nft_expr_priv(expr);
+
+       if (priv->set->flags & NFT_SET_MAP)
+               nft_reg_track_cancel(track, priv->dreg, priv->set->dlen);
+
+       return false;
+}
+
 static const struct nft_expr_ops nft_lookup_ops = {
        .type           = &nft_lookup_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
@@ -263,6 +274,7 @@ static const struct nft_expr_ops nft_lookup_ops = {
        .destroy        = nft_lookup_destroy,
        .dump           = nft_lookup_dump,
        .validate       = nft_lookup_validate,
+       .reduce         = nft_lookup_reduce,
 };
 
 struct nft_expr_type nft_lookup_type __read_mostly = {
index 9953e80..2a0adc4 100644 (file)
@@ -129,6 +129,7 @@ static const struct nft_expr_ops nft_masq_ipv4_ops = {
        .destroy        = nft_masq_ipv4_destroy,
        .dump           = nft_masq_dump,
        .validate       = nft_masq_validate,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_masq_ipv4_type __read_mostly = {
@@ -175,6 +176,7 @@ static const struct nft_expr_ops nft_masq_ipv6_ops = {
        .destroy        = nft_masq_ipv6_destroy,
        .dump           = nft_masq_dump,
        .validate       = nft_masq_validate,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_masq_ipv6_type __read_mostly = {
@@ -230,6 +232,7 @@ static const struct nft_expr_ops nft_masq_inet_ops = {
        .destroy        = nft_masq_inet_destroy,
        .dump           = nft_masq_dump,
        .validate       = nft_masq_validate,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_masq_inet_type __read_mostly = {
index 5ab4df5..ac48592 100644 (file)
@@ -539,6 +539,7 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
                return -EOPNOTSUPP;
        }
 
+       priv->len = len;
        return nft_parse_register_store(ctx, tb[NFTA_META_DREG], &priv->dreg,
                                        NULL, NFT_DATA_VALUE, len);
 }
@@ -664,6 +665,7 @@ int nft_meta_set_init(const struct nft_ctx *ctx,
                return -EOPNOTSUPP;
        }
 
+       priv->len = len;
        err = nft_parse_register_load(tb[NFTA_META_SREG], &priv->sreg, len);
        if (err < 0)
                return err;
@@ -750,24 +752,21 @@ static int nft_meta_get_offload(struct nft_offload_ctx *ctx,
        return 0;
 }
 
-static bool nft_meta_get_reduce(struct nft_regs_track *track,
-                               const struct nft_expr *expr)
+bool nft_meta_get_reduce(struct nft_regs_track *track,
+                        const struct nft_expr *expr)
 {
        const struct nft_meta *priv = nft_expr_priv(expr);
        const struct nft_meta *meta;
 
-       if (!track->regs[priv->dreg].selector ||
-           track->regs[priv->dreg].selector->ops != expr->ops) {
-               track->regs[priv->dreg].selector = expr;
-               track->regs[priv->dreg].bitwise = NULL;
+       if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
+               nft_reg_track_update(track, expr, priv->dreg, priv->len);
                return false;
        }
 
        meta = nft_expr_priv(track->regs[priv->dreg].selector);
        if (priv->key != meta->key ||
            priv->dreg != meta->dreg) {
-               track->regs[priv->dreg].selector = expr;
-               track->regs[priv->dreg].bitwise = NULL;
+               nft_reg_track_update(track, expr, priv->dreg, priv->len);
                return false;
        }
 
@@ -776,6 +775,7 @@ static bool nft_meta_get_reduce(struct nft_regs_track *track,
 
        return nft_expr_reduce_bitwise(track, expr);
 }
+EXPORT_SYMBOL_GPL(nft_meta_get_reduce);
 
 static const struct nft_expr_ops nft_meta_get_ops = {
        .type           = &nft_meta_type,
@@ -800,8 +800,7 @@ static bool nft_meta_set_reduce(struct nft_regs_track *track,
                if (track->regs[i].selector->ops != &nft_meta_get_ops)
                        continue;
 
-               track->regs[i].selector = NULL;
-               track->regs[i].bitwise = NULL;
+               __nft_reg_track_cancel(track, i);
        }
 
        return false;
index be1595d..4394df4 100644 (file)
@@ -317,6 +317,7 @@ static const struct nft_expr_ops nft_nat_ops = {
        .destroy        = nft_nat_destroy,
        .dump           = nft_nat_dump,
        .validate       = nft_nat_validate,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_nat_type __read_mostly = {
@@ -346,6 +347,7 @@ static const struct nft_expr_ops nft_nat_inet_ops = {
        .destroy        = nft_nat_destroy,
        .dump           = nft_nat_dump,
        .validate       = nft_nat_validate,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_inet_nat_type __read_mostly = {
index 1d378ef..81b40c6 100644 (file)
@@ -85,6 +85,16 @@ err:
        return err;
 }
 
+static bool nft_ng_inc_reduce(struct nft_regs_track *track,
+                                const struct nft_expr *expr)
+{
+       const struct nft_ng_inc *priv = nft_expr_priv(expr);
+
+       nft_reg_track_cancel(track, priv->dreg, NFT_REG32_SIZE);
+
+       return false;
+}
+
 static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg,
                       u32 modulus, enum nft_ng_types type, u32 offset)
 {
@@ -172,6 +182,16 @@ static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
                           priv->offset);
 }
 
+static bool nft_ng_random_reduce(struct nft_regs_track *track,
+                                const struct nft_expr *expr)
+{
+       const struct nft_ng_random *priv = nft_expr_priv(expr);
+
+       nft_reg_track_cancel(track, priv->dreg, NFT_REG32_SIZE);
+
+       return false;
+}
+
 static struct nft_expr_type nft_ng_type;
 static const struct nft_expr_ops nft_ng_inc_ops = {
        .type           = &nft_ng_type,
@@ -180,6 +200,7 @@ static const struct nft_expr_ops nft_ng_inc_ops = {
        .init           = nft_ng_inc_init,
        .destroy        = nft_ng_inc_destroy,
        .dump           = nft_ng_inc_dump,
+       .reduce         = nft_ng_inc_reduce,
 };
 
 static const struct nft_expr_ops nft_ng_random_ops = {
@@ -188,6 +209,7 @@ static const struct nft_expr_ops nft_ng_random_ops = {
        .eval           = nft_ng_random_eval,
        .init           = nft_ng_random_init,
        .dump           = nft_ng_random_dump,
+       .reduce         = nft_ng_random_reduce,
 };
 
 static const struct nft_expr_ops *
index 94b2327..5d8d91b 100644 (file)
@@ -91,6 +91,7 @@ static const struct nft_expr_ops nft_objref_ops = {
        .activate       = nft_objref_activate,
        .deactivate     = nft_objref_deactivate,
        .dump           = nft_objref_dump,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 struct nft_objref_map {
@@ -204,6 +205,7 @@ static const struct nft_expr_ops nft_objref_map_ops = {
        .deactivate     = nft_objref_map_deactivate,
        .destroy        = nft_objref_map_destroy,
        .dump           = nft_objref_map_dump,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static const struct nft_expr_ops *
index d82677e..5eed18f 100644 (file)
@@ -120,6 +120,30 @@ static int nft_osf_validate(const struct nft_ctx *ctx,
                                                    (1 << NF_INET_FORWARD));
 }
 
+static bool nft_osf_reduce(struct nft_regs_track *track,
+                          const struct nft_expr *expr)
+{
+       struct nft_osf *priv = nft_expr_priv(expr);
+       struct nft_osf *osf;
+
+       if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
+               nft_reg_track_update(track, expr, priv->dreg, NFT_OSF_MAXGENRELEN);
+               return false;
+       }
+
+       osf = nft_expr_priv(track->regs[priv->dreg].selector);
+       if (priv->flags != osf->flags ||
+           priv->ttl != osf->ttl) {
+               nft_reg_track_update(track, expr, priv->dreg, NFT_OSF_MAXGENRELEN);
+               return false;
+       }
+
+       if (!track->regs[priv->dreg].bitwise)
+               return true;
+
+       return false;
+}
+
 static struct nft_expr_type nft_osf_type;
 static const struct nft_expr_ops nft_osf_op = {
        .eval           = nft_osf_eval,
@@ -128,6 +152,7 @@ static const struct nft_expr_ops nft_osf_op = {
        .dump           = nft_osf_dump,
        .type           = &nft_osf_type,
        .validate       = nft_osf_validate,
+       .reduce         = nft_osf_reduce,
 };
 
 static struct nft_expr_type nft_osf_type __read_mostly = {
index 5cc06ae..2e7ac00 100644 (file)
@@ -216,10 +216,8 @@ static bool nft_payload_reduce(struct nft_regs_track *track,
        const struct nft_payload *priv = nft_expr_priv(expr);
        const struct nft_payload *payload;
 
-       if (!track->regs[priv->dreg].selector ||
-           track->regs[priv->dreg].selector->ops != expr->ops) {
-               track->regs[priv->dreg].selector = expr;
-               track->regs[priv->dreg].bitwise = NULL;
+       if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
+               nft_reg_track_update(track, expr, priv->dreg, priv->len);
                return false;
        }
 
@@ -227,8 +225,7 @@ static bool nft_payload_reduce(struct nft_regs_track *track,
        if (priv->base != payload->base ||
            priv->offset != payload->offset ||
            priv->len != payload->len) {
-               track->regs[priv->dreg].selector = expr;
-               track->regs[priv->dreg].bitwise = NULL;
+               nft_reg_track_update(track, expr, priv->dreg, priv->len);
                return false;
        }
 
@@ -815,8 +812,7 @@ static bool nft_payload_set_reduce(struct nft_regs_track *track,
                    track->regs[i].selector->ops != &nft_payload_fast_ops)
                        continue;
 
-               track->regs[i].selector = NULL;
-               track->regs[i].bitwise = NULL;
+               __nft_reg_track_cancel(track, i);
        }
 
        return false;
index 9ba1de5..15e4b76 100644 (file)
@@ -164,6 +164,7 @@ static const struct nft_expr_ops nft_queue_ops = {
        .eval           = nft_queue_eval,
        .init           = nft_queue_init,
        .dump           = nft_queue_dump,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static const struct nft_expr_ops nft_queue_sreg_ops = {
@@ -172,6 +173,7 @@ static const struct nft_expr_ops nft_queue_sreg_ops = {
        .eval           = nft_queue_sreg_eval,
        .init           = nft_queue_sreg_init,
        .dump           = nft_queue_sreg_dump,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static const struct nft_expr_ops *
index f394a0b..d7db57e 100644 (file)
@@ -254,6 +254,7 @@ static const struct nft_expr_ops nft_quota_ops = {
        .destroy        = nft_quota_destroy,
        .clone          = nft_quota_clone,
        .dump           = nft_quota_dump,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_quota_type __read_mostly = {
index e4a1c44..66f7748 100644 (file)
@@ -140,6 +140,7 @@ static const struct nft_expr_ops nft_range_ops = {
        .eval           = nft_range_eval,
        .init           = nft_range_init,
        .dump           = nft_range_dump,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 struct nft_expr_type nft_range_type __read_mostly = {
index ba09890..5086adf 100644 (file)
@@ -134,6 +134,7 @@ static const struct nft_expr_ops nft_redir_ipv4_ops = {
        .destroy        = nft_redir_ipv4_destroy,
        .dump           = nft_redir_dump,
        .validate       = nft_redir_validate,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_redir_ipv4_type __read_mostly = {
@@ -183,6 +184,7 @@ static const struct nft_expr_ops nft_redir_ipv6_ops = {
        .destroy        = nft_redir_ipv6_destroy,
        .dump           = nft_redir_dump,
        .validate       = nft_redir_validate,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_redir_ipv6_type __read_mostly = {
@@ -225,6 +227,7 @@ static const struct nft_expr_ops nft_redir_inet_ops = {
        .destroy        = nft_redir_inet_destroy,
        .dump           = nft_redir_dump,
        .validate       = nft_redir_validate,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_redir_inet_type __read_mostly = {
index 554caf9..973fa31 100644 (file)
@@ -80,6 +80,7 @@ static const struct nft_expr_ops nft_reject_inet_ops = {
        .init           = nft_reject_init,
        .dump           = nft_reject_dump,
        .validate       = nft_reject_inet_validate,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_reject_inet_type __read_mostly = {
index 61cd8c4..7865cd8 100644 (file)
@@ -159,6 +159,7 @@ static const struct nft_expr_ops nft_reject_netdev_ops = {
        .init           = nft_reject_init,
        .dump           = nft_reject_dump,
        .validate       = nft_reject_netdev_validate,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_reject_netdev_type __read_mostly = {
index bcd01a6..71931ec 100644 (file)
@@ -191,6 +191,7 @@ static const struct nft_expr_ops nft_rt_get_ops = {
        .init           = nft_rt_get_init,
        .dump           = nft_rt_get_dump,
        .validate       = nft_rt_validate,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 struct nft_expr_type nft_rt_type __read_mostly = {
index d601974..bd3792f 100644 (file)
@@ -10,6 +10,7 @@
 struct nft_socket {
        enum nft_socket_keys            key:8;
        u8                              level;
+       u8                              len;
        union {
                u8                      dreg;
        };
@@ -179,6 +180,7 @@ static int nft_socket_init(const struct nft_ctx *ctx,
                return -EOPNOTSUPP;
        }
 
+       priv->len = len;
        return nft_parse_register_store(ctx, tb[NFTA_SOCKET_DREG], &priv->dreg,
                                        NULL, NFT_DATA_VALUE, len);
 }
@@ -198,6 +200,31 @@ static int nft_socket_dump(struct sk_buff *skb,
        return 0;
 }
 
+static bool nft_socket_reduce(struct nft_regs_track *track,
+                             const struct nft_expr *expr)
+{
+       const struct nft_socket *priv = nft_expr_priv(expr);
+       const struct nft_socket *socket;
+
+       if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
+               nft_reg_track_update(track, expr, priv->dreg, priv->len);
+               return false;
+       }
+
+       socket = nft_expr_priv(track->regs[priv->dreg].selector);
+       if (priv->key != socket->key ||
+           priv->dreg != socket->dreg ||
+           priv->level != socket->level) {
+               nft_reg_track_update(track, expr, priv->dreg, priv->len);
+               return false;
+       }
+
+       if (!track->regs[priv->dreg].bitwise)
+               return true;
+
+       return nft_expr_reduce_bitwise(track, expr);
+}
+
 static struct nft_expr_type nft_socket_type;
 static const struct nft_expr_ops nft_socket_ops = {
        .type           = &nft_socket_type,
@@ -205,6 +232,7 @@ static const struct nft_expr_ops nft_socket_ops = {
        .eval           = nft_socket_eval,
        .init           = nft_socket_init,
        .dump           = nft_socket_dump,
+       .reduce         = nft_socket_reduce,
 };
 
 static struct nft_expr_type nft_socket_type __read_mostly = {
index 1133e06..6cf9a04 100644 (file)
@@ -288,6 +288,7 @@ static const struct nft_expr_ops nft_synproxy_ops = {
        .dump           = nft_synproxy_dump,
        .type           = &nft_synproxy_type,
        .validate       = nft_synproxy_validate,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_synproxy_type __read_mostly = {
index b5b09a9..801f013 100644 (file)
@@ -320,6 +320,7 @@ static const struct nft_expr_ops nft_tproxy_ops = {
        .init           = nft_tproxy_init,
        .destroy        = nft_tproxy_destroy,
        .dump           = nft_tproxy_dump,
+       .reduce         = NFT_REDUCE_READONLY,
 };
 
 static struct nft_expr_type nft_tproxy_type __read_mostly = {
index 3b27926..d0f9b1d 100644 (file)
@@ -17,6 +17,7 @@ struct nft_tunnel {
        enum nft_tunnel_keys    key:8;
        u8                      dreg;
        enum nft_tunnel_mode    mode:8;
+       u8                      len;
 };
 
 static void nft_tunnel_get_eval(const struct nft_expr *expr,
@@ -101,6 +102,7 @@ static int nft_tunnel_get_init(const struct nft_ctx *ctx,
                priv->mode = NFT_TUNNEL_MODE_NONE;
        }
 
+       priv->len = len;
        return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg,
                                        NULL, NFT_DATA_VALUE, len);
 }
@@ -122,6 +124,31 @@ nla_put_failure:
        return -1;
 }
 
+static bool nft_tunnel_get_reduce(struct nft_regs_track *track,
+                                 const struct nft_expr *expr)
+{
+       const struct nft_tunnel *priv = nft_expr_priv(expr);
+       const struct nft_tunnel *tunnel;
+
+       if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
+               nft_reg_track_update(track, expr, priv->dreg, priv->len);
+               return false;
+       }
+
+       tunnel = nft_expr_priv(track->regs[priv->dreg].selector);
+       if (priv->key != tunnel->key ||
+           priv->dreg != tunnel->dreg ||
+           priv->mode != tunnel->mode) {
+               nft_reg_track_update(track, expr, priv->dreg, priv->len);
+               return false;
+       }
+
+       if (!track->regs[priv->dreg].bitwise)
+               return true;
+
+       return false;
+}
+
 static struct nft_expr_type nft_tunnel_type;
 static const struct nft_expr_ops nft_tunnel_get_ops = {
        .type           = &nft_tunnel_type,
@@ -129,6 +156,7 @@ static const struct nft_expr_ops nft_tunnel_get_ops = {
        .eval           = nft_tunnel_get_eval,
        .init           = nft_tunnel_get_init,
        .dump           = nft_tunnel_get_dump,
+       .reduce         = nft_tunnel_get_reduce,
 };
 
 static struct nft_expr_type nft_tunnel_type __read_mostly = {
index cbbbc4e..becb88f 100644 (file)
@@ -27,6 +27,7 @@ struct nft_xfrm {
        u8                      dreg;
        u8                      dir;
        u8                      spnum;
+       u8                      len;
 };
 
 static int nft_xfrm_get_init(const struct nft_ctx *ctx,
@@ -86,6 +87,7 @@ static int nft_xfrm_get_init(const struct nft_ctx *ctx,
 
        priv->spnum = spnum;
 
+       priv->len = len;
        return nft_parse_register_store(ctx, tb[NFTA_XFRM_DREG], &priv->dreg,
                                        NULL, NFT_DATA_VALUE, len);
 }
@@ -252,6 +254,31 @@ static int nft_xfrm_validate(const struct nft_ctx *ctx, const struct nft_expr *e
        return nft_chain_validate_hooks(ctx->chain, hooks);
 }
 
+static bool nft_xfrm_reduce(struct nft_regs_track *track,
+                           const struct nft_expr *expr)
+{
+       const struct nft_xfrm *priv = nft_expr_priv(expr);
+       const struct nft_xfrm *xfrm;
+
+       if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
+               nft_reg_track_update(track, expr, priv->dreg, priv->len);
+               return false;
+       }
+
+       xfrm = nft_expr_priv(track->regs[priv->dreg].selector);
+       if (priv->key != xfrm->key ||
+           priv->dreg != xfrm->dreg ||
+           priv->dir != xfrm->dir ||
+           priv->spnum != xfrm->spnum) {
+               nft_reg_track_update(track, expr, priv->dreg, priv->len);
+               return false;
+       }
+
+       if (!track->regs[priv->dreg].bitwise)
+               return true;
+
+       return nft_expr_reduce_bitwise(track, expr);
+}
 
 static struct nft_expr_type nft_xfrm_type;
 static const struct nft_expr_ops nft_xfrm_get_ops = {
@@ -261,6 +288,7 @@ static const struct nft_expr_ops nft_xfrm_get_ops = {
        .init           = nft_xfrm_get_init,
        .dump           = nft_xfrm_get_dump,
        .validate       = nft_xfrm_validate,
+       .reduce         = nft_xfrm_reduce,
 };
 
 static struct nft_expr_type nft_xfrm_type __read_mostly = {
index beb0e57..54c0830 100644 (file)
@@ -885,6 +885,8 @@ int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len,
        unsigned char bitmask;
        unsigned char byte;
 
+       if (offset >= bitmap_len)
+               return -1;
        byte_offset = offset / 8;
        byte = bitmap[byte_offset];
        bit_spot = offset;
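
The walk dereferenced bitmap[offset / 8] before its loop ever compared bit_spot against bitmap_len, so a caller-supplied offset at or past bitmap_len read beyond the bitmap; the new guard rejects it up front. The shape of the bug, with invented values:

    /* Without the guard: bitmap_len = 16 bits => bitmap[] holds 2
     * bytes, yet offset = 64 made the very next line read bitmap[8]: */
    byte_offset = offset / 8;       /* 8, past the 2-byte array */
    byte = bitmap[byte_offset];     /* out-of-bounds read */
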
index 8b41248..5176f6c 100644 (file)
@@ -346,7 +346,7 @@ size_t ovs_key_attr_size(void)
        /* Whenever adding new OVS_KEY_ FIELDS, we should consider
         * updating this function.
         */
-       BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 30);
+       BUILD_BUG_ON(OVS_KEY_ATTR_MAX != 32);
 
        return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
                + nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
@@ -482,7 +482,14 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
                        return -EINVAL;
                }
 
-               if (attrs & (1 << type)) {
+               if (type == OVS_KEY_ATTR_PACKET_TYPE ||
+                   type == OVS_KEY_ATTR_ND_EXTENSIONS ||
+                   type == OVS_KEY_ATTR_TUNNEL_INFO) {
+                       OVS_NLERR(log, "Key type %d is not supported", type);
+                       return -EINVAL;
+               }
+
+               if (attrs & (1ULL << type)) {
                        OVS_NLERR(log, "Duplicate key (type %d).", type);
                        return -EINVAL;
                }
@@ -495,7 +502,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
                }
 
                if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
-                       attrs |= 1 << type;
+                       attrs |= 1ULL << type;
                        a[type] = nla;
                }
        }
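
With OVS_KEY_ATTR_MAX now past bit 31, shifting a plain int is undefined for the new attribute types, so every shift feeding the 64-bit attrs mask must be widened; the three attribute types that can never appear in a userspace-supplied key are additionally rejected by name. A standalone illustration of the shift hazard:

    #include <stdint.h>

    uint64_t attrs = 0;
    int type = 32;

    attrs |= 1 << type;     /* undefined: shift count >= width of int */
    attrs |= 1ULL << type;  /* correct: performed in 64 bits */
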
index 1b93ce1..c39c098 100644 (file)
@@ -2318,8 +2318,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                                        copy_skb = skb_get(skb);
                                        skb_head = skb->data;
                                }
-                               if (copy_skb)
+                               if (copy_skb) {
+                                       memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0,
+                                              sizeof(PACKET_SKB_CB(copy_skb)->sa.ll));
                                        skb_set_owner_r(copy_skb, sk);
+                               }
                        }
                        snaplen = po->rx_ring.frame_size - macoff;
                        if ((int)snaplen < 0) {
@@ -3464,6 +3467,8 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
        sock_recv_ts_and_drops(msg, sk, skb);
 
        if (msg->msg_name) {
+               const size_t max_len = min(sizeof(skb->cb),
+                                          sizeof(struct sockaddr_storage));
                int copy_len;
 
                /* If the address length field is there to be filled
@@ -3486,6 +3491,10 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                                msg->msg_namelen = sizeof(struct sockaddr_ll);
                        }
                }
+               if (WARN_ON_ONCE(copy_len > max_len)) {
+                       copy_len = max_len;
+                       msg->msg_namelen = copy_len;
+               }
                memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
        }
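
Two related hardenings here: zeroing the sockaddr scratch area in the skb control buffer stops uninitialized cb bytes from leaking to userspace, and the clamp guarantees the later memcpy into msg_name can never read past that scratch area even if copy_len was computed too large. The clamp pattern in isolation (a sketch, not the af_packet code):

#include <stdio.h>
#include <string.h>

#define SCRATCH_SIZE 48                 /* stands in for sizeof(skb->cb) */

static size_t copy_addr(char *dst, size_t want,
			const char *scratch /* SCRATCH_SIZE bytes */)
{
	size_t copy_len = want;

	if (copy_len > SCRATCH_SIZE) {  /* WARN_ON_ONCE() in the kernel */
		fprintf(stderr, "clamping %zu to %d\n", copy_len, SCRATCH_SIZE);
		copy_len = SCRATCH_SIZE;
	}
	memcpy(dst, scratch, copy_len);
	return copy_len;                /* caller sets msg_namelen to this */
}

int main(void)
{
	char scratch[SCRATCH_SIZE] = "filled-in address bytes";
	char out[128];

	printf("copied %zu bytes\n", copy_addr(out, sizeof(out), scratch));
	return 0;
}
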
 
index 65218b7..2b582da 100644 (file)
@@ -146,7 +146,7 @@ EXPORT_SYMBOL(phonet_header_ops);
  * Prepends an ISI header and sends a datagram.
  */
 static int pn_send(struct sk_buff *skb, struct net_device *dev,
-                       u16 dst, u16 src, u8 res, u8 irq)
+                       u16 dst, u16 src, u8 res)
 {
        struct phonethdr *ph;
        int err;
@@ -182,7 +182,7 @@ static int pn_send(struct sk_buff *skb, struct net_device *dev,
        if (skb->pkt_type == PACKET_LOOPBACK) {
                skb_reset_mac_header(skb);
                skb_orphan(skb);
-               err = (irq ? netif_rx(skb) : netif_rx_ni(skb)) ? -ENOBUFS : 0;
+               err = netif_rx(skb) ? -ENOBUFS : 0;
        } else {
                err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                                        NULL, NULL, skb->len);
@@ -214,7 +214,7 @@ static int pn_raw_send(const void *data, int len, struct net_device *dev,
        skb_reserve(skb, MAX_PHONET_HEADER);
        __skb_put(skb, len);
        skb_copy_to_linear_data(skb, data, len);
-       return pn_send(skb, dev, dst, src, res, 1);
+       return pn_send(skb, dev, dst, src, res);
 }
 
 /*
@@ -269,7 +269,7 @@ int pn_skb_send(struct sock *sk, struct sk_buff *skb,
        if (!pn_addr(src))
                src = pn_object(saddr, pn_obj(src));
 
-       err = pn_send(skb, dev, dst, src, res, 0);
+       err = pn_send(skb, dev, dst, src, res);
        dev_put(dev);
        return err;
 
index 5b1927d..dac4fdc 100644 (file)
@@ -78,6 +78,7 @@ struct rfkill_data {
        struct mutex            mtx;
        wait_queue_head_t       read_wait;
        bool                    input_handler;
+       u8                      max_size;
 };
 
 
@@ -1153,6 +1154,8 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
        if (!data)
                return -ENOMEM;
 
+       data->max_size = RFKILL_EVENT_SIZE_V1;
+
        INIT_LIST_HEAD(&data->events);
        mutex_init(&data->mtx);
        init_waitqueue_head(&data->read_wait);
@@ -1235,6 +1238,7 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
                                list);
 
        sz = min_t(unsigned long, sizeof(ev->ev), count);
+       sz = min_t(unsigned long, sz, data->max_size);
        ret = sz;
        if (copy_to_user(buf, &ev->ev, sz))
                ret = -EFAULT;
@@ -1249,6 +1253,7 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
 static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *pos)
 {
+       struct rfkill_data *data = file->private_data;
        struct rfkill *rfkill;
        struct rfkill_event_ext ev;
        int ret;
@@ -1263,6 +1268,7 @@ static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
         * our API version even in a write() call, if it cares.
         */
        count = min(count, sizeof(ev));
+       count = min_t(size_t, count, data->max_size);
        if (copy_from_user(&ev, buf, count))
                return -EFAULT;
 
@@ -1322,31 +1328,47 @@ static int rfkill_fop_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-#ifdef CONFIG_RFKILL_INPUT
 static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
 {
        struct rfkill_data *data = file->private_data;
+       int ret = -ENOSYS;
+       u32 size;
 
        if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
                return -ENOSYS;
 
-       if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
-               return -ENOSYS;
-
        mutex_lock(&data->mtx);
-
-       if (!data->input_handler) {
-               if (atomic_inc_return(&rfkill_input_disabled) == 1)
-                       printk(KERN_DEBUG "rfkill: input handler disabled\n");
-               data->input_handler = true;
+       switch (_IOC_NR(cmd)) {
+#ifdef CONFIG_RFKILL_INPUT
+       case RFKILL_IOC_NOINPUT:
+               if (!data->input_handler) {
+                       if (atomic_inc_return(&rfkill_input_disabled) == 1)
+                               printk(KERN_DEBUG "rfkill: input handler disabled\n");
+                       data->input_handler = true;
+               }
+               ret = 0;
+               break;
+#endif
+       case RFKILL_IOC_MAX_SIZE:
+               if (get_user(size, (__u32 __user *)arg)) {
+                       ret = -EFAULT;
+                       break;
+               }
+               if (size < RFKILL_EVENT_SIZE_V1 || size > U8_MAX) {
+                       ret = -EINVAL;
+                       break;
+               }
+               data->max_size = size;
+               ret = 0;
+               break;
+       default:
+               break;
        }
-
        mutex_unlock(&data->mtx);
 
-       return 0;
+       return ret;
 }
-#endif
 
 static const struct file_operations rfkill_fops = {
        .owner          = THIS_MODULE,
@@ -1355,10 +1377,8 @@ static const struct file_operations rfkill_fops = {
        .write          = rfkill_fop_write,
        .poll           = rfkill_fop_poll,
        .release        = rfkill_fop_release,
-#ifdef CONFIG_RFKILL_INPUT
        .unlocked_ioctl = rfkill_fop_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
-#endif
        .llseek         = no_llseek,
 };
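
The per-fd limit defaults to RFKILL_EVENT_SIZE_V1, so old binaries keep getting v1-sized events; a client opts in to larger events by declaring the biggest struct it can parse. A userspace sketch; the RFKILL_IOCTL_MAX_SIZE macro name is taken from the matching uapi change in this series and may not exist on older headers, hence the #ifdef:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rfkill.h>

int main(void)
{
	__u32 max_size = sizeof(struct rfkill_event_ext);
	int fd = open("/dev/rfkill", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
#ifdef RFKILL_IOCTL_MAX_SIZE
	if (ioctl(fd, RFKILL_IOCTL_MAX_SIZE, &max_size))
		perror("ioctl");        /* old kernel: stay on v1 events */
#endif
	printf("requesting up to %u bytes per event\n", max_size);

	struct rfkill_event_ext ev;
	ssize_t n = read(fd, &ev, sizeof(ev));  /* one event per read() */

	if (n >= (ssize_t)RFKILL_EVENT_SIZE_V1)
		printf("idx %u type %u soft %u hard %u\n",
		       ev.idx, ev.type, ev.soft, ev.hard);
	close(fd);
	return 0;
}
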
 
index 89e46f6..6a34f7b 100644 (file)
@@ -420,6 +420,19 @@ static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
                break;
        case IPPROTO_UDP:
                break;
+#ifdef CONFIG_NF_CT_PROTO_GRE
+       case IPPROTO_GRE: {
+               struct nf_conntrack_tuple *tuple;
+
+               if (ct->status & IPS_NAT_MASK)
+                       return;
+               tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+               /* No support for GRE v1 */
+               if (tuple->src.u.gre.key || tuple->dst.u.gre.key)
+                       return;
+               break;
+       }
+#endif
        default:
                return;
        }
@@ -439,6 +452,8 @@ tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
        struct flow_ports *ports;
        unsigned int thoff;
        struct iphdr *iph;
+       size_t hdrsize;
+       u8 ipproto;
 
        if (!pskb_network_may_pull(skb, sizeof(*iph)))
                return false;
@@ -450,29 +465,54 @@ tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
            unlikely(thoff != sizeof(struct iphdr)))
                return false;
 
-       if (iph->protocol != IPPROTO_TCP &&
-           iph->protocol != IPPROTO_UDP)
+       ipproto = iph->protocol;
+       switch (ipproto) {
+       case IPPROTO_TCP:
+               hdrsize = sizeof(struct tcphdr);
+               break;
+       case IPPROTO_UDP:
+               hdrsize = sizeof(*ports);
+               break;
+#ifdef CONFIG_NF_CT_PROTO_GRE
+       case IPPROTO_GRE:
+               hdrsize = sizeof(struct gre_base_hdr);
+               break;
+#endif
+       default:
                return false;
+       }
 
        if (iph->ttl <= 1)
                return false;
 
-       if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
-                                       thoff + sizeof(struct tcphdr) :
-                                       thoff + sizeof(*ports)))
+       if (!pskb_network_may_pull(skb, thoff + hdrsize))
                return false;
 
-       iph = ip_hdr(skb);
-       if (iph->protocol == IPPROTO_TCP)
+       switch (ipproto) {
+       case IPPROTO_TCP:
                *tcph = (void *)(skb_network_header(skb) + thoff);
+               fallthrough;
+       case IPPROTO_UDP:
+               ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
+               tuple->src_port = ports->source;
+               tuple->dst_port = ports->dest;
+               break;
+       case IPPROTO_GRE: {
+               struct gre_base_hdr *greh;
+
+               greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
+               if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
+                       return false;
+               break;
+       }
+       }
+
+       iph = ip_hdr(skb);
 
-       ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
        tuple->src_v4.s_addr = iph->saddr;
        tuple->dst_v4.s_addr = iph->daddr;
-       tuple->src_port = ports->source;
-       tuple->dst_port = ports->dest;
        tuple->l3proto = AF_INET;
-       tuple->l4proto = iph->protocol;
+       tuple->l4proto = ipproto;
 
        return true;
 }
@@ -485,36 +525,63 @@ tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
        struct flow_ports *ports;
        struct ipv6hdr *ip6h;
        unsigned int thoff;
+       size_t hdrsize;
+       u8 nexthdr;
 
        if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
                return false;
 
        ip6h = ipv6_hdr(skb);
+       thoff = sizeof(*ip6h);
 
-       if (ip6h->nexthdr != IPPROTO_TCP &&
-           ip6h->nexthdr != IPPROTO_UDP)
-               return false;
+       nexthdr = ip6h->nexthdr;
+       switch (nexthdr) {
+       case IPPROTO_TCP:
+               hdrsize = sizeof(struct tcphdr);
+               break;
+       case IPPROTO_UDP:
+               hdrsize = sizeof(*ports);
+               break;
+#ifdef CONFIG_NF_CT_PROTO_GRE
+       case IPPROTO_GRE:
+               hdrsize = sizeof(struct gre_base_hdr);
+               break;
+#endif
+       default:
+               return false;
+       }
 
        if (ip6h->hop_limit <= 1)
                return false;
 
-       thoff = sizeof(*ip6h);
-       if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
-                                       thoff + sizeof(struct tcphdr) :
-                                       thoff + sizeof(*ports)))
+       if (!pskb_network_may_pull(skb, thoff + hdrsize))
                return false;
 
-       ip6h = ipv6_hdr(skb);
-       if (ip6h->nexthdr == IPPROTO_TCP)
+       switch (nexthdr) {
+       case IPPROTO_TCP:
                *tcph = (void *)(skb_network_header(skb) + thoff);
+               fallthrough;
+       case IPPROTO_UDP:
+               ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
+               tuple->src_port = ports->source;
+               tuple->dst_port = ports->dest;
+               break;
+       case IPPROTO_GRE: {
+               struct gre_base_hdr *greh;
+
+               greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
+               if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
+                       return false;
+               break;
+       }
+       }
+
+       ip6h = ipv6_hdr(skb);
 
-       ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
        tuple->src_v6 = ip6h->saddr;
        tuple->dst_v6 = ip6h->daddr;
-       tuple->src_port = ports->source;
-       tuple->dst_port = ports->dest;
        tuple->l3proto = AF_INET6;
-       tuple->l4proto = ip6h->nexthdr;
+       tuple->l4proto = nexthdr;
 
        return true;
 }
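
Only version-0 (classic) GRE qualifies for flow offload; PPTP's version-1 GRE uses the key field as a call ID and stays on the slow path, which is also why tuples with a GRE key are rejected earlier. A standalone version of the gate; the constants are re-declared here for a userspace build (in-kernel they come from if_tunnel.h and net/gre.h):

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct gre_base_hdr {
	uint16_t flags;                 /* __be16 in the kernel */
	uint16_t protocol;
};

#define GRE_VERSION   htons(0x0007)
#define GRE_VERSION_0 htons(0x0000)

static bool gre_offloadable(const struct gre_base_hdr *greh)
{
	return (greh->flags & GRE_VERSION) == GRE_VERSION_0;
}

int main(void)
{
	struct gre_base_hdr v0 = { .flags = htons(0x0000) };
	struct gre_base_hdr v1 = { .flags = htons(0x2001) }; /* PPTP-style */

	printf("v0: %d, v1: %d\n", gre_offloadable(&v0), gre_offloadable(&v1));
	return 0;
}
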
index 756e2dc..883454c 100644 (file)
@@ -390,6 +390,13 @@ static int tcf_vlan_offload_act_setup(struct tc_action *act, void *entry_data,
                        entry->vlan.proto = tcf_vlan_push_proto(act);
                        entry->vlan.prio = tcf_vlan_push_prio(act);
                        break;
+               case TCA_VLAN_ACT_POP_ETH:
+                       entry->id = FLOW_ACTION_VLAN_POP_ETH;
+                       break;
+               case TCA_VLAN_ACT_PUSH_ETH:
+                       entry->id = FLOW_ACTION_VLAN_PUSH_ETH;
+                       tcf_vlan_push_eth(entry->vlan_push_eth.src, entry->vlan_push_eth.dst, act);
+                       break;
                default:
                        return -EOPNOTSUPP;
                }
@@ -407,6 +414,12 @@ static int tcf_vlan_offload_act_setup(struct tc_action *act, void *entry_data,
                case TCA_VLAN_ACT_MODIFY:
                        fl_action->id = FLOW_ACTION_VLAN_MANGLE;
                        break;
+               case TCA_VLAN_ACT_POP_ETH:
+                       fl_action->id = FLOW_ACTION_VLAN_POP_ETH;
+                       break;
+               case TCA_VLAN_ACT_PUSH_ETH:
+                       fl_action->id = FLOW_ACTION_VLAN_PUSH_ETH;
+                       break;
                default:
                        return -EOPNOTSUPP;
                }
index 1a9b1f1..c80fc49 100644 (file)
@@ -25,6 +25,7 @@
 #include <net/geneve.h>
 #include <net/vxlan.h>
 #include <net/erspan.h>
+#include <net/gtp.h>
 
 #include <net/dst.h>
 #include <net/dst_metadata.h>
@@ -723,6 +724,7 @@ enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
        [TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
        [TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
        [TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
+       [TCA_FLOWER_KEY_ENC_OPTS_GTP]           = { .type = NLA_NESTED },
 };
 
 static const struct nla_policy
@@ -746,6 +748,12 @@ erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
        [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
 };
 
+static const struct nla_policy
+gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
+       [TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]      = { .type = NLA_U8 },
+       [TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]           = { .type = NLA_U8 },
+};
+
 static const struct nla_policy
 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
        [TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]    = { .type = NLA_U8 },
@@ -1262,6 +1270,49 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
        return sizeof(*md);
 }
 
+static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
+                         int depth, int option_len,
+                         struct netlink_ext_ack *extack)
+{
+       struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1];
+       struct gtp_pdu_session_info *sinfo;
+       u8 len = key->enc_opts.len;
+       int err;
+
+       sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len];
+       memset(sinfo, 0xff, option_len);
+
+       if (!depth)
+               return sizeof(*sinfo);
+
+       if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) {
+               NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask");
+               return -EINVAL;
+       }
+
+       err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla,
+                              gtp_opt_policy, extack);
+       if (err < 0)
+               return err;
+
+       if (!option_len &&
+           (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] ||
+            !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Missing tunnel key gtp option pdu type or qfi");
+               return -EINVAL;
+       }
+
+       if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE])
+               sinfo->pdu_type =
+                       nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]);
+
+       if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])
+               sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]);
+
+       return sizeof(*sinfo);
+}
+
 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
                          struct fl_flow_key *mask,
                          struct netlink_ext_ack *extack)
@@ -1386,6 +1437,38 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
                                return -EINVAL;
                        }
                        break;
+               case TCA_FLOWER_KEY_ENC_OPTS_GTP:
+                       if (key->enc_opts.dst_opt_type) {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "Duplicate type for gtp options");
+                               return -EINVAL;
+                       }
+                       option_len = 0;
+                       key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
+                       option_len = fl_set_gtp_opt(nla_opt_key, key,
+                                                   key_depth, option_len,
+                                                   extack);
+                       if (option_len < 0)
+                               return option_len;
+
+                       key->enc_opts.len += option_len;
+                       /* At the same time we need to parse through the mask
+                        * in order to verify exact and mask attribute lengths.
+                        */
+                       mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
+                       option_len = fl_set_gtp_opt(nla_opt_msk, mask,
+                                                   msk_depth, option_len,
+                                                   extack);
+                       if (option_len < 0)
+                               return option_len;
+
+                       mask->enc_opts.len += option_len;
+                       if (key->enc_opts.len != mask->enc_opts.len) {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "Key and mask miss aligned");
+                               return -EINVAL;
+                       }
+                       break;
                default:
                        NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
                        return -EINVAL;
@@ -2761,6 +2844,34 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
+static int fl_dump_key_gtp_opt(struct sk_buff *skb,
+                              struct flow_dissector_key_enc_opts *enc_opts)
+{
+       struct gtp_pdu_session_info *session_info;
+       struct nlattr *nest;
+
+       nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP);
+       if (!nest)
+               goto nla_put_failure;
+
+       session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0];
+
+       if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,
+                      session_info->pdu_type))
+               goto nla_put_failure;
+
+       if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi))
+               goto nla_put_failure;
+
+       nla_nest_end(skb, nest);
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nest);
+       return -EMSGSIZE;
+}
+
 static int fl_dump_key_ct(struct sk_buff *skb,
                          struct flow_dissector_key_ct *key,
                          struct flow_dissector_key_ct *mask)
@@ -2824,6 +2935,11 @@ static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
                if (err)
                        goto nla_put_failure;
                break;
+       case TUNNEL_GTP_OPT:
+               err = fl_dump_key_gtp_opt(skb, enc_opts);
+               if (err)
+                       goto nla_put_failure;
+               break;
        default:
                goto nla_put_failure;
        }
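
Flower stores tunnel options as raw key/mask bytes: the mask defaults to all-ones (exact match) via the memset above, an explicit mask attribute can relax it, and key and mask option lengths must end up equal. For GTP the payload is the two-byte pdu_type/qfi pair. A model of those match semantics (not the flower code itself; the struct mirrors gtp_pdu_session_info):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct gtp_pdu_session_info {
	uint8_t pdu_type;
	uint8_t qfi;
};

static bool gtp_opt_match(const struct gtp_pdu_session_info *pkt,
			  const struct gtp_pdu_session_info *key,
			  const struct gtp_pdu_session_info *mask)
{
	return (pkt->pdu_type & mask->pdu_type) ==
	       (key->pdu_type & mask->pdu_type) &&
	       (pkt->qfi & mask->qfi) == (key->qfi & mask->qfi);
}

int main(void)
{
	struct gtp_pdu_session_info key = { .pdu_type = 1, .qfi = 9 };
	struct gtp_pdu_session_info pkt = { .pdu_type = 1, .qfi = 9 };
	struct gtp_pdu_session_info mask;

	memset(&mask, 0xff, sizeof(mask));      /* exact match, as above */
	printf("match: %d\n", gtp_opt_match(&pkt, &key, &mask));

	mask.qfi = 0;                           /* wildcard the QFI */
	pkt.qfi = 42;
	printf("match: %d\n", gtp_opt_match(&pkt, &key, &mask));
	return 0;
}
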
index 034e2c7..d9c6d8f 100644 (file)
@@ -61,10 +61,6 @@ static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
                r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
                r->idiag_retrans = asoc->rtx_data_chunks;
                r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies);
-       } else {
-               r->idiag_timer = 0;
-               r->idiag_retrans = 0;
-               r->idiag_expires = 0;
        }
 }
 
@@ -144,13 +140,14 @@ static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
        r = nlmsg_data(nlh);
        BUG_ON(!sk_fullsock(sk));
 
+       r->idiag_timer = 0;
+       r->idiag_retrans = 0;
+       r->idiag_expires = 0;
        if (asoc) {
                inet_diag_msg_sctpasoc_fill(r, sk, asoc);
        } else {
                inet_diag_msg_common_fill(r, sk);
                r->idiag_state = sk->sk_state;
-               r->idiag_timer = 0;
-               r->idiag_retrans = 0;
        }
 
        if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
index 640af9a..875efcd 100644 (file)
@@ -4,4 +4,5 @@ obj-$(CONFIG_SMC)       += smc.o
 obj-$(CONFIG_SMC_DIAG) += smc_diag.o
 smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o
 smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o smc_ism.o smc_netlink.o smc_stats.o
-smc-y += smc_tracepoint.o smc_sysctl.o
+smc-y += smc_tracepoint.o
+smc-$(CONFIG_SYSCTL) += smc_sysctl.o
index e508e4f..f0d118e 100644 (file)
@@ -3179,11 +3179,17 @@ unsigned int smc_net_id;
 
 static __net_init int smc_net_init(struct net *net)
 {
+       int rc;
+
+       rc = smc_sysctl_net_init(net);
+       if (rc)
+               return rc;
        return smc_pnet_net_init(net);
 }
 
 static void __net_exit smc_net_exit(struct net *net)
 {
+       smc_sysctl_net_exit(net);
        smc_pnet_net_exit(net);
 }
 
@@ -3296,17 +3302,9 @@ static int __init smc_init(void)
                goto out_ib;
        }
 
-       rc = smc_sysctl_init();
-       if (rc) {
-               pr_err("%s: sysctl_init fails with %d\n", __func__, rc);
-               goto out_ulp;
-       }
-
        static_branch_enable(&tcp_have_smc);
        return 0;
 
-out_ulp:
-       tcp_unregister_ulp(&smc_ulp_ops);
 out_ib:
        smc_ib_unregister_client();
 out_sock:
@@ -3336,7 +3334,6 @@ out_pernet_subsys:
 static void __exit smc_exit(void)
 {
        static_branch_disable(&tcp_have_smc);
-       smc_sysctl_exit();
        tcp_unregister_ulp(&smc_ulp_ops);
        sock_unregister(PF_SMC);
        smc_core_exit();
index 3b59876..bae1941 100644 (file)
@@ -28,7 +28,7 @@ static struct ctl_table smc_table[] = {
        {  }
 };
 
-static __net_init int smc_sysctl_init_net(struct net *net)
+int __net_init smc_sysctl_net_init(struct net *net)
 {
        struct ctl_table *table;
 
@@ -59,22 +59,7 @@ err_alloc:
        return -ENOMEM;
 }
 
-static __net_exit void smc_sysctl_exit_net(struct net *net)
+void __net_exit smc_sysctl_net_exit(struct net *net)
 {
        unregister_net_sysctl_table(net->smc.smc_hdr);
 }
-
-static struct pernet_operations smc_sysctl_ops __net_initdata = {
-       .init = smc_sysctl_init_net,
-       .exit = smc_sysctl_exit_net,
-};
-
-int __init smc_sysctl_init(void)
-{
-       return register_pernet_subsys(&smc_sysctl_ops);
-}
-
-void smc_sysctl_exit(void)
-{
-       unregister_pernet_subsys(&smc_sysctl_ops);
-}
index 49553ac..0becc11 100644 (file)
 
 #ifdef CONFIG_SYSCTL
 
-int smc_sysctl_init(void);
-void smc_sysctl_exit(void);
+int __net_init smc_sysctl_net_init(struct net *net);
+void __net_exit smc_sysctl_net_exit(struct net *net);
 
 #else
 
-int smc_sysctl_init(void)
+static inline int smc_sysctl_net_init(struct net *net)
 {
+       net->smc.sysctl_autocorking_size = SMC_AUTOCORKING_DEFAULT_SIZE;
        return 0;
 }
 
-void smc_sysctl_exit(void) { }
+static inline void smc_sysctl_net_exit(struct net *net) { }
 
 #endif /* CONFIG_SYSCTL */
 
index 34d6164..24be1d0 100644 (file)
@@ -137,28 +137,25 @@ static void smc_wr_tx_tasklet_fn(struct tasklet_struct *t)
 {
        struct smc_ib_device *dev = from_tasklet(dev, t, send_tasklet);
        struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
-       int i, rc;
+       int i = 0, rc;
+       int polled = 0;
 
 again:
+       polled++;
        do {
                memset(&wc, 0, sizeof(wc));
                rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
+               if (polled == 1) {
+                       ib_req_notify_cq(dev->roce_cq_send,
+                                        IB_CQ_NEXT_COMP |
+                                        IB_CQ_REPORT_MISSED_EVENTS);
+               }
+               if (!rc)
+                       break;
                for (i = 0; i < rc; i++)
                        smc_wr_tx_process_cqe(&wc[i]);
-               if (rc < SMC_WR_MAX_POLL_CQE)
-                       /* If < SMC_WR_MAX_POLL_CQE, the CQ should have been
-                        * drained, no need to poll again. --Guangguan Wang
-                        */
-                       break;
        } while (rc > 0);
-
-       /* IB_CQ_REPORT_MISSED_EVENTS make sure if ib_req_notify_cq() returns
-        * 0, it is safe to wait for the next event.
-        * Else we must poll the CQ again to make sure we won't miss any event
-        */
-       if (ib_req_notify_cq(dev->roce_cq_send,
-                            IB_CQ_NEXT_COMP |
-                            IB_CQ_REPORT_MISSED_EVENTS))
+       if (polled == 1)
                goto again;
 }
 
@@ -481,28 +478,24 @@ static void smc_wr_rx_tasklet_fn(struct tasklet_struct *t)
 {
        struct smc_ib_device *dev = from_tasklet(dev, t, recv_tasklet);
        struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
+       int polled = 0;
        int rc;
 
 again:
+       polled++;
        do {
                memset(&wc, 0, sizeof(wc));
                rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc);
-               if (rc > 0)
-                       smc_wr_rx_process_cqes(&wc[0], rc);
-               if (rc < SMC_WR_MAX_POLL_CQE)
-                       /* If < SMC_WR_MAX_POLL_CQE, the CQ should have been
-                        * drained, no need to poll again. --Guangguan Wang
-                        */
+               if (polled == 1) {
+                       ib_req_notify_cq(dev->roce_cq_recv,
+                                        IB_CQ_SOLICITED_MASK |
+                                        IB_CQ_REPORT_MISSED_EVENTS);
+               }
+               if (!rc)
                        break;
+               smc_wr_rx_process_cqes(&wc[0], rc);
        } while (rc > 0);
-
-       /* IB_CQ_REPORT_MISSED_EVENTS make sure if ib_req_notify_cq() returns
-        * 0, it is safe to wait for the next event.
-        * Else we must poll the CQ again to make sure we won't miss any event
-        */
-       if (ib_req_notify_cq(dev->roce_cq_recv,
-                            IB_CQ_SOLICITED_MASK |
-                            IB_CQ_REPORT_MISSED_EVENTS))
+       if (polled == 1)
                goto again;
 }
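
The rework arms the completion notification during the first polling pass and then takes exactly one more full pass, closing the window where a completion arriving between the final poll and re-arming would be lost. A userspace model of that poll/arm/re-poll discipline, with stand-ins for ib_poll_cq()/ib_req_notify_cq():

#include <stdio.h>

static int pending = 5;         /* completions waiting in the "CQ" */
static int armed;

static int poll_cq(int max)     /* returns number of events consumed */
{
	int n = pending < max ? pending : max;

	pending -= n;
	return n;
}

static void arm_notify(void)
{
	armed = 1;
	pending += 1;           /* an event raced in just before arming */
}

int main(void)
{
	int polled = 0, rc, handled = 0;

again:
	polled++;
	do {
		rc = poll_cq(2);
		if (polled == 1 && !armed)
			arm_notify();
		if (!rc)
			break;
		handled += rc;  /* process rc completions */
	} while (rc > 0);
	if (polled == 1)
		goto again;     /* second pass catches the raced event */

	printf("handled %d events\n", handled);     /* 6 = 5 + 1 raced */
	return 0;
}
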
 
index 473a790..6d39ca0 100644 (file)
@@ -352,16 +352,18 @@ static int tipc_enable_bearer(struct net *net, const char *name,
                goto rejected;
        }
 
-       test_and_set_bit_lock(0, &b->up);
-       rcu_assign_pointer(tn->bearer_list[bearer_id], b);
-       if (skb)
-               tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);
-
+       /* Create monitoring data before accepting activate messages */
        if (tipc_mon_create(net, bearer_id)) {
                bearer_disable(net, b);
+               kfree_skb(skb);
                return -ENOMEM;
        }
 
+       test_and_set_bit_lock(0, &b->up);
+       rcu_assign_pointer(tn->bearer_list[bearer_id], b);
+       if (skb)
+               tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);
+
        pr_info("Enabled bearer <%s>, priority %u\n", name, prio);
 
        return res;
@@ -768,7 +770,7 @@ void tipc_clone_to_loopback(struct net *net, struct sk_buff_head *pkts)
                skb->pkt_type = PACKET_HOST;
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                skb->protocol = eth_type_trans(skb, dev);
-               netif_rx_ni(skb);
+               netif_rx(skb);
        }
 }
 
index 1e14d7f..e260c0d 100644 (file)
@@ -2286,6 +2286,11 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                break;
 
        case STATE_MSG:
+               /* Validate Gap ACK blocks, drop if invalid */
+               glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);
+               if (glen > dlen)
+                       break;
+
                l->rcv_nxt_state = msg_seqno(hdr) + 1;
 
                /* Update own tolerance if peer indicates a non-zero value */
@@ -2311,10 +2316,6 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                        break;
                }
 
-               /* Receive Gap ACK blocks from peer if any */
-               glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);
-               if(glen > dlen)
-                       break;
                tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
                             &l->mon_state, l->bearer_id);
 
index b932469..12f7b56 100644 (file)
@@ -1028,20 +1028,21 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
        if (ctx->priv_ctx_tx)
                return -EEXIST;
 
-       start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
-       if (!start_marker_record)
-               return -ENOMEM;
+       netdev = get_netdev_for_sock(sk);
+       if (!netdev) {
+               pr_err_ratelimited("%s: netdev not found\n", __func__);
+               return -EINVAL;
+       }
 
-       offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
-       if (!offload_ctx) {
-               rc = -ENOMEM;
-               goto free_marker_record;
+       if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
+               rc = -EOPNOTSUPP;
+               goto release_netdev;
        }
 
        crypto_info = &ctx->crypto_send.info;
        if (crypto_info->version != TLS_1_2_VERSION) {
                rc = -EOPNOTSUPP;
-               goto free_offload_ctx;
+               goto release_netdev;
        }
 
        switch (crypto_info->cipher_type) {
@@ -1057,13 +1058,13 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
                break;
        default:
                rc = -EINVAL;
-               goto free_offload_ctx;
+               goto release_netdev;
        }
 
        /* Sanity-check the rec_seq_size for stack allocations */
        if (rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
                rc = -EINVAL;
-               goto free_offload_ctx;
+               goto release_netdev;
        }
 
        prot->version = crypto_info->version;
@@ -1077,7 +1078,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
                             GFP_KERNEL);
        if (!ctx->tx.iv) {
                rc = -ENOMEM;
-               goto free_offload_ctx;
+               goto release_netdev;
        }
 
        memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
@@ -1089,9 +1090,21 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
                goto free_iv;
        }
 
+       start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
+       if (!start_marker_record) {
+               rc = -ENOMEM;
+               goto free_rec_seq;
+       }
+
+       offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
+       if (!offload_ctx) {
+               rc = -ENOMEM;
+               goto free_marker_record;
+       }
+
        rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
        if (rc)
-               goto free_rec_seq;
+               goto free_offload_ctx;
 
        /* start at rec_seq - 1 to account for the start marker record */
        memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
@@ -1118,18 +1131,6 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
        if (skb)
                TCP_SKB_CB(skb)->eor = 1;
 
-       netdev = get_netdev_for_sock(sk);
-       if (!netdev) {
-               pr_err_ratelimited("%s: netdev not found\n", __func__);
-               rc = -EINVAL;
-               goto disable_cad;
-       }
-
-       if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
-               rc = -EOPNOTSUPP;
-               goto release_netdev;
-       }
-
        /* Avoid offloading if the device is down
         * We don't want to offload new flows after
         * the NETDEV_DOWN event
@@ -1167,20 +1168,19 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 
 release_lock:
        up_read(&device_offload_lock);
-release_netdev:
-       dev_put(netdev);
-disable_cad:
        clean_acked_data_disable(inet_csk(sk));
        crypto_free_aead(offload_ctx->aead_send);
-free_rec_seq:
-       kfree(ctx->tx.rec_seq);
-free_iv:
-       kfree(ctx->tx.iv);
 free_offload_ctx:
        kfree(offload_ctx);
        ctx->priv_ctx_tx = NULL;
 free_marker_record:
        kfree(start_marker_record);
+free_rec_seq:
+       kfree(ctx->tx.rec_seq);
+free_iv:
+       kfree(ctx->tx.iv);
+release_netdev:
+       dev_put(netdev);
        return rc;
 }
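
The reorder acquires the netdev first and rewrites the error ladder so the goto labels release resources in exact reverse order of acquisition; each target frees only what was set up before the failure point. A minimal sketch of that unwind discipline with generic resources, not the TLS objects:

#include <stdio.h>
#include <stdlib.h>

static void *xmalloc(int ok) { return ok ? malloc(16) : NULL; }

static int setup(int fail_at)   /* 0 = succeed, 1..3 = fail at step N */
{
	void *a, *b, *c;
	int rc = -1;

	a = xmalloc(fail_at != 1);      /* step A: e.g. get the netdev */
	if (!a)
		goto out;
	b = xmalloc(fail_at != 2);      /* step B: e.g. iv/rec_seq buffers */
	if (!b)
		goto free_a;
	c = xmalloc(fail_at != 3);      /* step C: e.g. offload context */
	if (!c)
		goto free_b;

	rc = 0;                         /* success (demo releases it all) */
	free(c);
free_b:
	free(b);
free_a:
	free(a);
out:
	return rc;
}

int main(void)
{
	for (int i = 0; i <= 3; i++)
		printf("fail_at=%d -> rc=%d\n", i, setup(i));
	return 0;
}
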
 
index 6bc2879..7b2b0e7 100644 (file)
@@ -553,10 +553,8 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
        int rc = 0;
        int conf;
 
-       if (sockptr_is_null(optval) || (optlen < sizeof(*crypto_info))) {
-               rc = -EINVAL;
-               goto out;
-       }
+       if (sockptr_is_null(optval) || (optlen < sizeof(*crypto_info)))
+               return -EINVAL;
 
        if (tx) {
                crypto_info = &ctx->crypto_send.info;
@@ -567,10 +565,8 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
        }
 
        /* Currently we don't support set crypto info more than one time */
-       if (TLS_CRYPTO_INFO_READY(crypto_info)) {
-               rc = -EBUSY;
-               goto out;
-       }
+       if (TLS_CRYPTO_INFO_READY(crypto_info))
+               return -EBUSY;
 
        rc = copy_from_sockptr(crypto_info, optval, sizeof(*crypto_info));
        if (rc) {
@@ -672,11 +668,10 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
                ctx->sk_write_space = sk->sk_write_space;
                sk->sk_write_space = tls_write_space;
        }
-       goto out;
+       return 0;
 
 err_crypto_info:
        memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
-out:
        return rc;
 }
 
index 3e0d628..4247c41 100644 (file)
@@ -2049,7 +2049,7 @@ out:
  */
 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
 
-#if (IS_ENABLED(CONFIG_AF_UNIX_OOB))
+#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
 static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other)
 {
        struct unix_sock *ousk = unix_sk(other);
@@ -2115,7 +2115,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
 
        err = -EOPNOTSUPP;
        if (msg->msg_flags & MSG_OOB) {
-#if (IS_ENABLED(CONFIG_AF_UNIX_OOB))
+#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
                if (len)
                        len--;
                else
@@ -2186,7 +2186,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
                sent += size;
        }
 
-#if (IS_ENABLED(CONFIG_AF_UNIX_OOB))
+#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
        if (msg->msg_flags & MSG_OOB) {
                err = queue_oob(sock, msg, other);
                if (err)
index 38baeb1..f04abf6 100644 (file)
@@ -334,7 +334,8 @@ void vsock_remove_sock(struct vsock_sock *vsk)
 }
 EXPORT_SYMBOL_GPL(vsock_remove_sock);
 
-void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
+void vsock_for_each_connected_socket(struct vsock_transport *transport,
+                                    void (*fn)(struct sock *sk))
 {
        int i;
 
@@ -343,8 +344,12 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
        for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
                struct vsock_sock *vsk;
                list_for_each_entry(vsk, &vsock_connected_table[i],
-                                   connected_table)
+                                   connected_table) {
+                       if (vsk->transport != transport)
+                               continue;
+
                        fn(sk_vsock(vsk));
+               }
        }
 
        spin_unlock_bh(&vsock_table_lock);
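
With several transports (virtio, vmci, hyperv) potentially loaded at once, a reset event from one transport must not touch sockets owned by the others; the new transport argument adds that filter to the connected-table walk. A model of the filtered walk with illustrative types:

#include <stdio.h>

struct transport { const char *name; };

struct vsock { const struct transport *transport; int id; };

static void for_each_connected(struct vsock *tbl, int n,
			       const struct transport *t,
			       void (*fn)(struct vsock *))
{
	for (int i = 0; i < n; i++) {
		if (tbl[i].transport != t)      /* the added filter */
			continue;
		fn(&tbl[i]);
	}
}

static void reset_sock(struct vsock *vsk)
{
	printf("reset socket %d (%s)\n", vsk->id, vsk->transport->name);
}

int main(void)
{
	struct transport virtio = { "virtio" }, vmci = { "vmci" };
	struct vsock table[] = {
		{ &virtio, 1 }, { &vmci, 2 }, { &virtio, 3 },
	};

	/* e.g. VIRTIO_VSOCK_EVENT_TRANSPORT_RESET: only 1 and 3 reset */
	for_each_connected(table, 3, &virtio, reset_sock);
	return 0;
}
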
index fb3302f..5afc194 100644 (file)
@@ -24,6 +24,7 @@
 static struct workqueue_struct *virtio_vsock_workqueue;
 static struct virtio_vsock __rcu *the_virtio_vsock;
 static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */
+static struct virtio_transport virtio_transport; /* forward declaration */
 
 struct virtio_vsock {
        struct virtio_device *vdev;
@@ -384,7 +385,8 @@ static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
        switch (le32_to_cpu(event->id)) {
        case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
                virtio_vsock_update_guest_cid(vsock);
-               vsock_for_each_connected_socket(virtio_vsock_reset_sock);
+               vsock_for_each_connected_socket(&virtio_transport.transport,
+                                               virtio_vsock_reset_sock);
                break;
        }
 }
@@ -662,7 +664,8 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
        synchronize_rcu();
 
        /* Reset all connected sockets when the device disappear */
-       vsock_for_each_connected_socket(virtio_vsock_reset_sock);
+       vsock_for_each_connected_socket(&virtio_transport.transport,
+                                       virtio_vsock_reset_sock);
 
        /* Stop all work handlers to make sure no one is accessing the device,
         * so we can safely call virtio_reset_device().
index 7aef34e..b17dc97 100644 (file)
@@ -75,6 +75,8 @@ static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
 
 static int PROTOCOL_OVERRIDE = -1;
 
+static struct vsock_transport vmci_transport; /* forward declaration */
+
 /* Helper function to convert from a VMCI error code to a VSock error code. */
 
 static s32 vmci_transport_error_to_vsock_error(s32 vmci_error)
@@ -882,7 +884,8 @@ static void vmci_transport_qp_resumed_cb(u32 sub_id,
                                         const struct vmci_event_data *e_data,
                                         void *client_data)
 {
-       vsock_for_each_connected_socket(vmci_transport_handle_detach);
+       vsock_for_each_connected_socket(&vmci_transport,
+                                       vmci_transport_handle_detach);
 }
 
 static void vmci_transport_recv_pkt_work(struct work_struct *work)
index eb82205..8b7fb4a 100644 (file)
@@ -181,6 +181,9 @@ static int nl80211_chan_width_to_mhz(enum nl80211_chan_width chan_width)
        case NL80211_CHAN_WIDTH_160:
                mhz = 160;
                break;
+       case NL80211_CHAN_WIDTH_320:
+               mhz = 320;
+               break;
        default:
                WARN_ON_ONCE(1);
                return -1;
@@ -271,6 +274,17 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
        case NL80211_CHAN_WIDTH_16:
                /* all checked above */
                break;
+       case NL80211_CHAN_WIDTH_320:
+               if (chandef->center_freq1 == control_freq + 150 ||
+                   chandef->center_freq1 == control_freq + 130 ||
+                   chandef->center_freq1 == control_freq + 110 ||
+                   chandef->center_freq1 == control_freq + 90 ||
+                   chandef->center_freq1 == control_freq - 90 ||
+                   chandef->center_freq1 == control_freq - 110 ||
+                   chandef->center_freq1 == control_freq - 130 ||
+                   chandef->center_freq1 == control_freq - 150)
+                       break;
+               fallthrough;
        case NL80211_CHAN_WIDTH_160:
                if (chandef->center_freq1 == control_freq + 70 ||
                    chandef->center_freq1 == control_freq + 50 ||
@@ -307,7 +321,7 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
 EXPORT_SYMBOL(cfg80211_chandef_valid);
 
 static void chandef_primary_freqs(const struct cfg80211_chan_def *c,
-                                 u32 *pri40, u32 *pri80)
+                                 u32 *pri40, u32 *pri80, u32 *pri160)
 {
        int tmp;
 
@@ -315,9 +329,11 @@ static void chandef_primary_freqs(const struct cfg80211_chan_def *c,
        case NL80211_CHAN_WIDTH_40:
                *pri40 = c->center_freq1;
                *pri80 = 0;
+               *pri160 = 0;
                break;
        case NL80211_CHAN_WIDTH_80:
        case NL80211_CHAN_WIDTH_80P80:
+               *pri160 = 0;
                *pri80 = c->center_freq1;
                /* n_P20 */
                tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
@@ -327,6 +343,7 @@ static void chandef_primary_freqs(const struct cfg80211_chan_def *c,
                *pri40 = c->center_freq1 - 20 + 40 * tmp;
                break;
        case NL80211_CHAN_WIDTH_160:
+               *pri160 = c->center_freq1;
                /* n_P20 */
                tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
                /* n_P40 */
@@ -337,6 +354,20 @@ static void chandef_primary_freqs(const struct cfg80211_chan_def *c,
                tmp /= 2;
                *pri80 = c->center_freq1 - 40 + 80 * tmp;
                break;
+       case NL80211_CHAN_WIDTH_320:
+               /* n_P20 */
+               tmp = (150 + c->chan->center_freq - c->center_freq1) / 20;
+               /* n_P40 */
+               tmp /= 2;
+               /* freq_P40 */
+               *pri40 = c->center_freq1 - 140 + 40 * tmp;
+               /* n_P80 */
+               tmp /= 2;
+               *pri80 = c->center_freq1 - 120 + 80 * tmp;
+               /* n_P160 */
+               tmp /= 2;
+               *pri160 = c->center_freq1 - 80 + 160 * tmp;
+               break;
        default:
                WARN_ON_ONCE(1);
        }
@@ -346,7 +377,7 @@ const struct cfg80211_chan_def *
 cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
                            const struct cfg80211_chan_def *c2)
 {
-       u32 c1_pri40, c1_pri80, c2_pri40, c2_pri80;
+       u32 c1_pri40, c1_pri80, c2_pri40, c2_pri80, c1_pri160, c2_pri160;
 
        /* If they are identical, return */
        if (cfg80211_chandef_identical(c1, c2))
@@ -381,14 +412,31 @@ cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
            c2->width == NL80211_CHAN_WIDTH_20)
                return c1;
 
-       chandef_primary_freqs(c1, &c1_pri40, &c1_pri80);
-       chandef_primary_freqs(c2, &c2_pri40, &c2_pri80);
+       chandef_primary_freqs(c1, &c1_pri40, &c1_pri80, &c1_pri160);
+       chandef_primary_freqs(c2, &c2_pri40, &c2_pri80, &c2_pri160);
 
        if (c1_pri40 != c2_pri40)
                return NULL;
 
-       WARN_ON(!c1_pri80 && !c2_pri80);
-       if (c1_pri80 && c2_pri80 && c1_pri80 != c2_pri80)
+       if (c1->width == NL80211_CHAN_WIDTH_40)
+               return c2;
+
+       if (c2->width == NL80211_CHAN_WIDTH_40)
+               return c1;
+
+       if (c1_pri80 != c2_pri80)
+               return NULL;
+
+       if (c1->width == NL80211_CHAN_WIDTH_80 &&
+           c2->width > NL80211_CHAN_WIDTH_80)
+               return c2;
+
+       if (c2->width == NL80211_CHAN_WIDTH_80 &&
+           c1->width > NL80211_CHAN_WIDTH_80)
+               return c1;
+
+       WARN_ON(!c1_pri160 && !c2_pri160);
+       if (c1_pri160 && c2_pri160 && c1_pri160 != c2_pri160)
                return NULL;
 
        if (c1->width > c2->width)
@@ -960,7 +1008,10 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
        struct ieee80211_sta_vht_cap *vht_cap;
        struct ieee80211_edmg *edmg_cap;
        u32 width, control_freq, cap;
-       bool ext_nss_cap, support_80_80 = false;
+       bool ext_nss_cap, support_80_80 = false, support_320 = false;
+       const struct ieee80211_sband_iftype_data *iftd;
+       struct ieee80211_supported_band *sband;
+       int i;
 
        if (WARN_ON(!cfg80211_chandef_valid(chandef)))
                return false;
@@ -1062,6 +1113,32 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
                      (vht_cap->cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK)))
                        return false;
                break;
+       case NL80211_CHAN_WIDTH_320:
+               prohibited_flags |= IEEE80211_CHAN_NO_320MHZ;
+               width = 320;
+
+               if (chandef->chan->band != NL80211_BAND_6GHZ)
+                       return false;
+
+               sband = wiphy->bands[NL80211_BAND_6GHZ];
+               if (!sband)
+                       return false;
+
+               for (i = 0; i < sband->n_iftype_data; i++) {
+                       iftd = &sband->iftype_data[i];
+                       if (!iftd->eht_cap.has_eht)
+                               continue;
+
+                       if (iftd->eht_cap.eht_cap_elem.phy_cap_info[0] &
+                           IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) {
+                               support_320 = true;
+                               break;
+                       }
+               }
+
+               if (!support_320)
+                       return false;
+               break;
        default:
                WARN_ON_ONCE(1);
                return false;
index c01fbcc..ee1c2b6 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
  */
 
 #include <linux/if.h>
@@ -285,6 +285,15 @@ static int validate_ie_attr(const struct nlattr *attr,
        return -EINVAL;
 }
 
+static int validate_he_capa(const struct nlattr *attr,
+                           struct netlink_ext_ack *extack)
+{
+       if (!ieee80211_he_capa_size_ok(nla_data(attr), nla_len(attr)))
+               return -EINVAL;
+
+       return 0;
+}
+
 /* policy for the attributes */
 static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR];
 
@@ -730,9 +739,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_TXQ_MEMORY_LIMIT] = { .type = NLA_U32 },
        [NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 },
        [NL80211_ATTR_HE_CAPABILITY] =
-               NLA_POLICY_RANGE(NLA_BINARY,
-                                NL80211_HE_MIN_CAPABILITY_LEN,
-                                NL80211_HE_MAX_CAPABILITY_LEN),
+               NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_he_capa,
+                                      NL80211_HE_MAX_CAPABILITY_LEN),
        [NL80211_ATTR_FTM_RESPONDER] =
                NLA_POLICY_NESTED(nl80211_ftm_responder_policy),
        [NL80211_ATTR_TIMEOUT] = NLA_POLICY_MIN(NLA_U32, 1),
@@ -778,6 +786,10 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_MBSSID_ELEMS] = { .type = NLA_NESTED },
        [NL80211_ATTR_RADAR_BACKGROUND] = { .type = NLA_FLAG },
        [NL80211_ATTR_AP_SETTINGS_FLAGS] = { .type = NLA_U32 },
+       [NL80211_ATTR_EHT_CAPABILITY] =
+               NLA_POLICY_RANGE(NLA_BINARY,
+                                NL80211_EHT_MIN_CAPABILITY_LEN,
+                                NL80211_EHT_MAX_CAPABILITY_LEN),
 };
 
 /* policy for the key attributes */
@@ -1148,6 +1160,12 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
                if ((chan->flags & IEEE80211_CHAN_16MHZ) &&
                    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_16MHZ))
                        goto nla_put_failure;
+               if ((chan->flags & IEEE80211_CHAN_NO_320MHZ) &&
+                   nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_320MHZ))
+                       goto nla_put_failure;
+               if ((chan->flags & IEEE80211_CHAN_NO_EHT) &&
+                   nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_EHT))
+                       goto nla_put_failure;
        }
 
        if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
@@ -1729,6 +1747,7 @@ nl80211_send_iftype_data(struct sk_buff *msg,
                         const struct ieee80211_sband_iftype_data *iftdata)
 {
        const struct ieee80211_sta_he_cap *he_cap = &iftdata->he_cap;
+       const struct ieee80211_sta_eht_cap *eht_cap = &iftdata->eht_cap;
 
        if (nl80211_put_iftypes(msg, NL80211_BAND_IFTYPE_ATTR_IFTYPES,
                                iftdata->types_mask))
@@ -1749,6 +1768,32 @@ nl80211_send_iftype_data(struct sk_buff *msg,
                        return -ENOBUFS;
        }
 
+       if (eht_cap->has_eht && he_cap->has_he) {
+               u8 mcs_nss_size, ppe_thresh_size;
+               u16 ppe_thres_hdr;
+
+               mcs_nss_size =
+                       ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
+                                                  &eht_cap->eht_cap_elem);
+
+               ppe_thres_hdr = get_unaligned_le16(&eht_cap->eht_ppe_thres[0]);
+               ppe_thresh_size =
+                       ieee80211_eht_ppe_size(ppe_thres_hdr,
+                                              eht_cap->eht_cap_elem.phy_cap_info);
+
+               if (nla_put(msg, NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC,
+                           sizeof(eht_cap->eht_cap_elem.mac_cap_info),
+                           eht_cap->eht_cap_elem.mac_cap_info) ||
+                   nla_put(msg, NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY,
+                           sizeof(eht_cap->eht_cap_elem.phy_cap_info),
+                           eht_cap->eht_cap_elem.phy_cap_info) ||
+                   nla_put(msg, NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET,
+                           mcs_nss_size, &eht_cap->eht_mcs_nss_supp) ||
+                   nla_put(msg, NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE,
+                           ppe_thresh_size, eht_cap->eht_ppe_thres))
+                       return -ENOBUFS;
+       }
+
        if (sband->band == NL80211_BAND_6GHZ &&
            nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA,
                    sizeof(iftdata->he_6ghz_capa),
@@ -5919,6 +5964,14 @@ bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, int attr)
        case RATE_INFO_BW_HE_RU:
                rate_flg = 0;
                WARN_ON(!(info->flags & RATE_INFO_FLAGS_HE_MCS));
+               break;
+       case RATE_INFO_BW_320:
+               rate_flg = NL80211_RATE_INFO_320_MHZ_WIDTH;
+               break;
+       case RATE_INFO_BW_EHT_RU:
+               rate_flg = 0;
+               WARN_ON(!(info->flags & RATE_INFO_FLAGS_EHT_MCS));
+               break;
        }
 
        if (rate_flg && nla_put_flag(msg, rate_flg))
@@ -5951,6 +6004,17 @@ bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, int attr)
                    nla_put_u8(msg, NL80211_RATE_INFO_HE_RU_ALLOC,
                               info->he_ru_alloc))
                        return false;
+       } else if (info->flags & RATE_INFO_FLAGS_EHT_MCS) {
+               if (nla_put_u8(msg, NL80211_RATE_INFO_EHT_MCS, info->mcs))
+                       return false;
+               if (nla_put_u8(msg, NL80211_RATE_INFO_EHT_NSS, info->nss))
+                       return false;
+               if (nla_put_u8(msg, NL80211_RATE_INFO_EHT_GI, info->eht_gi))
+                       return false;
+               if (info->bw == RATE_INFO_BW_EHT_RU &&
+                   nla_put_u8(msg, NL80211_RATE_INFO_EHT_RU_ALLOC,
+                              info->eht_ru_alloc))
+                       return false;
        }
 
        nla_nest_end(msg, rate);
@@ -6365,7 +6429,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
                if (params->supported_rates)
                        return -EINVAL;
                if (params->ext_capab || params->ht_capa || params->vht_capa ||
-                   params->he_capa)
+                   params->he_capa || params->eht_capa)
                        return -EINVAL;
        }
 
@@ -6568,6 +6632,18 @@ static int nl80211_set_station_tdls(struct genl_info *info,
                        nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
                params->he_capa_len =
                        nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
+
+               if (info->attrs[NL80211_ATTR_EHT_CAPABILITY]) {
+                       params->eht_capa =
+                               nla_data(info->attrs[NL80211_ATTR_EHT_CAPABILITY]);
+                       params->eht_capa_len =
+                               nla_len(info->attrs[NL80211_ATTR_EHT_CAPABILITY]);
+
+                       if (!ieee80211_eht_capa_size_ok((const u8 *)params->he_capa,
+                                                       (const u8 *)params->eht_capa,
+                                                       params->eht_capa_len))
+                               return -EINVAL;
+               }
        }
 
        err = nl80211_parse_sta_channel_info(info, params);
@@ -6825,6 +6901,18 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
                        nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
                params.he_capa_len =
                        nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
+
+               if (info->attrs[NL80211_ATTR_EHT_CAPABILITY]) {
+                       params.eht_capa =
+                               nla_data(info->attrs[NL80211_ATTR_EHT_CAPABILITY]);
+                       params.eht_capa_len =
+                               nla_len(info->attrs[NL80211_ATTR_EHT_CAPABILITY]);
+
+                       if (!ieee80211_eht_capa_size_ok((const u8 *)params.he_capa,
+                                                       (const u8 *)params.eht_capa,
+                                                       params.eht_capa_len))
+                               return -EINVAL;
+               }
        }
 
        if (info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY])
@@ -6874,8 +6962,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
                params.ht_capa = NULL;
                params.vht_capa = NULL;
 
-               /* HE requires WME */
-               if (params.he_capa_len || params.he_6ghz_capa)
+               /* HE and EHT require WME */
+               if (params.he_capa_len || params.he_6ghz_capa ||
+                   params.eht_capa_len)
                        return -EINVAL;
        }
 
@@ -7948,6 +8037,7 @@ static int nl80211_get_reg_do(struct sk_buff *skb, struct genl_info *info)
        struct cfg80211_registered_device *rdev;
        struct wiphy *wiphy = NULL;
        struct sk_buff *msg;
+       int err = -EMSGSIZE;
        void *hdr;
 
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -7966,34 +8056,35 @@ static int nl80211_get_reg_do(struct sk_buff *skb, struct genl_info *info)
 
                rdev = cfg80211_get_dev_from_info(genl_info_net(info), info);
                if (IS_ERR(rdev)) {
-                       nlmsg_free(msg);
-                       rtnl_unlock();
-                       return PTR_ERR(rdev);
+                       err = PTR_ERR(rdev);
+                       goto nla_put_failure;
                }
 
                wiphy = &rdev->wiphy;
                self_managed = wiphy->regulatory_flags &
                               REGULATORY_WIPHY_SELF_MANAGED;
+
+               rcu_read_lock();
+
                regdom = get_wiphy_regdom(wiphy);
 
                /* a self-managed-reg device must have a private regdom */
                if (WARN_ON(!regdom && self_managed)) {
-                       nlmsg_free(msg);
-                       rtnl_unlock();
-                       return -EINVAL;
+                       err = -EINVAL;
+                       goto nla_put_failure_rcu;
                }
 
                if (regdom &&
                    nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy)))
-                       goto nla_put_failure;
+                       goto nla_put_failure_rcu;
+       } else {
+               rcu_read_lock();
        }
 
        if (!wiphy && reg_last_request_cell_base() &&
            nla_put_u32(msg, NL80211_ATTR_USER_REG_HINT_TYPE,
                        NL80211_USER_REG_HINT_CELL_BASE))
-               goto nla_put_failure;
-
-       rcu_read_lock();
+               goto nla_put_failure_rcu;
 
        if (!regdom)
                regdom = rcu_dereference(cfg80211_regdomain);
@@ -8013,7 +8104,7 @@ nla_put_failure:
        rtnl_unlock();
 put_failure:
        nlmsg_free(msg);
-       return -EMSGSIZE;
+       return err;
 }
 
 static int nl80211_send_regdom(struct sk_buff *msg, struct netlink_callback *cb,
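
The hunk above folds the ad-hoc unlock/free/return sequences into one exit
path and moves the rcu_read_lock() so it covers get_wiphy_regdom() on every
branch. A minimal standalone sketch of the pattern, with stubbed primitives
and hypothetical helper names:

    #include <errno.h>
    #include <stdlib.h>

    /* Stand-ins so the sketch compiles on its own; the real primitives are
     * the kernel's RCU and netlink helpers. All names are hypothetical. */
    static void rcu_read_lock(void)   { }
    static void rcu_read_unlock(void) { }
    static void *alloc_msg(void)      { return malloc(16); }
    static void free_msg(void *m)     { free(m); }
    static int nla_put_stub(void *m)  { (void)m; return 0; /* 0 = ok */ }
    static int lookup_ok(void)        { return 1; }

    static int build_reply(void **out)
    {
        void *msg;
        int err = -EMSGSIZE;        /* default: message construction failed */

        msg = alloc_msg();
        if (!msg)
            return -ENOBUFS;

        rcu_read_lock();            /* covers the RCU-protected lookup below */

        if (nla_put_stub(msg))
            goto out_unlock;        /* err is still -EMSGSIZE */

        if (!lookup_ok()) {
            err = -ENOENT;          /* override with the specific cause */
            goto out_unlock;
        }

        rcu_read_unlock();
        *out = msg;
        return 0;

    out_unlock:
        rcu_read_unlock();
        free_msg(msg);
        return err;                 /* one unwind path, real error propagated */
    }
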
@@ -8059,19 +8150,19 @@ static int nl80211_get_reg_dump(struct sk_buff *skb,
        struct cfg80211_registered_device *rdev;
        int err, reg_idx, start = cb->args[2];
 
-       rtnl_lock();
+       rcu_read_lock();
 
        if (cfg80211_regdomain && start == 0) {
                err = nl80211_send_regdom(skb, cb, cb->nlh->nlmsg_seq,
                                          NLM_F_MULTI, NULL,
-                                         rtnl_dereference(cfg80211_regdomain));
+                                         rcu_dereference(cfg80211_regdomain));
                if (err < 0)
                        goto out_err;
        }
 
        /* the global regdom is idx 0 */
        reg_idx = 1;
-       list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+       list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
                regdom = get_wiphy_regdom(&rdev->wiphy);
                if (!regdom)
                        continue;
@@ -8090,7 +8181,7 @@ static int nl80211_get_reg_dump(struct sk_buff *skb,
        cb->args[2] = reg_idx;
        err = skb->len;
 out_err:
-       rtnl_unlock();
+       rcu_read_unlock();
        return err;
 }
 
@@ -10552,6 +10643,8 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
                                             NL80211_EXT_FEATURE_VHT_IBSS))
                        return -EINVAL;
                break;
+       case NL80211_CHAN_WIDTH_320:
+               return -EINVAL;
        default:
                return -EINVAL;
        }
index ec25924..c76cd97 100644 (file)
@@ -1238,6 +1238,8 @@ unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd,
 {
        unsigned int bw = reg_get_max_bandwidth_from_range(rd, rule);
 
+       if (rule->flags & NL80211_RRF_NO_320MHZ)
+               bw = min_t(unsigned int, bw, MHZ_TO_KHZ(160));
        if (rule->flags & NL80211_RRF_NO_160MHZ)
                bw = min_t(unsigned int, bw, MHZ_TO_KHZ(80));
        if (rule->flags & NL80211_RRF_NO_80MHZ)
@@ -1611,6 +1613,8 @@ static u32 map_regdom_flags(u32 rd_flags)
                channel_flags |= IEEE80211_CHAN_NO_160MHZ;
        if (rd_flags & NL80211_RRF_NO_HE)
                channel_flags |= IEEE80211_CHAN_NO_HE;
+       if (rd_flags & NL80211_RRF_NO_320MHZ)
+               channel_flags |= IEEE80211_CHAN_NO_320MHZ;
        return channel_flags;
 }
 
@@ -1773,6 +1777,8 @@ static uint32_t reg_rule_to_chan_bw_flags(const struct ieee80211_regdomain *regd
                        bw_flags |= IEEE80211_CHAN_NO_80MHZ;
                if (max_bandwidth_khz < MHZ_TO_KHZ(160))
                        bw_flags |= IEEE80211_CHAN_NO_160MHZ;
+               if (max_bandwidth_khz < MHZ_TO_KHZ(320))
+                       bw_flags |= IEEE80211_CHAN_NO_320MHZ;
        }
        return bw_flags;
 }
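
Both regulatory hunks extend the same threshold mapping to the new 320 MHz
width: a rule's maximum bandwidth is compared against each channel width, and
every width the rule cannot accommodate gets a NO_xxxMHZ flag. A standalone
sketch with illustrative flag values (not the kernel's IEEE80211_CHAN_* bits):

    #include <stdio.h>

    /* Illustrative flag values, not the kernel's IEEE80211_CHAN_* bits. */
    #define NO_80MHZ   (1 << 0)
    #define NO_160MHZ  (1 << 1)
    #define NO_320MHZ  (1 << 2)

    #define MHZ_TO_KHZ(x) ((x) * 1000)

    static unsigned int bw_flags_from_max(unsigned int max_bandwidth_khz)
    {
        unsigned int flags = 0;

        if (max_bandwidth_khz < MHZ_TO_KHZ(80))
            flags |= NO_80MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(160))
            flags |= NO_160MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(320))    /* the new EHT case */
            flags |= NO_320MHZ;
        return flags;
    }

    int main(void)
    {
        /* A 160 MHz rule forbids 320 MHz operation but nothing narrower. */
        printf("%#x\n", bw_flags_from_max(MHZ_TO_KHZ(160)));    /* 0x4 */
        return 0;
    }
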
index e02f170..a60d7d6 100644 (file)
@@ -1430,6 +1430,135 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
        return result / 10000;
 }
 
+static u32 cfg80211_calculate_bitrate_eht(struct rate_info *rate)
+{
+#define SCALE 6144
+       static const u32 mcs_divisors[16] = {
+               102399, /* 16.666666... */
+                51201, /*  8.333333... */
+                34134, /*  5.555555... */
+                25599, /*  4.166666... */
+                17067, /*  2.777777... */
+                12801, /*  2.083333... */
+                11377, /*  1.851851... */
+                10239, /*  1.666666... */
+                 8532, /*  1.388888... */
+                 7680, /*  1.250000... */
+                 6828, /*  1.111111... */
+                 6144, /*  1.000000... */
+                 5690, /*  0.926106... */
+                 5120, /*  0.833333... */
+               409600, /* 66.666666... */
+               204800, /* 33.333333... */
+       };
+       static const u32 rates_996[3] =  { 480388888, 453700000, 408333333 };
+       static const u32 rates_484[3] =  { 229411111, 216666666, 195000000 };
+       static const u32 rates_242[3] =  { 114711111, 108333333,  97500000 };
+       static const u32 rates_106[3] =  {  40000000,  37777777,  34000000 };
+       static const u32 rates_52[3]  =  {  18820000,  17777777,  16000000 };
+       static const u32 rates_26[3]  =  {   9411111,   8888888,   8000000 };
+       u64 tmp;
+       u32 result;
+
+       if (WARN_ON_ONCE(rate->mcs > 15))
+               return 0;
+       if (WARN_ON_ONCE(rate->eht_gi > NL80211_RATE_INFO_EHT_GI_3_2))
+               return 0;
+       if (WARN_ON_ONCE(rate->eht_ru_alloc >
+                        NL80211_RATE_INFO_EHT_RU_ALLOC_4x996))
+               return 0;
+       if (WARN_ON_ONCE(rate->nss < 1 || rate->nss > 8))
+               return 0;
+
+       /* Bandwidth checks for MCS 14 */
+       if (rate->mcs == 14) {
+               if ((rate->bw != RATE_INFO_BW_EHT_RU &&
+                    rate->bw != RATE_INFO_BW_80 &&
+                    rate->bw != RATE_INFO_BW_160 &&
+                    rate->bw != RATE_INFO_BW_320) ||
+                   (rate->bw == RATE_INFO_BW_EHT_RU &&
+                    rate->eht_ru_alloc != NL80211_RATE_INFO_EHT_RU_ALLOC_996 &&
+                    rate->eht_ru_alloc != NL80211_RATE_INFO_EHT_RU_ALLOC_2x996 &&
+                    rate->eht_ru_alloc != NL80211_RATE_INFO_EHT_RU_ALLOC_4x996)) {
+                       WARN(1, "invalid EHT BW for MCS 14: bw:%d, ru:%d\n",
+                            rate->bw, rate->eht_ru_alloc);
+                       return 0;
+               }
+       }
+
+       if (rate->bw == RATE_INFO_BW_320 ||
+           (rate->bw == RATE_INFO_BW_EHT_RU &&
+            rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_4x996))
+               result = 4 * rates_996[rate->eht_gi];
+       else if (rate->bw == RATE_INFO_BW_EHT_RU &&
+                rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484)
+               result = 3 * rates_996[rate->eht_gi] + rates_484[rate->eht_gi];
+       else if (rate->bw == RATE_INFO_BW_EHT_RU &&
+                rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_3x996)
+               result = 3 * rates_996[rate->eht_gi];
+       else if (rate->bw == RATE_INFO_BW_EHT_RU &&
+                rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484)
+               result = 2 * rates_996[rate->eht_gi] + rates_484[rate->eht_gi];
+       else if (rate->bw == RATE_INFO_BW_160 ||
+                (rate->bw == RATE_INFO_BW_EHT_RU &&
+                 rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_2x996))
+               result = 2 * rates_996[rate->eht_gi];
+       else if (rate->bw == RATE_INFO_BW_EHT_RU &&
+                rate->eht_ru_alloc ==
+                NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242)
+               result = rates_996[rate->eht_gi] + rates_484[rate->eht_gi]
+                        + rates_242[rate->eht_gi];
+       else if (rate->bw == RATE_INFO_BW_EHT_RU &&
+                rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_996P484)
+               result = rates_996[rate->eht_gi] + rates_484[rate->eht_gi];
+       else if (rate->bw == RATE_INFO_BW_80 ||
+                (rate->bw == RATE_INFO_BW_EHT_RU &&
+                 rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_996))
+               result = rates_996[rate->eht_gi];
+       else if (rate->bw == RATE_INFO_BW_EHT_RU &&
+                rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_484P242)
+               result = rates_484[rate->eht_gi] + rates_242[rate->eht_gi];
+       else if (rate->bw == RATE_INFO_BW_40 ||
+                (rate->bw == RATE_INFO_BW_EHT_RU &&
+                 rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_484))
+               result = rates_484[rate->eht_gi];
+       else if (rate->bw == RATE_INFO_BW_20 ||
+                (rate->bw == RATE_INFO_BW_EHT_RU &&
+                 rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_242))
+               result = rates_242[rate->eht_gi];
+       else if (rate->bw == RATE_INFO_BW_EHT_RU &&
+                rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_106P26)
+               result = rates_106[rate->eht_gi] + rates_26[rate->eht_gi];
+       else if (rate->bw == RATE_INFO_BW_EHT_RU &&
+                rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_106)
+               result = rates_106[rate->eht_gi];
+       else if (rate->bw == RATE_INFO_BW_EHT_RU &&
+                rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_52P26)
+               result = rates_52[rate->eht_gi] + rates_26[rate->eht_gi];
+       else if (rate->bw == RATE_INFO_BW_EHT_RU &&
+                rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_52)
+               result = rates_52[rate->eht_gi];
+       else if (rate->bw == RATE_INFO_BW_EHT_RU &&
+                rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_26)
+               result = rates_26[rate->eht_gi];
+       else {
+               WARN(1, "invalid EHT MCS: bw:%d, ru:%d\n",
+                    rate->bw, rate->eht_ru_alloc);
+               return 0;
+       }
+
+       /* now scale to the appropriate MCS */
+       tmp = result;
+       tmp *= SCALE;
+       do_div(tmp, mcs_divisors[rate->mcs]);
+       result = tmp;
+
+       /* and take NSS */
+       result = (result * rate->nss) / 8;
+
+       return result / 10000;
+}
+
 u32 cfg80211_calculate_bitrate(struct rate_info *rate)
 {
        if (rate->flags & RATE_INFO_FLAGS_MCS)
@@ -1444,6 +1573,8 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate)
                return cfg80211_calculate_bitrate_vht(rate);
        if (rate->flags & RATE_INFO_FLAGS_HE_MCS)
                return cfg80211_calculate_bitrate_he(rate);
+       if (rate->flags & RATE_INFO_FLAGS_EHT_MCS)
+               return cfg80211_calculate_bitrate_eht(rate);
 
        return rate->legacy;
 }
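
The EHT helper above follows the HE one: pick a base rate for the bandwidth or
RU allocation at the chosen guard interval, scale it by SCALE /
mcs_divisors[mcs], apply nss / 8 (the tables assume eight spatial streams),
and return the result in units of 100 kbps. A standalone rework of one data
point, with the constants copied from the hunk:

    #include <stdio.h>
    #include <stdint.h>

    /* One data point through the EHT scaling above: 80 MHz (996-tone RU),
     * 0.8 us GI, EHT-MCS 11, NSS 2. Constants are copied from the hunk. */
    #define SCALE 6144

    int main(void)
    {
        uint32_t result = 480388888;    /* rates_996[EHT_GI_0_8] */
        uint32_t mcs11_div = 6144;      /* mcs_divisors[11], i.e. x1.0 */
        uint32_t nss = 2;
        uint64_t tmp;

        tmp = (uint64_t)result * SCALE;
        tmp /= mcs11_div;               /* scale the base rate to the MCS */
        result = (uint32_t)tmp;

        result = result * nss / 8;      /* tables assume 8 spatial streams */

        /* prints 12009, i.e. 1200.9 Mbps in units of 100 kbps */
        printf("%u\n", result / 10000);
        return 0;
    }
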
@@ -2153,7 +2284,7 @@ void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr)
        skb->dev = dev;
        skb->protocol = eth_type_trans(skb, dev);
        memset(skb->cb, 0, sizeof(skb->cb));
-       netif_rx_ni(skb);
+       netif_rx(skb);
 }
 EXPORT_SYMBOL(cfg80211_send_layer2_update);
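
Context for the one-line change above: netif_rx() now disables bottom halves
around the enqueue itself, so it is safe to call from process context too, and
the netif_rx_ni() variant is being retired tree-wide. A sketch of the idea
with stand-in names (not the kernel API):

    /* Stand-ins, not the kernel API; they mark where the real primitives
     * would run. */
    static void local_bh_disable_sketch(void) { /* mask softirqs on this CPU */ }
    static void local_bh_enable_sketch(void)  { /* unmask, run pending softirqs */ }
    static int enqueue_to_backlog_sketch(void) { return 0; }

    static int netif_rx_sketch(void)
    {
        int ret;

        local_bh_disable_sketch();      /* process-context callers are safe */
        ret = enqueue_to_backlog_sketch();
        local_bh_enable_sketch();
        return ret;
    }
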
 
index 39bce5d..36aa01d 100644 (file)
@@ -143,7 +143,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
                segs = skb_gso_segment(skb, esp_features);
                if (IS_ERR(segs)) {
                        kfree_skb(skb);
-                       atomic_long_inc(&dev->tx_dropped);
+                       dev_core_stats_tx_dropped_inc(dev);
                        return NULL;
                } else {
                        consume_skb(skb);
@@ -384,16 +384,6 @@ static int xfrm_api_check(struct net_device *dev)
        return NOTIFY_DONE;
 }
 
-static int xfrm_dev_register(struct net_device *dev)
-{
-       return xfrm_api_check(dev);
-}
-
-static int xfrm_dev_feat_change(struct net_device *dev)
-{
-       return xfrm_api_check(dev);
-}
-
 static int xfrm_dev_down(struct net_device *dev)
 {
        if (dev->features & NETIF_F_HW_ESP)
@@ -408,10 +398,10 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
 
        switch (event) {
        case NETDEV_REGISTER:
-               return xfrm_dev_register(dev);
+               return xfrm_api_check(dev);
 
        case NETDEV_FEAT_CHANGE:
-               return xfrm_dev_feat_change(dev);
+               return xfrm_api_check(dev);
 
        case NETDEV_DOWN:
        case NETDEV_UNREGISTER:
index daec1fb..5113fa0 100644 (file)
@@ -304,7 +304,10 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
                        if (mtu < IPV6_MIN_MTU)
                                mtu = IPV6_MIN_MTU;
 
-                       icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+                       if (skb->len > 1280)
+                               icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+                       else
+                               goto xmit;
                } else {
                        if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
                                goto xmit;
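
The reasoning behind the new length check: IPv6 guarantees a 1280-byte minimum
link MTU (RFC 8201), so a Packet Too Big error only makes sense for packets
above that floor; anything at or below 1280 bytes must be transmitted and left
to tunnel fragmentation rather than bounced back to the sender. A standalone
sketch of the decision:

    /* Standalone sketch of the decision; pkt_len and mtu are in bytes. */
    #define IPV6_MIN_MTU 1280

    enum action { TRANSMIT, SEND_PKT_TOOBIG };

    static enum action mtu_exceeded_action(unsigned int pkt_len,
                                           unsigned int mtu)
    {
        if (pkt_len <= mtu)
            return TRANSMIT;            /* fits, nothing to report */
        if (pkt_len > IPV6_MIN_MTU)
            return SEND_PKT_TOOBIG;     /* sender can usefully shrink */
        return TRANSMIT;                /* at or below 1280: never bounce */
    }
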
index 8825261..19aa994 100644 (file)
@@ -3158,7 +3158,7 @@ ok:
 
 nopol:
        if (!(dst_orig->dev->flags & IFF_LOOPBACK) &&
-           !xfrm_default_allow(net, dir)) {
+           net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
                err = -EPERM;
                goto error;
        }
@@ -3569,7 +3569,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
        }
 
        if (!pol) {
-               if (!xfrm_default_allow(net, dir)) {
+               if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
                        return 0;
                }
@@ -3629,7 +3629,8 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
                }
                xfrm_nr = ti;
 
-               if (!xfrm_default_allow(net, dir) && !xfrm_nr) {
+               if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK &&
+                   !xfrm_nr) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
                        goto reject;
                }
@@ -4118,6 +4119,9 @@ static int __net_init xfrm_net_init(struct net *net)
        spin_lock_init(&net->xfrm.xfrm_policy_lock);
        seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
        mutex_init(&net->xfrm.xfrm_cfg_mutex);
+       net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
+       net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
+       net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;
 
        rv = xfrm_statistics_init(net);
        if (rv < 0)
index a4fb596..64fa8fd 100644 (file)
@@ -630,13 +630,8 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
 
        xfrm_smark_init(attrs, &x->props.smark);
 
-       if (attrs[XFRMA_IF_ID]) {
+       if (attrs[XFRMA_IF_ID])
                x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
-               if (!x->if_id) {
-                       err = -EINVAL;
-                       goto error;
-               }
-       }
 
        err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV]);
        if (err)
@@ -1432,13 +1427,8 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        mark = xfrm_mark_get(attrs, &m);
 
-       if (attrs[XFRMA_IF_ID]) {
+       if (attrs[XFRMA_IF_ID])
                if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
-               if (!if_id) {
-                       err = -EINVAL;
-                       goto out_noput;
-               }
-       }
 
        if (p->info.seq) {
                x = xfrm_find_acq_byseq(net, mark, p->info.seq);
@@ -1751,13 +1741,8 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_us
 
        xfrm_mark_get(attrs, &xp->mark);
 
-       if (attrs[XFRMA_IF_ID]) {
+       if (attrs[XFRMA_IF_ID])
                xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
-               if (!xp->if_id) {
-                       err = -EINVAL;
-                       goto error;
-               }
-       }
 
        return xp;
  error:
@@ -2009,12 +1994,9 @@ static int xfrm_notify_userpolicy(struct net *net)
        }
 
        up = nlmsg_data(nlh);
-       up->in = net->xfrm.policy_default & XFRM_POL_DEFAULT_IN ?
-                       XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
-       up->fwd = net->xfrm.policy_default & XFRM_POL_DEFAULT_FWD ?
-                       XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
-       up->out = net->xfrm.policy_default & XFRM_POL_DEFAULT_OUT ?
-                       XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+       up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
+       up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
+       up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];
 
        nlmsg_end(skb, nlh);
 
@@ -2025,26 +2007,26 @@ static int xfrm_notify_userpolicy(struct net *net)
        return err;
 }
 
+static bool xfrm_userpolicy_is_valid(__u8 policy)
+{
+       return policy == XFRM_USERPOLICY_BLOCK ||
+              policy == XFRM_USERPOLICY_ACCEPT;
+}
+
 static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh,
                            struct nlattr **attrs)
 {
        struct net *net = sock_net(skb->sk);
        struct xfrm_userpolicy_default *up = nlmsg_data(nlh);
 
-       if (up->in == XFRM_USERPOLICY_BLOCK)
-               net->xfrm.policy_default |= XFRM_POL_DEFAULT_IN;
-       else if (up->in == XFRM_USERPOLICY_ACCEPT)
-               net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_IN;
+       if (xfrm_userpolicy_is_valid(up->in))
+               net->xfrm.policy_default[XFRM_POLICY_IN] = up->in;
 
-       if (up->fwd == XFRM_USERPOLICY_BLOCK)
-               net->xfrm.policy_default |= XFRM_POL_DEFAULT_FWD;
-       else if (up->fwd == XFRM_USERPOLICY_ACCEPT)
-               net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_FWD;
+       if (xfrm_userpolicy_is_valid(up->fwd))
+               net->xfrm.policy_default[XFRM_POLICY_FWD] = up->fwd;
 
-       if (up->out == XFRM_USERPOLICY_BLOCK)
-               net->xfrm.policy_default |= XFRM_POL_DEFAULT_OUT;
-       else if (up->out == XFRM_USERPOLICY_ACCEPT)
-               net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_OUT;
+       if (xfrm_userpolicy_is_valid(up->out))
+               net->xfrm.policy_default[XFRM_POLICY_OUT] = up->out;
 
        rt_genid_bump_all(net);
 
@@ -2074,13 +2056,9 @@ static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
        }
 
        r_up = nlmsg_data(r_nlh);
-
-       r_up->in = net->xfrm.policy_default & XFRM_POL_DEFAULT_IN ?
-                       XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
-       r_up->fwd = net->xfrm.policy_default & XFRM_POL_DEFAULT_FWD ?
-                       XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
-       r_up->out = net->xfrm.policy_default & XFRM_POL_DEFAULT_OUT ?
-                       XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+       r_up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
+       r_up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
+       r_up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];
        nlmsg_end(r_skb, r_nlh);
 
        return nlmsg_unicast(net->xfrm.nlsk, r_skb, portid);
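
Across xfrm_policy.c and xfrm_user.c this series swaps the packed per-direction
"block" bitmask for an array that stores the userspace value directly, so get
and set become plain loads and stores and validation reduces to one helper. A
standalone sketch; the constants are meant to mirror include/uapi/linux/xfrm.h:

    #include <stdio.h>

    #define XFRM_POLICY_IN   0
    #define XFRM_POLICY_OUT  1
    #define XFRM_POLICY_FWD  2
    #define XFRM_POLICY_MAX  3

    #define XFRM_USERPOLICY_UNSPEC 0
    #define XFRM_USERPOLICY_BLOCK  1
    #define XFRM_USERPOLICY_ACCEPT 2

    static unsigned char policy_default[XFRM_POLICY_MAX] = {
        [XFRM_POLICY_IN]  = XFRM_USERPOLICY_ACCEPT,
        [XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT,
        [XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT,
    };

    static int userpolicy_is_valid(unsigned char p)
    {
        return p == XFRM_USERPOLICY_BLOCK || p == XFRM_USERPOLICY_ACCEPT;
    }

    static void set_default(int dir, unsigned char p)
    {
        if (userpolicy_is_valid(p))     /* UNSPEC (0) leaves the value alone */
            policy_default[dir] = p;
    }

    int main(void)
    {
        set_default(XFRM_POLICY_IN, XFRM_USERPOLICY_BLOCK);
        set_default(XFRM_POLICY_FWD, XFRM_USERPOLICY_UNSPEC);   /* no-op */

        /* the drop check collapses to a direct comparison, as in the hunks */
        printf("drop in?  %d\n",
               policy_default[XFRM_POLICY_IN] == XFRM_USERPOLICY_BLOCK);  /* 1 */
        printf("drop fwd? %d\n",
               policy_default[XFRM_POLICY_FWD] == XFRM_USERPOLICY_BLOCK); /* 0 */
        return 0;
    }
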
index 4aaee18..4415fb3 100644 (file)
@@ -150,7 +150,6 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = {
        SOC_SINGLE("E to F Buffer Disable Switch", CS4265_SPDIF_CTL1,
                                6, 1, 0),
        SOC_ENUM("C Data Access", cam_mode_enum),
-       SOC_SINGLE("SPDIF Switch", CS4265_SPDIF_CTL2, 5, 1, 1),
        SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2,
                                3, 1, 0),
        SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum),
@@ -186,7 +185,7 @@ static const struct snd_soc_dapm_widget cs4265_dapm_widgets[] = {
 
        SND_SOC_DAPM_SWITCH("Loopback", SND_SOC_NOPM, 0, 0,
                        &loopback_ctl),
-       SND_SOC_DAPM_SWITCH("SPDIF", SND_SOC_NOPM, 0, 0,
+       SND_SOC_DAPM_SWITCH("SPDIF", CS4265_SPDIF_CTL2, 5, 1,
                        &spdif_switch),
        SND_SOC_DAPM_SWITCH("DAC", CS4265_PWRCTL, 1, 1,
                        &dac_switch),
index 03ea959..a0ca58b 100644 (file)
@@ -319,7 +319,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
        if (ucontrol->value.integer.value[0] < 0)
                return -EINVAL;
        val = ucontrol->value.integer.value[0];
-       if (mc->platform_max && val > mc->platform_max)
+       if (mc->platform_max && ((int)val + min) > mc->platform_max)
                return -EINVAL;
        if (val > max - min)
                return -EINVAL;
@@ -332,7 +332,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
                if (ucontrol->value.integer.value[1] < 0)
                        return -EINVAL;
                val2 = ucontrol->value.integer.value[1];
-               if (mc->platform_max && val2 > mc->platform_max)
+               if (mc->platform_max && ((int)val2 + min) > mc->platform_max)
                        return -EINVAL;
                if (val2 > max - min)
                        return -EINVAL;
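
The fix above accounts for the control's minimum: ALSA mixer values from
userspace are relative to min (0 .. max - min), while platform_max is on the
absolute register scale, so min must be added back before comparing. A
standalone sketch with made-up numbers:

    #include <stdio.h>

    /* Simplified stand-in for the soc_mixer_control fields involved. */
    struct mixer_ctl { int min, max, platform_max; };

    static int put_volsw_check(const struct mixer_ctl *mc, long val)
    {
        if (val < 0)
            return -1;
        if (mc->platform_max && ((int)val + mc->min) > mc->platform_max)
            return -1;                      /* the corrected comparison */
        if (val > mc->max - mc->min)
            return -1;
        return 0;
    }

    int main(void)
    {
        struct mixer_ctl mc = { .min = 4, .max = 60, .platform_max = 40 };

        /* relative 38 = absolute 42 > platform_max: now rejected */
        printf("%d\n", put_volsw_check(&mc, 38));   /* -1 */
        /* relative 36 = absolute 40: still allowed */
        printf("%d\n", put_volsw_check(&mc, 36));   /* 0 */
        return 0;
    }
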
index 1c94eaf..4a3ff64 100644 (file)
@@ -1261,7 +1261,7 @@ static int had_pcm_mmap(struct snd_pcm_substream *substream,
 {
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return remap_pfn_range(vma, vma->vm_start,
-                       substream->dma_buffer.addr >> PAGE_SHIFT,
+                       substream->runtime->dma_addr >> PAGE_SHIFT,
                        vma->vm_end - vma->vm_start, vma->vm_page_prot);
 }
 
index b3edde6..323e251 100644 (file)
@@ -281,6 +281,11 @@ struct kvm_arm_copy_mte_tags {
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED       3
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED            (1U << 4)
 
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3    KVM_REG_ARM_FW_REG(3)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL          0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL              1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED       2
+
 /* SVE registers */
 #define KVM_REG_ARM64_SVE              (0x15 << KVM_REG_ARM_COPROC_SHIFT)
 
index 6db4e29..65d1479 100644 (file)
 /* FREE!                                ( 7*32+10) */
 #define X86_FEATURE_PTI                        ( 7*32+11) /* Kernel Page Table Isolation enabled */
 #define X86_FEATURE_RETPOLINE          ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_AMD      ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_LFENCE   ( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */
 #define X86_FEATURE_INTEL_PPIN         ( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2             ( 7*32+15) /* Code and Data Prioritization L2 */
 #define X86_FEATURE_MSR_SPEC_CTRL      ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
index 740ae76..134612b 100644 (file)
@@ -106,7 +106,7 @@ static void nest_epollfd(void)
        printinfo("Nesting level(s): %d\n", nested);
 
        epollfdp = calloc(nested, sizeof(int));
-       if (!epollfd)
+       if (!epollfdp)
                err(EXIT_FAILURE, "calloc");
 
        for (i = 0; i < nested; i++) {
index 9739b05..2499792 100644 (file)
@@ -1648,6 +1648,7 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
 {
        struct parse_events_term *term;
        struct list_head *list = NULL;
+       struct list_head *orig_head = NULL;
        struct perf_pmu *pmu = NULL;
        int ok = 0;
        char *config;
@@ -1674,7 +1675,6 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
        }
        list_add_tail(&term->list, head);
 
-
        /* Add it for all PMUs that support the alias */
        list = malloc(sizeof(struct list_head));
        if (!list)
@@ -1687,13 +1687,15 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
 
                list_for_each_entry(alias, &pmu->aliases, list) {
                        if (!strcasecmp(alias->name, str)) {
+                               parse_events_copy_term_list(head, &orig_head);
                                if (!parse_events_add_pmu(parse_state, list,
-                                                         pmu->name, head,
+                                                         pmu->name, orig_head,
                                                          true, true)) {
                                        pr_debug("%s -> %s/%s/\n", str,
                                                 pmu->name, alias->str);
                                        ok++;
                                }
+                               parse_events_terms__delete(orig_head);
                        }
                }
        }
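
The fix above gives every matching PMU its own copy of the parsed term list
(orig_head), created before parse_events_add_pmu() and deleted after it, since
adding an event can consume or annotate the list and reuse would corrupt later
iterations. A standalone sketch of the copy-per-consumer pattern with
simplified stand-in types:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Simplified stand-in types; the real code copies parse_events_term
     * lists. Each consumer may mutate its list, so each gets a copy. */
    struct term { char *cfg; struct term *next; };

    static struct term *copy_terms(const struct term *head)
    {
        struct term *copy = NULL, **tail = &copy;

        for (; head; head = head->next) {
            struct term *t = calloc(1, sizeof(*t));

            if (!t)
                exit(1);
            t->cfg = strdup(head->cfg);
            *tail = t;
            tail = &t->next;
        }
        return copy;
    }

    static void free_terms(struct term *head)
    {
        while (head) {
            struct term *next = head->next;

            free(head->cfg);
            free(head);
            head = next;
        }
    }

    /* stand-in for parse_events_add_pmu(), which may modify the list */
    static void add_event_for_pmu(const char *pmu, struct term *terms)
    {
        printf("%s <- %s\n", pmu, terms->cfg);
    }

    int main(void)
    {
        struct term head = { .cfg = (char *)"config=1", .next = NULL };
        const char *pmus[] = { "uncore_a", "uncore_b" };

        for (unsigned int i = 0; i < 2; i++) {
            struct term *copy = copy_terms(&head);  /* fresh list per PMU */

            add_event_for_pmu(pmus[i], copy);
            free_terms(copy);
        }
        return 0;
    }
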
@@ -2193,7 +2195,7 @@ int perf_pmu__test_parse_init(void)
        for (i = 0; i < ARRAY_SIZE(symbols); i++, tmp++) {
                tmp->type = symbols[i].type;
                tmp->symbol = strdup(symbols[i].symbol);
-               if (!list->symbol)
+               if (!tmp->symbol)
                        goto err_free;
        }
 
diff --git a/tools/testing/selftests/drivers/net/mlxsw/hw_stats_l3.sh b/tools/testing/selftests/drivers/net/mlxsw/hw_stats_l3.sh
new file mode 100755 (executable)
index 0000000..941ba4c
--- /dev/null
@@ -0,0 +1,31 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+       l3_monitor_test
+"
+NUM_NETIFS=0
+source $lib_dir/lib.sh
+
+swp=$NETIF_NO_CABLE
+
+cleanup()
+{
+       pre_cleanup
+}
+
+l3_monitor_test()
+{
+       hw_stats_monitor_test $swp l3               \
+               "ip addr add dev $swp 192.0.2.1/28" \
+               "ip addr del dev $swp 192.0.2.1/28"
+}
+
+trap cleanup EXIT
+
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/netdevsim/hw_stats_l3.sh b/tools/testing/selftests/drivers/net/netdevsim/hw_stats_l3.sh
new file mode 100755 (executable)
index 0000000..fe18984
--- /dev/null
@@ -0,0 +1,421 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+       l3_reporting_test
+       l3_fail_next_test
+       l3_counter_test
+       l3_rollback_test
+       l3_monitor_test
+"
+
+NETDEVSIM_PATH=/sys/bus/netdevsim/
+DEV_ADDR_1=1337
+DEV_ADDR_2=1057
+DEV_ADDR_3=5417
+NUM_NETIFS=0
+source $lib_dir/lib.sh
+
+DUMMY_IFINDEX=
+
+DEV_ADDR()
+{
+       local n=$1; shift
+       local var=DEV_ADDR_$n
+
+       echo ${!var}
+}
+
+DEV()
+{
+       echo netdevsim$(DEV_ADDR $1)
+}
+
+DEVLINK_DEV()
+{
+       echo netdevsim/$(DEV $1)
+}
+
+SYSFS_NET_DIR()
+{
+       echo /sys/bus/netdevsim/devices/$(DEV $1)/net/
+}
+
+DEBUGFS_DIR()
+{
+       echo /sys/kernel/debug/netdevsim/$(DEV $1)/
+}
+
+nsim_add()
+{
+       local n=$1; shift
+
+       echo "$(DEV_ADDR $n) 1" > ${NETDEVSIM_PATH}/new_device
+       while [ ! -d $(SYSFS_NET_DIR $n) ] ; do :; done
+}
+
+nsim_reload()
+{
+       local n=$1; shift
+       local ns=$1; shift
+
+       devlink dev reload $(DEVLINK_DEV $n) netns $ns
+
+       if [ $? -ne 0 ]; then
+               echo "Failed to reload $(DEV $n) into netns \"testns1\""
+               exit 1
+       fi
+}
+
+nsim_del()
+{
+       local n=$1; shift
+
+       echo "$(DEV_ADDR $n)" > ${NETDEVSIM_PATH}/del_device
+}
+
+nsim_hwstats_toggle()
+{
+       local action=$1; shift
+       local instance=$1; shift
+       local netdev=$1; shift
+       local type=$1; shift
+
+       local ifindex=$($IP -j link show dev $netdev | jq '.[].ifindex')
+
+       echo $ifindex > $(DEBUGFS_DIR $instance)/hwstats/$type/$action
+}
+
+nsim_hwstats_enable()
+{
+       nsim_hwstats_toggle enable_ifindex "$@"
+}
+
+nsim_hwstats_disable()
+{
+       nsim_hwstats_toggle disable_ifindex "$@"
+}
+
+nsim_hwstats_fail_next_enable()
+{
+       nsim_hwstats_toggle fail_next_enable "$@"
+}
+
+setup_prepare()
+{
+       modprobe netdevsim &> /dev/null
+       nsim_add 1
+       nsim_add 2
+       nsim_add 3
+
+       ip netns add testns1
+
+       if [ $? -ne 0 ]; then
+               echo "Failed to add netns \"testns1\""
+               exit 1
+       fi
+
+       nsim_reload 1 testns1
+       nsim_reload 2 testns1
+       nsim_reload 3 testns1
+
+       IP="ip -n testns1"
+
+       $IP link add name dummy1 type dummy
+       $IP link set dev dummy1 up
+       DUMMY_IFINDEX=$($IP -j link show dev dummy1 | jq '.[].ifindex')
+}
+
+cleanup()
+{
+       pre_cleanup
+
+       $IP link del name dummy1
+       ip netns del testns1
+       nsim_del 3
+       nsim_del 2
+       nsim_del 1
+       modprobe -r netdevsim &> /dev/null
+}
+
+netdev_hwstats_used()
+{
+       local netdev=$1; shift
+       local type=$1; shift
+
+       $IP -j stats show dev "$netdev" group offload subgroup hw_stats_info |
+           jq '.[].info.l3_stats.used'
+}
+
+netdev_check_used()
+{
+       local netdev=$1; shift
+       local type=$1; shift
+
+       [[ $(netdev_hwstats_used $netdev $type) == "true" ]]
+}
+
+netdev_check_unused()
+{
+       local netdev=$1; shift
+       local type=$1; shift
+
+       [[ $(netdev_hwstats_used $netdev $type) == "false" ]]
+}
+
+netdev_hwstats_request()
+{
+       local netdev=$1; shift
+       local type=$1; shift
+
+       $IP -j stats show dev "$netdev" group offload subgroup hw_stats_info |
+           jq ".[].info.${type}_stats.request"
+}
+
+netdev_check_requested()
+{
+       local netdev=$1; shift
+       local type=$1; shift
+
+       [[ $(netdev_hwstats_request $netdev $type) == "true" ]]
+}
+
+netdev_check_unrequested()
+{
+       local netdev=$1; shift
+       local type=$1; shift
+
+       [[ $(netdev_hwstats_request $netdev $type) == "false" ]]
+}
+
+reporting_test()
+{
+       local type=$1; shift
+       local instance=1
+
+       RET=0
+
+       [[ -n $(netdev_hwstats_used dummy1 $type) ]]
+       check_err $? "$type stats not reported"
+
+       netdev_check_unused dummy1 $type
+       check_err $? "$type stats reported as used before either device or netdevsim request"
+
+       nsim_hwstats_enable $instance dummy1 $type
+       netdev_check_unused dummy1 $type
+       check_err $? "$type stats reported as used before device request"
+       netdev_check_unrequested dummy1 $type
+       check_err $? "$type stats reported as requested before device request"
+
+       $IP stats set dev dummy1 ${type}_stats on
+       netdev_check_used dummy1 $type
+       check_err $? "$type stats reported as not used after both device and netdevsim request"
+       netdev_check_requested dummy1 $type
+       check_err $? "$type stats reported as not requested after device request"
+
+       nsim_hwstats_disable $instance dummy1 $type
+       netdev_check_unused dummy1 $type
+       check_err $? "$type stats reported as used after netdevsim request withdrawn"
+
+       nsim_hwstats_enable $instance dummy1 $type
+       netdev_check_used dummy1 $type
+       check_err $? "$type stats reported as not used after netdevsim request reenabled"
+
+       $IP stats set dev dummy1 ${type}_stats off
+       netdev_check_unused dummy1 $type
+       check_err $? "$type stats reported as used after device request withdrawn"
+       netdev_check_unrequested dummy1 $type
+       check_err $? "$type stats reported as requested after device request withdrawn"
+
+       nsim_hwstats_disable $instance dummy1 $type
+       netdev_check_unused dummy1 $type
+       check_err $? "$type stats reported as used after both requests withdrawn"
+
+       log_test "Reporting of $type stats usage"
+}
+
+l3_reporting_test()
+{
+       reporting_test l3
+}
+
+__fail_next_test()
+{
+       local instance=$1; shift
+       local type=$1; shift
+
+       RET=0
+
+       netdev_check_unused dummy1 $type
+       check_err $? "$type stats reported as used before either device or netdevsim request"
+
+       nsim_hwstats_enable $instance dummy1 $type
+       nsim_hwstats_fail_next_enable $instance dummy1 $type
+       netdev_check_unused dummy1 $type
+       check_err $? "$type stats reported as used before device request"
+       netdev_check_unrequested dummy1 $type
+       check_err $? "$type stats reported as requested before device request"
+
+       $IP stats set dev dummy1 ${type}_stats on 2>/dev/null
+       check_fail $? "$type stats request not bounced as it should have been"
+       netdev_check_unused dummy1 $type
+       check_err $? "$type stats reported as used after bounce"
+       netdev_check_unrequested dummy1 $type
+       check_err $? "$type stats reported as requested after bounce"
+
+       $IP stats set dev dummy1 ${type}_stats on
+       check_err $? "$type stats request failed when it shouldn't have"
+       netdev_check_used dummy1 $type
+       check_err $? "$type stats reported as not used after both device and netdevsim request"
+       netdev_check_requested dummy1 $type
+       check_err $? "$type stats reported as not requested after device request"
+
+       $IP stats set dev dummy1 ${type}_stats off
+       nsim_hwstats_disable $instance dummy1 $type
+
+       log_test "Injected failure of $type stats enablement (netdevsim #$instance)"
+}
+
+fail_next_test()
+{
+       __fail_next_test 1 "$@"
+       __fail_next_test 2 "$@"
+       __fail_next_test 3 "$@"
+}
+
+l3_fail_next_test()
+{
+       fail_next_test l3
+}
+
+get_hwstat()
+{
+       local netdev=$1; shift
+       local type=$1; shift
+       local selector=$1; shift
+
+       $IP -j stats show dev $netdev group offload subgroup ${type}_stats |
+                 jq ".[0].stats64.${selector}"
+}
+
+counter_test()
+{
+       local type=$1; shift
+       local instance=1
+
+       RET=0
+
+       nsim_hwstats_enable $instance dummy1 $type
+       $IP stats set dev dummy1 ${type}_stats on
+       netdev_check_used dummy1 $type
+       check_err $? "$type stats reported as not used after both device and netdevsim request"
+
+       # Netdevsim counts 10 pps on ingress. We should see only a couple of
+       # packets this soon after enablement, unless things take a really long time.
+       local pkts=$(get_hwstat dummy1 l3 rx.packets)
+       ((pkts < 10))
+       check_err $? "$type stats show >= 10 packets after first enablement"
+
+       sleep 2
+
+       local pkts=$(get_hwstat dummy1 l3 rx.packets)
+       ((pkts >= 20))
+       check_err $? "$type stats show < 20 packets after 2s passed"
+
+       $IP stats set dev dummy1 ${type}_stats off
+
+       sleep 2
+
+       $IP stats set dev dummy1 ${type}_stats on
+       local pkts=$(get_hwstat dummy1 l3 rx.packets)
+       ((pkts < 10))
+       check_err $? "$type stats show >= 10 packets after second enablement"
+
+       $IP stats set dev dummy1 ${type}_stats off
+       nsim_hwstats_fail_next_enable $instance dummy1 $type
+       $IP stats set dev dummy1 ${type}_stats on 2>/dev/null
+       check_fail $? "$type stats request not bounced as it should have been"
+
+       sleep 2
+
+       $IP stats set dev dummy1 ${type}_stats on
+       local pkts=$(get_hwstat dummy1 l3 rx.packets)
+       ((pkts < 10))
+       check_err $? "$type stats show >= 10 packets after post-fail enablement"
+
+       $IP stats set dev dummy1 ${type}_stats off
+
+       log_test "Counter values in $type stats"
+}
+
+l3_counter_test()
+{
+       counter_test l3
+}
+
+rollback_test()
+{
+       local type=$1; shift
+
+       RET=0
+
+       nsim_hwstats_enable 1 dummy1 l3
+       nsim_hwstats_enable 2 dummy1 l3
+       nsim_hwstats_enable 3 dummy1 l3
+
+       # The three netdevsim instances are registered one after another in
+       # order of their number. It is therefore reasonable to expect that any
+       # notification reaches no. 2 between reaching nos. 1 and 3, whichever
+       # direction the chain is walked. This lets us test that a failure
+       # injected at no. 2 does not leave the system in a partial state, but
+       # is rolled back completely.
+
+       nsim_hwstats_fail_next_enable 2 dummy1 l3
+       $IP stats set dev dummy1 ${type}_stats on 2>/dev/null
+       check_fail $? "$type stats request not bounced as it should have been"
+
+       netdev_check_unused dummy1 $type
+       check_err $? "$type stats reported as used after bounce"
+       netdev_check_unrequested dummy1 $type
+       check_err $? "$type stats reported as requested after bounce"
+
+       sleep 2
+
+       $IP stats set dev dummy1 ${type}_stats on
+       check_err $? "$type stats request not upheld as it should have been"
+
+       local pkts=$(get_hwstat dummy1 l3 rx.packets)
+       ((pkts < 10))
+       check_err $? "$type stats show $pkts packets after post-fail enablement"
+
+       $IP stats set dev dummy1 ${type}_stats off
+
+       nsim_hwstats_disable 3 dummy1 l3
+       nsim_hwstats_disable 2 dummy1 l3
+       nsim_hwstats_disable 1 dummy1 l3
+
+       log_test "Failure in $type stats enablement rolled back"
+}
+
+l3_rollback_test()
+{
+       rollback_test l3
+}
+
+l3_monitor_test()
+{
+       hw_stats_monitor_test dummy1 l3            \
+               "nsim_hwstats_enable 1 dummy1 l3"  \
+               "nsim_hwstats_disable 1 dummy1 l3" \
+               "$IP"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+tests_run
+
+exit $EXIT_STATUS
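
The rollback test above relies on all-or-nothing enablement semantics: when
one subscriber in the chain refuses the request, every subscriber already
enabled must be disabled again so no partial state is left behind. A
standalone sketch of that unwind, with hypothetical callbacks (the kernel
drives this through notifier chains):

    #include <stdio.h>

    #define N_SUBSCRIBERS 3

    /* Hypothetical callbacks; instance fail_idx refuses the request, like
     * the fail_next_enable knob in the test above. */
    static int enable_cb(int i, int fail_idx)
    {
        if (i == fail_idx)
            return -1;                  /* injected failure */
        printf("enabled %d\n", i);
        return 0;
    }

    static void disable_cb(int i)
    {
        printf("disabled %d\n", i);
    }

    static int enable_all(int fail_idx)
    {
        int i;

        for (i = 0; i < N_SUBSCRIBERS; i++) {
            if (enable_cb(i, fail_idx) < 0) {
                while (--i >= 0)        /* unwind in reverse order */
                    disable_cb(i);
                return -1;
            }
        }
        return 0;
    }

    int main(void)
    {
        /* instance 1 bounces the request; 0 must be disabled again */
        return enable_all(1) == -1 ? 0 : 1;
    }
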
index aed7845..bc21629 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/udp.h>
 #include <sys/socket.h>
 
+#include "../kselftest.h"
+
 enum {
        ERN_SUCCESS = 0,
        /* Well defined errors, callers may depend on these */
@@ -318,7 +320,7 @@ static const char *cs_ts_info2str(unsigned int info)
                [SCM_TSTAMP_ACK]        = "ACK",
        };
 
-       if (info < sizeof(names) / sizeof(names[0]))
+       if (info < ARRAY_SIZE(names))
                return names[info];
        return "unknown";
 }
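
The hunk above replaces the open-coded sizeof division with ARRAY_SIZE() from
kselftest.h. A minimal standalone version of the idiom:

    #include <stdio.h>

    /* Minimal version of the macro the selftest now pulls from kselftest.h.
     * It only works on true arrays, not pointers, which is one reason to
     * centralize it instead of open-coding the division at each call site. */
    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *names[] = { "SCHED", "SND", "ACK" };

    static const char *info2str(unsigned int info)
    {
        if (info < ARRAY_SIZE(names))
            return names[info];
        return "unknown";
    }

    int main(void)
    {
        printf("%s %s\n", info2str(1), info2str(7));    /* SND unknown */
        return 0;
    }
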
index 3f4c8cf..47c4d4b 100755 (executable)
@@ -750,7 +750,7 @@ ipv4_ping_vrf()
                log_start
                show_hint "Fails since address on vrf device is out of device scope"
                run_cmd ping -c1 -w1 -I ${NSA_DEV} ${a}
-               log_test_addr ${a} $? 1 "ping local, device bind"
+               log_test_addr ${a} $? 2 "ping local, device bind"
        done
 
        #
index 6e98efa..5b02b6b 100755 (executable)
@@ -9,9 +9,7 @@ source lib.sh
 h1_create()
 {
        simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64
-       vrf_create "vrf-vlan-h1"
-       ip link set dev vrf-vlan-h1 up
-       vlan_create $h1 100 vrf-vlan-h1 198.51.100.1/24
+       vlan_create $h1 100 v$h1 198.51.100.1/24
 }
 
 h1_destroy()
@@ -23,9 +21,7 @@ h1_destroy()
 h2_create()
 {
        simple_if_init $h2 192.0.2.2/24 2001:db8:1::2/64
-       vrf_create "vrf-vlan-h2"
-       ip link set dev vrf-vlan-h2 up
-       vlan_create $h2 100 vrf-vlan-h2 198.51.100.2/24
+       vlan_create $h2 100 v$h2 198.51.100.2/24
 }
 
 h2_destroy()
@@ -41,11 +37,11 @@ switch_create()
        ip link set dev $swp1 master br0
        ip link set dev $swp2 master br0
 
+       bridge link set dev $swp1 learning off
+
        ip link set dev br0 up
        ip link set dev $swp1 up
        ip link set dev $swp2 up
-
-       bridge link set dev $swp1 learning off
 }
 
 switch_destroy()
index 159afc7..664b9ec 100644 (file)
@@ -1498,3 +1498,63 @@ brmcast_check_sg_state()
                check_err_fail $should_fail $? "Entry $src has blocked flag"
        done
 }
+
+start_ip_monitor()
+{
+       local mtype=$1; shift
+       local ip=${1-ip}; shift
+
+       # start the monitor in the background
+       tmpfile=`mktemp /var/run/nexthoptestXXX`
+       mpid=`($ip monitor $mtype > $tmpfile & echo $!) 2>/dev/null`
+       sleep 0.2
+       echo "$mpid $tmpfile"
+}
+
+stop_ip_monitor()
+{
+       local mpid=$1; shift
+       local tmpfile=$1; shift
+       local el=$1; shift
+       local what=$1; shift
+
+       sleep 0.2
+       kill $mpid
+       local lines=`grep '^\w' $tmpfile | wc -l`
+       test $lines -eq $el
+       check_err $? "$what: $lines lines of events, expected $el"
+       rm -rf $tmpfile
+}
+
+hw_stats_monitor_test()
+{
+       local dev=$1; shift
+       local type=$1; shift
+       local make_suitable=$1; shift
+       local make_unsuitable=$1; shift
+       local ip=${1-ip}; shift
+
+       RET=0
+
+       # Expect a notification about enablement.
+       local ipmout=$(start_ip_monitor stats "$ip")
+       $ip stats set dev $dev ${type}_stats on
+       stop_ip_monitor $ipmout 1 "${type}_stats enablement"
+
+       # Expect a notification about offload.
+       local ipmout=$(start_ip_monitor stats "$ip")
+       $make_suitable
+       stop_ip_monitor $ipmout 1 "${type}_stats installation"
+
+       # Expect a notification about loss of offload.
+       local ipmout=$(start_ip_monitor stats "$ip")
+       $make_unsuitable
+       stop_ip_monitor $ipmout 1 "${type}_stats deinstallation"
+
+       # Expect a notification about disablement.
+       local ipmout=$(start_ip_monitor stats "$ip")
+       $ip stats set dev $dev ${type}_stats off
+       stop_ip_monitor $ipmout 1 "${type}_stats disablement"
+
+       log_test "${type}_stats notifications"
+}
index 45c6e5f..7314257 100755 (executable)
@@ -1,6 +1,11 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
+# Double quotes to prevent globbing and word splitting are recommended in new
+# code, but we accept the existing style here because there were too many
+# instances to fix before addressing all other issues detected by shellcheck.
+#shellcheck disable=SC2086
+
 ret=0
 sin=""
 sinfail=""
@@ -9,6 +14,9 @@ cin=""
 cinfail=""
 cinsent=""
 cout=""
+capout=""
+ns1=""
+ns2=""
 ksft_skip=4
 timeout_poll=30
 timeout_test=$((timeout_poll * 2 + 1))
@@ -16,12 +24,19 @@ capture=0
 checksum=0
 ip_mptcp=0
 check_invert=0
-do_all_tests=1
+validate_checksum=0
 init=0
 
+declare -A all_tests
+declare -a only_tests_ids
+declare -a only_tests_names
+declare -A failed_tests
 TEST_COUNT=0
+TEST_NAME=""
 nr_blank=40
 
+export FAILING_LINKS=""
+
 # generated using "nfbpf_compile '(ip && (ip[54] & 0xf0) == 0x30) ||
 #                                (ip6 && (ip6[74] & 0xf0) == 0x30)'"
 CBPF_MPTCP_SUBOPTION_ADD_ADDR="14,
@@ -44,12 +59,14 @@ init_partial()
 {
        capout=$(mktemp)
 
+       local rndh
        rndh=$(mktemp -u XXXXXX)
 
        ns1="ns1-$rndh"
        ns2="ns2-$rndh"
 
-       for netns in "$ns1" "$ns2";do
+       local netns
+       for netns in "$ns1" "$ns2"; do
                ip netns add $netns || exit $ksft_skip
                ip -net $netns link set lo up
                ip netns exec $netns sysctl -q net.mptcp.enabled=1
@@ -61,14 +78,17 @@ init_partial()
        done
 
        check_invert=0
+       validate_checksum=$checksum
+       FAILING_LINKS=""
 
-       #  ns1              ns2
+       #  ns1         ns2
        # ns1eth1    ns2eth1
        # ns1eth2    ns2eth2
        # ns1eth3    ns2eth3
        # ns1eth4    ns2eth4
 
-       for i in `seq 1 4`; do
+       local i
+       for i in $(seq 1 4); do
                ip link add ns1eth$i netns "$ns1" type veth peer name ns2eth$i netns "$ns2"
                ip -net "$ns1" addr add 10.0.$i.1/24 dev ns1eth$i
                ip -net "$ns1" addr add dead:beef:$i::1/64 dev ns1eth$i nodad
@@ -86,7 +106,8 @@ init_partial()
 
 init_shapers()
 {
-       for i in `seq 1 4`; do
+       local i
+       for i in $(seq 1 4); do
                tc -n $ns1 qdisc add dev ns1eth$i root netem rate 20mbit delay 1
                tc -n $ns2 qdisc add dev ns2eth$i root netem rate 20mbit delay 1
        done
@@ -96,6 +117,7 @@ cleanup_partial()
 {
        rm -f "$capout"
 
+       local netns
        for netns in "$ns1" "$ns2"; do
                ip netns del $netns
                rm -f /tmp/$netns.{nstat,out}
@@ -144,8 +166,38 @@ cleanup()
        cleanup_partial
 }
 
+skip_test()
+{
+       if [ "${#only_tests_ids[@]}" -eq 0 ] && [ "${#only_tests_names[@]}" -eq 0 ]; then
+               return 1
+       fi
+
+       local i
+       for i in "${only_tests_ids[@]}"; do
+               if [ "${TEST_COUNT}" -eq "${i}" ]; then
+                       return 1
+               fi
+       done
+       for i in "${only_tests_names[@]}"; do
+               if [ "${TEST_NAME}" = "${i}" ]; then
+                       return 1
+               fi
+       done
+
+       return 0
+}
+
+# $1: test name
 reset()
 {
+       TEST_NAME="${1}"
+
+       TEST_COUNT=$((TEST_COUNT+1))
+
+       if skip_test; then
+               return 1
+       fi
+
        if [ "${init}" != "1" ]; then
                init
        else
@@ -153,29 +205,34 @@ reset()
        fi
 
        init_partial
+
+       return 0
 }
 
+# $1: test name
 reset_with_cookies()
 {
-       reset
+       reset "${1}" || return 1
 
-       for netns in "$ns1" "$ns2";do
+       local netns
+       for netns in "$ns1" "$ns2"; do
                ip netns exec $netns sysctl -q net.ipv4.tcp_syncookies=2
        done
 }
 
+# $1: test name
 reset_with_add_addr_timeout()
 {
-       local ip="${1:-4}"
+       local ip="${2:-4}"
        local tables
 
+       reset "${1}" || return 1
+
        tables="iptables"
        if [ $ip -eq 6 ]; then
                tables="ip6tables"
        fi
 
-       reset
-
        ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=1
        ip netns exec $ns2 $tables -A OUTPUT -p tcp \
                -m tcp --tcp-option 30 \
@@ -184,28 +241,46 @@ reset_with_add_addr_timeout()
                -j DROP
 }
 
+# $1: test name
 reset_with_checksum()
 {
        local ns1_enable=$1
        local ns2_enable=$2
 
-       reset
+       reset "checksum test ${1} ${2}" || return 1
 
        ip netns exec $ns1 sysctl -q net.mptcp.checksum_enabled=$ns1_enable
        ip netns exec $ns2 sysctl -q net.mptcp.checksum_enabled=$ns2_enable
+
+       validate_checksum=1
 }
 
 reset_with_allow_join_id0()
 {
-       local ns1_enable=$1
-       local ns2_enable=$2
+       local ns1_enable=$2
+       local ns2_enable=$3
 
-       reset
+       reset "${1}" || return 1
 
        ip netns exec $ns1 sysctl -q net.mptcp.allow_join_initial_addr_port=$ns1_enable
        ip netns exec $ns2 sysctl -q net.mptcp.allow_join_initial_addr_port=$ns2_enable
 }
 
+fail_test()
+{
+       ret=1
+       failed_tests[${TEST_COUNT}]="${TEST_NAME}"
+}
+
+get_failed_tests_ids()
+{
+       # sorted
+       local i
+       for i in "${!failed_tests[@]}"; do
+               echo "${i}"
+       done | sort -n
+}
+
 print_file_err()
 {
        ls -l "$1" 1>&2
@@ -215,23 +290,23 @@ print_file_err()
 
 check_transfer()
 {
-       in=$1
-       out=$2
-       what=$3
-
-       cmp -l "$in" "$out" | while read line; do
-               local arr=($line)
-
-               let sum=0${arr[1]}+0${arr[2]}
+       local in=$1
+       local out=$2
+       local what=$3
+       local i a b
+
+       local line
+       cmp -l "$in" "$out" | while read -r i a b; do
+               local sum=$((0${a} + 0${b}))
                if [ $check_invert -eq 0 ] || [ $sum -ne $((0xff)) ]; then
                        echo "[ FAIL ] $what does not match (in, out):"
                        print_file_err "$in"
                        print_file_err "$out"
-                       ret=1
+                       fail_test
 
                        return 1
                else
-                       echo "$what has inverted byte at ${arr[0]}"
+                       echo "$what has inverted byte at ${i}"
                fi
        done
 
@@ -240,28 +315,28 @@ check_transfer()
 
 do_ping()
 {
-       listener_ns="$1"
-       connector_ns="$2"
-       connect_addr="$3"
+       local listener_ns="$1"
+       local connector_ns="$2"
+       local connect_addr="$3"
 
-       ip netns exec ${connector_ns} ping -q -c 1 $connect_addr >/dev/null
-       if [ $? -ne 0 ] ; then
+       if ! ip netns exec ${connector_ns} ping -q -c 1 $connect_addr >/dev/null; then
                echo "$listener_ns -> $connect_addr connectivity [ FAIL ]" 1>&2
-               ret=1
+               fail_test
        fi
 }
 
 link_failure()
 {
-       ns="$1"
+       local ns="$1"
 
        if [ -z "$FAILING_LINKS" ]; then
                l=$((RANDOM%4))
                FAILING_LINKS=$((l+1))
        fi
 
+       local l
        for l in $FAILING_LINKS; do
-               veth="ns1eth$l"
+               local veth="ns1eth$l"
                ip -net "$ns" link set "$veth" down
        done
 }
@@ -278,9 +353,10 @@ wait_local_port_listen()
        local listener_ns="${1}"
        local port="${2}"
 
-       local port_hex i
-
+       local port_hex
        port_hex="$(printf "%04X" "${port}")"
+
+       local i
        for i in $(seq 10); do
                ip netns exec "${listener_ns}" cat /proc/net/tcp* | \
                        awk "BEGIN {rc=1} {if (\$2 ~ /:${port_hex}\$/ && \$4 ~ /0A/) {rc=0; exit}} END {exit rc}" &&
@@ -291,7 +367,7 @@ wait_local_port_listen()
 
 rm_addr_count()
 {
-       ns=${1}
+       local ns=${1}
 
        ip netns exec ${ns} nstat -as | grep MPTcpExtRmAddr | awk '{print $2}'
 }
@@ -302,8 +378,8 @@ wait_rm_addr()
        local ns="${1}"
        local old_cnt="${2}"
        local cnt
-       local i
 
+       local i
        for i in $(seq 10); do
                cnt=$(rm_addr_count ${ns})
                [ "$cnt" = "${old_cnt}" ] || break
@@ -311,6 +387,21 @@ wait_rm_addr()
        done
 }
 
+wait_mpj()
+{
+       local ns="${1}"
+       local cnt old_cnt
+
+       old_cnt=$(ip netns exec ${ns} nstat -as | grep MPJoinAckRx | awk '{print $2}')
+
+       local i
+       for i in $(seq 10); do
+               cnt=$(ip netns exec ${ns} nstat -as | grep MPJoinAckRx | awk '{print $2}')
+               [ "$cnt" = "${old_cnt}" ] || break
+               sleep 0.1
+       done
+}
+
 pm_nl_set_limits()
 {
        local ns=$1
@@ -328,32 +419,33 @@ pm_nl_add_endpoint()
 {
        local ns=$1
        local addr=$2
-       local flags
-       local port
-       local dev
-       local id
+       local flags _flags
+       local port _port
+       local dev _dev
+       local id _id
        local nr=2
 
-       for p in $@
+       local p
+       for p in "${@}"
        do
                if [ $p = "flags" ]; then
                        eval _flags=\$"$nr"
-                       [ ! -z $_flags ]; flags="flags $_flags"
+                       [ -n "$_flags" ]; flags="flags $_flags"
                fi
                if [ $p = "dev" ]; then
                        eval _dev=\$"$nr"
-                       [ ! -z $_dev ]; dev="dev $_dev"
+                       [ -n "$_dev" ]; dev="dev $_dev"
                fi
                if [ $p = "id" ]; then
                        eval _id=\$"$nr"
-                       [ ! -z $_id ]; id="id $_id"
+                       [ -n "$_id" ]; id="id $_id"
                fi
                if [ $p = "port" ]; then
                        eval _port=\$"$nr"
-                       [ ! -z $_port ]; port="port $_port"
+                       [ -n "$_port" ]; port="port $_port"
                fi
 
-               let nr+=1
+               nr=$((nr + 1))
        done
 
        if [ $ip_mptcp -eq 1 ]; then
@@ -411,27 +503,111 @@ pm_nl_change_endpoint()
        fi
 }
 
+pm_nl_check_endpoint()
+{
+       local line expected_line
+       local need_title=$1
+       local msg="$2"
+       local ns=$3
+       local addr=$4
+       local _flags=""
+       local flags
+       local _port
+       local port
+       local dev
+       local _id
+       local id
+
+       if [ "${need_title}" = 1 ]; then
+               printf "%03u %-36s %s" "${TEST_COUNT}" "${TEST_NAME}" "${msg}"
+       else
+               printf "%-${nr_blank}s %s" " " "${msg}"
+       fi
+
+       shift 4
+       while [ -n "$1" ]; do
+               if [ $1 = "flags" ]; then
+                       _flags=$2
+                       [ -n "$_flags" ]; flags="flags $_flags"
+                       shift
+               elif [ $1 = "dev" ]; then
+                       [ -n "$2" ]; dev="dev $2"
+                       shift
+               elif [ $1 = "id" ]; then
+                       _id=$2
+                       [ -n "$_id" ]; id="id $_id"
+                       shift
+               elif [ $1 = "port" ]; then
+                       _port=$2
+                       [ -n "$_port" ]; port=" port $_port"
+                       shift
+               fi
+
+               shift
+       done
+
+       if [ -z "$id" ]; then
+               echo "[skip] bad test - missing endpoint id"
+               return
+       fi
+
+       if [ $ip_mptcp -eq 1 ]; then
+               line=$(ip -n $ns mptcp endpoint show $id)
+               # the dump order is: address id flags port dev
+               [ -n "$addr" ] && expected_line="$addr"
+               expected_line="$expected_line $id"
+               [ -n "$_flags" ] && expected_line="$expected_line ${_flags//","/" "}"
+               [ -n "$dev" ] && expected_line="$expected_line $dev"
+               [ -n "$port" ] && expected_line="$expected_line $port"
+       else
+               line=$(ip netns exec $ns ./pm_nl_ctl get $_id)
+               # the dump order is: id flags dev address port
+               expected_line="$id"
+               [ -n "$flags" ] && expected_line="$expected_line $flags"
+               [ -n "$dev" ] && expected_line="$expected_line $dev"
+               [ -n "$addr" ] && expected_line="$expected_line $addr"
+               [ -n "$_port" ] && expected_line="$expected_line $_port"
+       fi
+       if [ "$line" = "$expected_line" ]; then
+               echo "[ ok ]"
+       else
+               echo "[fail] expected '$expected_line' found '$line'"
+               fail_test
+       fi
+}
+
+filter_tcp_from()
+{
+       local ns="${1}"
+       local src="${2}"
+       local target="${3}"
+
+       ip netns exec "${ns}" iptables -A INPUT -s "${src}" -p tcp -j "${target}"
+}
+
 do_transfer()
 {
-       listener_ns="$1"
-       connector_ns="$2"
-       cl_proto="$3"
-       srv_proto="$4"
-       connect_addr="$5"
-       test_link_fail="$6"
-       addr_nr_ns1="$7"
-       addr_nr_ns2="$8"
-       speed="$9"
-       sflags="${10}"
-
-       port=$((10000+$TEST_COUNT))
-       TEST_COUNT=$((TEST_COUNT+1))
+       local listener_ns="$1"
+       local connector_ns="$2"
+       local cl_proto="$3"
+       local srv_proto="$4"
+       local connect_addr="$5"
+       local test_link_fail="$6"
+       local addr_nr_ns1="$7"
+       local addr_nr_ns2="$8"
+       local speed="$9"
+       local sflags="${10}"
+
+       local port=$((10000 + TEST_COUNT - 1))
+       local cappid
 
        :> "$cout"
        :> "$sout"
        :> "$capout"
 
        if [ $capture -eq 1 ]; then
+               local capuser
                if [ -z $SUDO_USER ] ; then
                        capuser=""
                else
@@ -485,7 +661,7 @@ do_transfer()
                                ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
                                        $extra_args ${local_addr} < "$sin" > "$sout" &
        fi
-       spid=$!
+       local spid=$!
 
        wait_local_port_listen "${listener_ns}" "${port}"
 
@@ -502,21 +678,23 @@ do_transfer()
                                        ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
                                                $extra_args $connect_addr > "$cout" &
        else
-               cat "$cinfail" | tee "$cinsent" | \
+               tee "$cinsent" < "$cinfail" | \
                        timeout ${timeout_test} \
                                ip netns exec ${connector_ns} \
                                        ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
                                                $extra_args $connect_addr > "$cout" &
        fi
-       cpid=$!
+       local cpid=$!
 
        # let the mptcp subflow be established in background before
        # do endpoint manipulation
-       [ $addr_nr_ns1 = "0" -a $addr_nr_ns2 = "0" ] || sleep 1
+       if [ $addr_nr_ns1 != "0" ] || [ $addr_nr_ns2 != "0" ]; then
+               sleep 1
+       fi
 
        if [ $addr_nr_ns1 -gt 0 ]; then
-               let add_nr_ns1=addr_nr_ns1
-               counter=2
+               local counter=2
+               local add_nr_ns1=${addr_nr_ns1}
                while [ $add_nr_ns1 -gt 0 ]; do
                        local addr
                        if is_v6 "${connect_addr}"; then
@@ -525,18 +703,21 @@ do_transfer()
                                addr="10.0.$counter.1"
                        fi
                        pm_nl_add_endpoint $ns1 $addr flags signal
-                       let counter+=1
-                       let add_nr_ns1-=1
+                       counter=$((counter + 1))
+                       add_nr_ns1=$((add_nr_ns1 - 1))
                done
        elif [ $addr_nr_ns1 -lt 0 ]; then
-               let rm_nr_ns1=-addr_nr_ns1
+               local rm_nr_ns1=$((-addr_nr_ns1))
                if [ $rm_nr_ns1 -lt 8 ]; then
-                       counter=0
-                       pm_nl_show_endpoints ${listener_ns} | while read line; do
+                       local counter=0
+                       local line
+                       pm_nl_show_endpoints ${listener_ns} | while read -r line; do
+                               # shellcheck disable=SC2206 # we do want to split per word
                                local arr=($line)
                                local nr=0
 
-                               for i in ${arr[@]}; do
+                               local i
+                               for i in "${arr[@]}"; do
                                        if [ $i = "id" ]; then
                                                if [ $counter -eq $rm_nr_ns1 ]; then
                                                        break
@@ -545,9 +726,9 @@ do_transfer()
                                                rm_addr=$(rm_addr_count ${connector_ns})
                                                pm_nl_del_endpoint ${listener_ns} $id
                                                wait_rm_addr ${connector_ns} ${rm_addr}
-                                               let counter+=1
+                                               counter=$((counter + 1))
                                        fi
-                                       let nr+=1
+                                       nr=$((nr + 1))
                                done
                        done
                elif [ $rm_nr_ns1 -eq 8 ]; then
@@ -557,7 +738,7 @@ do_transfer()
                fi
        fi
 
-       flags="subflow"
+       local flags="subflow"
        if [[ "${addr_nr_ns2}" = "fullmesh_"* ]]; then
                flags="${flags},fullmesh"
                addr_nr_ns2=${addr_nr_ns2:9}
@@ -565,11 +746,11 @@ do_transfer()
 
        # if newly added endpoints must be deleted, give the background msk
        # some time to create them
-       [ $addr_nr_ns1 -gt 0 -a $addr_nr_ns2 -lt 0 ] && sleep 1
+       [ $addr_nr_ns1 -gt 0 ] && [ $addr_nr_ns2 -lt 0 ] && sleep 1
 
        if [ $addr_nr_ns2 -gt 0 ]; then
-               let add_nr_ns2=addr_nr_ns2
-               counter=3
+               local add_nr_ns2=${addr_nr_ns2}
+               local counter=3
                while [ $add_nr_ns2 -gt 0 ]; do
                        local addr
                        if is_v6 "${connect_addr}"; then
@@ -578,31 +759,35 @@ do_transfer()
                                addr="10.0.$counter.2"
                        fi
                        pm_nl_add_endpoint $ns2 $addr flags $flags
-                       let counter+=1
-                       let add_nr_ns2-=1
+                       counter=$((counter + 1))
+                       add_nr_ns2=$((add_nr_ns2 - 1))
                done
        elif [ $addr_nr_ns2 -lt 0 ]; then
-               let rm_nr_ns2=-addr_nr_ns2
+               local rm_nr_ns2=$((-addr_nr_ns2))
                if [ $rm_nr_ns2 -lt 8 ]; then
-                       counter=0
-                       pm_nl_show_endpoints ${connector_ns} | while read line; do
+                       local counter=0
+                       local line
+                       pm_nl_show_endpoints ${connector_ns} | while read -r line; do
+                               # shellcheck disable=SC2206 # we do want to split per word
                                local arr=($line)
                                local nr=0
 
-                               for i in ${arr[@]}; do
+                               local i
+                               for i in "${arr[@]}"; do
                                        if [ $i = "id" ]; then
                                                if [ $counter -eq $rm_nr_ns2 ]; then
                                                        break
                                                fi
+                                               local id rm_addr
                                                # rm_addr operations are serialized; allow the
                                                # previous one to complete
                                                id=${arr[$nr+1]}
                                                rm_addr=$(rm_addr_count ${listener_ns})
                                                pm_nl_del_endpoint ${connector_ns} $id
                                                wait_rm_addr ${listener_ns} ${rm_addr}
-                                               let counter+=1
+                                               counter=$((counter + 1))
                                        fi
-                                       let nr+=1
+                                       nr=$((nr + 1))
                                done
                        done
                elif [ $rm_nr_ns2 -eq 8 ]; then
@@ -618,19 +803,24 @@ do_transfer()
                fi
        fi
 
-       if [ ! -z $sflags ]; then
+       if [ -n "${sflags}" ]; then
                sleep 1
+
+               local netns
                for netns in "$ns1" "$ns2"; do
-                       pm_nl_show_endpoints $netns | while read line; do
+                       local line
+                       pm_nl_show_endpoints $netns | while read -r line; do
+                               # shellcheck disable=SC2206 # we do want to split per word
                                local arr=($line)
                                local nr=0
                                local id
 
-                               for i in ${arr[@]}; do
+                               local i
+                               for i in "${arr[@]}"; do
                                        if [ $i = "id" ]; then
                                                id=${arr[$nr+1]}
                                        fi
-                                       let nr+=1
+                                       nr=$((nr + 1))
                                done
                                pm_nl_change_endpoint $netns $id $sflags
                        done
@@ -638,9 +828,9 @@ do_transfer()
        fi
 
        wait $cpid
-       retc=$?
+       local retc=$?
        wait $spid
-       rets=$?
+       local rets=$?
 
        if [ $capture -eq 1 ]; then
            sleep 1
@@ -662,7 +852,7 @@ do_transfer()
                cat /tmp/${connector_ns}.out
 
                cat "$capout"
-               ret=1
+               fail_test
                return 1
        fi
 
@@ -690,9 +880,9 @@ do_transfer()
 
 make_file()
 {
-       name=$1
-       who=$2
-       size=$3
+       local name=$1
+       local who=$2
+       local size=$3
 
        dd if=/dev/urandom of="$name" bs=1024 count=$size 2> /dev/null
        echo -e "\nMPTCP_TEST_FILE_END_MARKER" >> "$name"
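
make_file() now keeps its parameters function-local; a usage sketch matching
the run_tests() call sites below (the temporary path is illustrative):

        # 1024 KB of random data, terminated by the end marker line
        cinfail=$(mktemp)
        make_file "$cinfail" "client" 1024
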
@@ -702,14 +892,16 @@ make_file()
 
 run_tests()
 {
-       listener_ns="$1"
-       connector_ns="$2"
-       connect_addr="$3"
-       test_linkfail="${4:-0}"
-       addr_nr_ns1="${5:-0}"
-       addr_nr_ns2="${6:-0}"
-       speed="${7:-fast}"
-       sflags="${8:-""}"
+       local listener_ns="$1"
+       local connector_ns="$2"
+       local connect_addr="$3"
+       local test_linkfail="${4:-0}"
+       local addr_nr_ns1="${5:-0}"
+       local addr_nr_ns2="${6:-0}"
+       local speed="${7:-fast}"
+       local sflags="${8:-""}"
+
+       local size
 
        # The values above 2 are reused to make test files
        # with the given sizes (KB)
@@ -722,14 +914,14 @@ run_tests()
                make_file "$cinfail" "client" $size
        # create the input file for the failure test when
        # the first failure test runs
-       elif [ "$test_linkfail" -ne 0 -a -z "$cinfail" ]; then
+       elif [ "$test_linkfail" -ne 0 ] && [ -z "$cinfail" ]; then
                # the client file must be considerably larger
        # than the maximum expected cwin value, or the
        # link utilization will not be predictable
                size=$((RANDOM%2))
                size=$((size+1))
                size=$((size*8192))
-               size=$((size + ( $RANDOM % 8192) ))
+               size=$((size + ( RANDOM % 8192) ))
 
                cinfail=$(mktemp)
                make_file "$cinfail" "client" $size
@@ -742,7 +934,7 @@ run_tests()
                        sinfail=$(mktemp)
                fi
                make_file "$sinfail" "server" $size
-       elif [ "$test_linkfail" -eq 2 -a -z "$sinfail" ]; then
+       elif [ "$test_linkfail" -eq 2 ] && [ -z "$sinfail" ]; then
                size=$((RANDOM%16))
                size=$((size+1))
                size=$((size*2048))
@@ -765,9 +957,8 @@ dump_stats()
 
 chk_csum_nr()
 {
-       local msg=${1:-""}
-       local csum_ns1=${2:-0}
-       local csum_ns2=${3:-0}
+       local csum_ns1=${1:-0}
+       local csum_ns2=${2:-0}
        local count
        local dump_stats
        local allow_multi_errors_ns1=0
@@ -782,29 +973,24 @@ chk_csum_nr()
                csum_ns2=${csum_ns2:1}
        fi
 
-       if [ ! -z "$msg" ]; then
-               printf "%03u" "$TEST_COUNT"
-       else
-               echo -n "   "
-       fi
-       printf " %-36s %s" "$msg" "sum"
-       count=`ip netns exec $ns1 nstat -as | grep MPTcpExtDataCsumErr | awk '{print $2}'`
+       printf "%-${nr_blank}s %s" " " "sum"
+       count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtDataCsumErr | awk '{print $2}')
        [ -z "$count" ] && count=0
-       if [ "$count" != $csum_ns1 -a $allow_multi_errors_ns1 -eq 0 ] ||
-          [ "$count" -lt $csum_ns1 -a $allow_multi_errors_ns1 -eq 1 ]; then
+       if { [ "$count" != $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 0 ]; } ||
+          { [ "$count" -lt $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 1 ]; }; then
                echo "[fail] got $count data checksum error[s] expected $csum_ns1"
-               ret=1
+               fail_test
                dump_stats=1
        else
                echo -n "[ ok ]"
        fi
        echo -n " - csum  "
-       count=`ip netns exec $ns2 nstat -as | grep MPTcpExtDataCsumErr | awk '{print $2}'`
+       count=$(ip netns exec $ns2 nstat -as | grep MPTcpExtDataCsumErr | awk '{print $2}')
        [ -z "$count" ] && count=0
-       if [ "$count" != $csum_ns2 -a $allow_multi_errors_ns2 -eq 0 ] ||
-          [ "$count" -lt $csum_ns2 -a $allow_multi_errors_ns2 -eq 1 ]; then
+       if { [ "$count" != $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 0 ]; } ||
+          { [ "$count" -lt $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 1 ]; }; then
                echo "[fail] got $count data checksum error[s] expected $csum_ns2"
-               ret=1
+               fail_test
                dump_stats=1
        else
                echo "[ ok ]"
@@ -820,22 +1006,22 @@ chk_fail_nr()
        local dump_stats
 
        printf "%-${nr_blank}s %s" " " "ftx"
-       count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMPFailTx | awk '{print $2}'`
+       count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtMPFailTx | awk '{print $2}')
        [ -z "$count" ] && count=0
        if [ "$count" != "$fail_tx" ]; then
                echo "[fail] got $count MP_FAIL[s] TX expected $fail_tx"
-               ret=1
+               fail_test
                dump_stats=1
        else
                echo -n "[ ok ]"
        fi
 
        echo -n " - failrx"
-       count=`ip netns exec $ns2 nstat -as | grep MPTcpExtMPFailRx | awk '{print $2}'`
+       count=$(ip netns exec $ns2 nstat -as | grep MPTcpExtMPFailRx | awk '{print $2}')
        [ -z "$count" ] && count=0
        if [ "$count" != "$fail_rx" ]; then
                echo "[fail] got $count MP_FAIL[s] RX expected $fail_rx"
-               ret=1
+               fail_test
                dump_stats=1
        else
                echo "[ ok ]"
@@ -856,7 +1042,7 @@ chk_fclose_nr()
        [ -z "$count" ] && count=0
        if [ "$count" != "$fclose_tx" ]; then
                echo "[fail] got $count MP_FASTCLOSE[s] TX expected $fclose_tx"
-               ret=1
+               fail_test
                dump_stats=1
        else
                echo -n "[ ok ]"
@@ -867,7 +1053,7 @@ chk_fclose_nr()
        [ -z "$count" ] && count=0
        if [ "$count" != "$fclose_rx" ]; then
                echo "[fail] got $count MP_FASTCLOSE[s] RX expected $fclose_rx"
-               ret=1
+               fail_test
                dump_stats=1
        else
                echo "[ ok ]"
@@ -898,7 +1084,7 @@ chk_rst_nr()
        [ -z "$count" ] && count=0
        if [ "$count" != "$rst_tx" ]; then
                echo "[fail] got $count MP_RST[s] TX expected $rst_tx"
-               ret=1
+               fail_test
                dump_stats=1
        else
                echo -n "[ ok ]"
@@ -909,7 +1095,7 @@ chk_rst_nr()
        [ -z "$count" ] && count=0
        if [ "$count" != "$rst_rx" ]; then
                echo "[fail] got $count MP_RST[s] RX expected $rst_rx"
-               ret=1
+               fail_test
                dump_stats=1
        else
                echo -n "[ ok ]"
@@ -922,32 +1108,37 @@ chk_rst_nr()
 
 chk_join_nr()
 {
-       local msg="$1"
-       local syn_nr=$2
-       local syn_ack_nr=$3
-       local ack_nr=$4
-       local csum_ns1=${5:-0}
-       local csum_ns2=${6:-0}
-       local fail_nr=${7:-0}
-       local rst_nr=${8:-0}
+       local syn_nr=$1
+       local syn_ack_nr=$2
+       local ack_nr=$3
+       local csum_ns1=${4:-0}
+       local csum_ns2=${5:-0}
+       local fail_nr=${6:-0}
+       local rst_nr=${7:-0}
+       local corrupted_pkts=${8:-0}
        local count
        local dump_stats
        local with_cookie
+       local title="${TEST_NAME}"
 
-       printf "%03u %-36s %s" "$TEST_COUNT" "$msg" "syn"
-       count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinSynRx | awk '{print $2}'`
+       if [ "${corrupted_pkts}" -gt 0 ]; then
+               title+=": ${corrupted_pkts} corrupted pkts"
+       fi
+
+       printf "%03u %-36s %s" "${TEST_COUNT}" "${title}" "syn"
+       count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinSynRx | awk '{print $2}')
        [ -z "$count" ] && count=0
        if [ "$count" != "$syn_nr" ]; then
                echo "[fail] got $count JOIN[s] syn expected $syn_nr"
-               ret=1
+               fail_test
                dump_stats=1
        else
                echo -n "[ ok ]"
        fi
 
        echo -n " - synack"
-       with_cookie=`ip netns exec $ns2 sysctl -n net.ipv4.tcp_syncookies`
-       count=`ip netns exec $ns2 nstat -as | grep MPTcpExtMPJoinSynAckRx | awk '{print $2}'`
+       with_cookie=$(ip netns exec $ns2 sysctl -n net.ipv4.tcp_syncookies)
+       count=$(ip netns exec $ns2 nstat -as | grep MPTcpExtMPJoinSynAckRx | awk '{print $2}')
        [ -z "$count" ] && count=0
        if [ "$count" != "$syn_ack_nr" ]; then
                # simult connections exceeding the limit with cookie enabled could go up to
@@ -957,7 +1148,7 @@ chk_join_nr()
                        echo -n "[ ok ]"
                else
                        echo "[fail] got $count JOIN[s] synack expected $syn_ack_nr"
-                       ret=1
+                       fail_test
                        dump_stats=1
                fi
        else
@@ -965,18 +1156,18 @@ chk_join_nr()
        fi
 
        echo -n " - ack"
-       count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinAckRx | awk '{print $2}'`
+       count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinAckRx | awk '{print $2}')
        [ -z "$count" ] && count=0
        if [ "$count" != "$ack_nr" ]; then
                echo "[fail] got $count JOIN[s] ack expected $ack_nr"
-               ret=1
+               fail_test
                dump_stats=1
        else
                echo "[ ok ]"
        fi
        [ "${dump_stats}" = 1 ] && dump_stats
        if [ $checksum -eq 1 ]; then
-               chk_csum_nr "" $csum_ns1 $csum_ns2
+               chk_csum_nr $csum_ns1 $csum_ns2
                chk_fail_nr $fail_nr $fail_nr
                chk_rst_nr $rst_nr $rst_nr
        fi
@@ -998,18 +1189,18 @@ chk_stale_nr()
        local recover_nr
 
        printf "%-${nr_blank}s %-18s" " " "stale"
-       stale_nr=`ip netns exec $ns nstat -as | grep MPTcpExtSubflowStale | awk '{print $2}'`
+       stale_nr=$(ip netns exec $ns nstat -as | grep MPTcpExtSubflowStale | awk '{print $2}')
        [ -z "$stale_nr" ] && stale_nr=0
-       recover_nr=`ip netns exec $ns nstat -as | grep MPTcpExtSubflowRecover | awk '{print $2}'`
+       recover_nr=$(ip netns exec $ns nstat -as | grep MPTcpExtSubflowRecover | awk '{print $2}')
        [ -z "$recover_nr" ] && recover_nr=0
 
        if [ $stale_nr -lt $stale_min ] ||
-          [ $stale_max -gt 0 -a $stale_nr -gt $stale_max ] ||
-          [ $((stale_nr - $recover_nr)) -ne $stale_delta ]; then
+          { [ $stale_max -gt 0 ] && [ $stale_nr -gt $stale_max ]; } ||
+          [ $((stale_nr - recover_nr)) -ne $stale_delta ]; then
                echo "[fail] got $stale_nr stale[s] $recover_nr recover[s], " \
                     " expected stale in range [$stale_min..$stale_max]," \
                     " stale-recover delta $stale_delta "
-               ret=1
+               fail_test
                dump_stats=1
        else
                echo "[ ok ]"
@@ -1036,28 +1227,28 @@ chk_add_nr()
        local dump_stats
        local timeout
 
-       timeout=`ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout`
+       timeout=$(ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout)
 
        printf "%-${nr_blank}s %s" " " "add"
-       count=`ip netns exec $ns2 nstat -as MPTcpExtAddAddr | grep MPTcpExtAddAddr | awk '{print $2}'`
+       count=$(ip netns exec $ns2 nstat -as MPTcpExtAddAddr | grep MPTcpExtAddAddr | awk '{print $2}')
        [ -z "$count" ] && count=0
 
        # if the test configured a short timeout, tolerate more than the
        # expected number of ADD_ADDR options, due to retransmissions
-       if [ "$count" != "$add_nr" ] && [ "$timeout" -gt 1 -o "$count" -lt "$add_nr" ]; then
+       if [ "$count" != "$add_nr" ] && { [ "$timeout" -gt 1 ] || [ "$count" -lt "$add_nr" ]; }; then
                echo "[fail] got $count ADD_ADDR[s] expected $add_nr"
-               ret=1
+               fail_test
                dump_stats=1
        else
                echo -n "[ ok ]"
        fi
 
        echo -n " - echo  "
-       count=`ip netns exec $ns1 nstat -as | grep MPTcpExtEchoAdd | awk '{print $2}'`
+       count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtEchoAdd | awk '{print $2}')
        [ -z "$count" ] && count=0
        if [ "$count" != "$echo_nr" ]; then
                echo "[fail] got $count ADD_ADDR echo[s] expected $echo_nr"
-               ret=1
+               fail_test
                dump_stats=1
        else
                echo -n "[ ok ]"
@@ -1065,76 +1256,76 @@ chk_add_nr()
 
        if [ $port_nr -gt 0 ]; then
                echo -n " - pt "
-               count=`ip netns exec $ns2 nstat -as | grep MPTcpExtPortAdd | awk '{print $2}'`
+               count=$(ip netns exec $ns2 nstat -as | grep MPTcpExtPortAdd | awk '{print $2}')
                [ -z "$count" ] && count=0
                if [ "$count" != "$port_nr" ]; then
                        echo "[fail] got $count ADD_ADDR[s] with a port-number expected $port_nr"
-                       ret=1
+                       fail_test
                        dump_stats=1
                else
                        echo "[ ok ]"
                fi
 
                printf "%-${nr_blank}s %s" " " "syn"
-               count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinPortSynRx |
-                       awk '{print $2}'`
+               count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinPortSynRx |
+                       awk '{print $2}')
                [ -z "$count" ] && count=0
                if [ "$count" != "$syn_nr" ]; then
                        echo "[fail] got $count JOIN[s] syn with a different \
                                port-number expected $syn_nr"
-                       ret=1
+                       fail_test
                        dump_stats=1
                else
                        echo -n "[ ok ]"
                fi
 
                echo -n " - synack"
-               count=`ip netns exec $ns2 nstat -as | grep MPTcpExtMPJoinPortSynAckRx |
-                       awk '{print $2}'`
+               count=$(ip netns exec $ns2 nstat -as | grep MPTcpExtMPJoinPortSynAckRx |
+                       awk '{print $2}')
                [ -z "$count" ] && count=0
                if [ "$count" != "$syn_ack_nr" ]; then
                        echo "[fail] got $count JOIN[s] synack with a different \
                                port-number expected $syn_ack_nr"
-                       ret=1
+                       fail_test
                        dump_stats=1
                else
                        echo -n "[ ok ]"
                fi
 
                echo -n " - ack"
-               count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinPortAckRx |
-                       awk '{print $2}'`
+               count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinPortAckRx |
+                       awk '{print $2}')
                [ -z "$count" ] && count=0
                if [ "$count" != "$ack_nr" ]; then
                        echo "[fail] got $count JOIN[s] ack with a different \
                                port-number expected $ack_nr"
-                       ret=1
+                       fail_test
                        dump_stats=1
                else
                        echo "[ ok ]"
                fi
 
                printf "%-${nr_blank}s %s" " " "syn"
-               count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMismatchPortSynRx |
-                       awk '{print $2}'`
+               count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtMismatchPortSynRx |
+                       awk '{print $2}')
                [ -z "$count" ] && count=0
                if [ "$count" != "$mis_syn_nr" ]; then
                        echo "[fail] got $count JOIN[s] syn with a mismatched \
                                port-number expected $mis_syn_nr"
-                       ret=1
+                       fail_test
                        dump_stats=1
                else
                        echo -n "[ ok ]"
                fi
 
                echo -n " - ack   "
-               count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMismatchPortAckRx |
-                       awk '{print $2}'`
+               count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtMismatchPortAckRx |
+                       awk '{print $2}')
                [ -z "$count" ] && count=0
                if [ "$count" != "$mis_ack_nr" ]; then
                        echo "[fail] got $count JOIN[s] ack with a mismatched \
                                port-number expected $mis_ack_nr"
-                       ret=1
+                       fail_test
                        dump_stats=1
                else
                        echo "[ ok ]"
@@ -1150,36 +1341,67 @@ chk_rm_nr()
 {
        local rm_addr_nr=$1
        local rm_subflow_nr=$2
-       local invert=${3:-""}
+       local invert
+       local simult
        local count
        local dump_stats
        local addr_ns=$ns1
        local subflow_ns=$ns2
        local extra_msg=""
 
-       if [[ $invert = "invert" ]]; then
+       shift 2
+       while [ -n "$1" ]; do
+               [ "$1" = "invert" ] && invert=true
+               [ "$1" = "simult" ] && simult=true
+               shift
+       done
+
+       if [ -z "${invert}" ]; then
+               addr_ns=$ns1
+               subflow_ns=$ns2
+       elif [ "${invert}" = "true" ]; then
                addr_ns=$ns2
                subflow_ns=$ns1
                extra_msg="   invert"
        fi
 
        printf "%-${nr_blank}s %s" " " "rm "
-       count=`ip netns exec $addr_ns nstat -as | grep MPTcpExtRmAddr | awk '{print $2}'`
+       count=$(ip netns exec $addr_ns nstat -as | grep MPTcpExtRmAddr | awk '{print $2}')
        [ -z "$count" ] && count=0
        if [ "$count" != "$rm_addr_nr" ]; then
                echo "[fail] got $count RM_ADDR[s] expected $rm_addr_nr"
-               ret=1
+               fail_test
                dump_stats=1
        else
                echo -n "[ ok ]"
        fi
 
        echo -n " - rmsf  "
-       count=`ip netns exec $subflow_ns nstat -as | grep MPTcpExtRmSubflow | awk '{print $2}'`
+       count=$(ip netns exec $subflow_ns nstat -as | grep MPTcpExtRmSubflow | awk '{print $2}')
        [ -z "$count" ] && count=0
+       if [ -n "$simult" ]; then
+               local cnt suffix
+
+               cnt=$(ip netns exec $addr_ns nstat -as | grep MPTcpExtRmSubflow | awk '{print $2}')
+
+               # in case of simult flush, the subflow removal count on each side is
+               # unreliable
+               [ -z "$cnt" ] && cnt=0
+               count=$((count + cnt))
+               [ "$count" != "$rm_subflow_nr" ] && suffix="$count in [$rm_subflow_nr:$((rm_subflow_nr*2))]"
+               if [ $count -ge "$rm_subflow_nr" ] && \
+                  [ "$count" -le "$((rm_subflow_nr * 2))" ]; then
+                       echo "[ ok ] $suffix"
+               else
+                       echo "[fail] got $count RM_SUBFLOW[s] expected in range [$rm_subflow_nr:$((rm_subflow_nr*2))]"
+                       fail_test
+                       dump_stats=1
+               fi
+               return
+       fi
        if [ "$count" != "$rm_subflow_nr" ]; then
                echo "[fail] got $count RM_SUBFLOW[s] expected $rm_subflow_nr"
-               ret=1
+               fail_test
                dump_stats=1
        else
                echo -n "[ ok ]"
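
chk_rm_nr() now accepts optional trailing keywords instead of a single
positional "invert" argument; the shift 2 loop above consumes them in any
order. Call sites from remove_tests() below:

        chk_rm_nr 1 1                 # default: RM_ADDR on $ns1, RM_SUBFLOW on $ns2
        chk_rm_nr 3 3 invert          # swap the namespaces under check
        chk_rm_nr 1 3 invert simult   # flush: accept a fuzzy RM_SUBFLOW range
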
@@ -1198,22 +1420,22 @@ chk_prio_nr()
        local dump_stats
 
        printf "%-${nr_blank}s %s" " " "ptx"
-       count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMPPrioTx | awk '{print $2}'`
+       count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtMPPrioTx | awk '{print $2}')
        [ -z "$count" ] && count=0
        if [ "$count" != "$mp_prio_nr_tx" ]; then
                echo "[fail] got $count MP_PRIO[s] TX expected $mp_prio_nr_tx"
-               ret=1
+               fail_test
                dump_stats=1
        else
                echo -n "[ ok ]"
        fi
 
        echo -n " - prx   "
-       count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMPPrioRx | awk '{print $2}'`
+       count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtMPPrioRx | awk '{print $2}')
        [ -z "$count" ] && count=0
        if [ "$count" != "$mp_prio_nr_rx" ]; then
                echo "[fail] got $count MP_PRIO[s] RX expected $mp_prio_nr_rx"
-               ret=1
+               fail_test
                dump_stats=1
        else
                echo "[ ok ]"
@@ -1228,29 +1450,33 @@ chk_link_usage()
        local link=$2
        local out=$3
        local expected_rate=$4
-       local tx_link=`ip netns exec $ns cat /sys/class/net/$link/statistics/tx_bytes`
-       local tx_total=`ls -l $out | awk '{print $5}'`
-       local tx_rate=$((tx_link * 100 / $tx_total))
+
+       local tx_link tx_total
+       tx_link=$(ip netns exec $ns cat /sys/class/net/$link/statistics/tx_bytes)
+       tx_total=$(stat --format=%s $out)
+       local tx_rate=$((tx_link * 100 / tx_total))
        local tolerance=5
 
        printf "%-${nr_blank}s %-18s" " " "link usage"
-       if [ $tx_rate -lt $((expected_rate - $tolerance)) -o \
-            $tx_rate -gt $((expected_rate + $tolerance)) ]; then
+       if [ $tx_rate -lt $((expected_rate - tolerance)) ] || \
+          [ $tx_rate -gt $((expected_rate + tolerance)) ]; then
                echo "[fail] got $tx_rate% usage, expected $expected_rate%"
-               ret=1
+               fail_test
        else
                echo "[ ok ]"
        fi
 }
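
chk_link_usage() compares the share of bytes sent over one link against an
expected percentage with a fixed +/-5% tolerance, with stat --format=%s
replacing the old 'ls -l | awk' size parsing. With expected_rate=50, for
instance, any integer tx_rate in [45..55] passes:

        tx_rate=$((tx_link * 100 / tx_total))   # integer percentage
        [ $tx_rate -ge 45 ] && [ $tx_rate -le 55 ] && echo "[ ok ]"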
 
-wait_for_tw()
+wait_attempt_fail()
 {
        local timeout_ms=$((timeout_poll * 1000))
        local time=0
        local ns=$1
 
        while [ $time -lt $timeout_ms ]; do
-               local cnt=$(ip netns exec $ns nstat -as TcpAttemptFails | grep TcpAttemptFails | awk '{print $2}')
+               local cnt
+
+               cnt=$(ip netns exec $ns nstat -as TcpAttemptFails | grep TcpAttemptFails | awk '{print $2}')
 
                [ "$cnt" = 1 ] && return 1
                time=$((time + 100))
@@ -1261,887 +1487,968 @@ wait_for_tw()
 
 subflows_tests()
 {
-       reset
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "no JOIN" "0" "0" "0"
+       if reset "no JOIN"; then
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 0 0 0
+       fi
 
        # subflow limited by client
-       reset
-       pm_nl_set_limits $ns1 0 0
-       pm_nl_set_limits $ns2 0 0
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "single subflow, limited by client" 0 0 0
+       if reset "single subflow, limited by client"; then
+               pm_nl_set_limits $ns1 0 0
+               pm_nl_set_limits $ns2 0 0
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 0 0 0
+       fi
 
        # subflow limited by server
-       reset
-       pm_nl_set_limits $ns1 0 0
-       pm_nl_set_limits $ns2 0 1
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "single subflow, limited by server" 1 1 0
+       if reset "single subflow, limited by server"; then
+               pm_nl_set_limits $ns1 0 0
+               pm_nl_set_limits $ns2 0 1
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 1 1 0
+       fi
 
        # subflow
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "single subflow" 1 1 1
+       if reset "single subflow"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 1 1 1
+       fi
 
        # multiple subflows
-       reset
-       pm_nl_set_limits $ns1 0 2
-       pm_nl_set_limits $ns2 0 2
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "multiple subflows" 2 2 2
+       if reset "multiple subflows"; then
+               pm_nl_set_limits $ns1 0 2
+               pm_nl_set_limits $ns2 0 2
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 2 2 2
+       fi
 
        # multiple subflows limited by server
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 2
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "multiple subflows, limited by server" 2 2 1
+       if reset "multiple subflows, limited by server"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 2
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 2 2 1
+       fi
 
        # single subflow, dev
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow dev ns2eth3
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "single subflow, dev" 1 1 1
+       if reset "single subflow, dev"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow dev ns2eth3
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 1 1 1
+       fi
 }
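
Each test body is now wrapped in 'if reset "<title>"; then ... fi': reset()
receives the test name (exposed to the chk_*() helpers via TEST_NAME, see
chk_join_nr() above) and its exit status decides whether the test runs at all,
so individual tests can be skipped without disturbing the numbering. A minimal
sketch of that contract, assuming the real reset() defined earlier in the
script also re-creates the test namespaces:

        reset()
        {
                TEST_NAME="${1}"
                TEST_COUNT=$((TEST_COUNT + 1))
                # ... netns teardown/re-init elided ...
                return 0        # non-zero would skip the wrapped test body
        }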
 
 subflows_error_tests()
 {
        # If a single subflow is configured, and matches the MPC src
        # address, no additional subflow should be created
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
-       chk_join_nr "no MPC reuse with single endpoint" 0 0 0
+       if reset "no MPC reuse with single endpoint"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+               chk_join_nr 0 0 0
+       fi
 
        # multiple subflows, with subflow creation error
-       reset
-       pm_nl_set_limits $ns1 0 2
-       pm_nl_set_limits $ns2 0 2
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
-       ip netns exec $ns1 iptables -A INPUT -s 10.0.3.2 -p tcp -j REJECT
-       run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
-       chk_join_nr "multi subflows, with failing subflow" 1 1 1
+       if reset "multi subflows, with failing subflow"; then
+               pm_nl_set_limits $ns1 0 2
+               pm_nl_set_limits $ns2 0 2
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
+               filter_tcp_from $ns1 10.0.3.2 REJECT
+               run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+               chk_join_nr 1 1 1
+       fi
 
        # multiple subflows, with subflow timeout on MPJ
-       reset
-       pm_nl_set_limits $ns1 0 2
-       pm_nl_set_limits $ns2 0 2
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
-       ip netns exec $ns1 iptables -A INPUT -s 10.0.3.2 -p tcp -j DROP
-       run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
-       chk_join_nr "multi subflows, with subflow timeout" 1 1 1
+       if reset "multi subflows, with subflow timeout"; then
+               pm_nl_set_limits $ns1 0 2
+               pm_nl_set_limits $ns2 0 2
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
+               filter_tcp_from $ns1 10.0.3.2 DROP
+               run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+               chk_join_nr 1 1 1
+       fi
 
        # multiple subflows, check that the endpoint corresponding to
        # closed subflow (due to reset) is not reused if additional
        # subflows are added later
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       ip netns exec $ns1 iptables -A INPUT -s 10.0.3.2 -p tcp -j REJECT
-       run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow &
-
-       # updates in the child shell do not have any effect here, we
-       # need to bump the test counter for the above case
-       TEST_COUNT=$((TEST_COUNT+1))
-
-       # mpj subflow will be in TW after the reset
-       wait_for_tw $ns2
-       pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
-       wait
-
-       # additional subflow could be created only if the PM select
-       # the later endpoint, skipping the already used one
-       chk_join_nr "multi subflows, fair usage on close" 1 1 1
+       if reset "multi subflows, fair usage on close"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               filter_tcp_from $ns1 10.0.3.2 REJECT
+               run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow &
+
+               # mpj subflow will be in TW after the reset
+               wait_attempt_fail $ns2
+               pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
+               wait
+
+               # an additional subflow can be created only if the PM selects
+               # the later endpoint, skipping the already used one
+               chk_join_nr 1 1 1
+       fi
 }
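
The TEST_COUNT bump removed above ("updates in the child shell do not have any
effect here") worked around a classic pitfall that the reset()-based counting
now avoids: assignments made in a background job or pipeline subshell never
propagate back to the parent shell. A two-line illustration:

        n=0
        ( n=$((n + 1)) ) &      # runs in a child shell
        wait
        echo "$n"               # still prints 0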
 
 signal_address_tests()
 {
        # add_address, unused
-       reset
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "unused signal address" 0 0 0
-       chk_add_nr 1 1
+       if reset "unused signal address"; then
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 0 0 0
+               chk_add_nr 1 1
+       fi
 
        # accept and use add_addr
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 1 1
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "signal address" 1 1 1
-       chk_add_nr 1 1
+       if reset "signal address"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 1 1
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 1 1 1
+               chk_add_nr 1 1
+       fi
 
        # accept and use add_addr with an additional subflow
        # note: signal address in server ns and local addresses in client ns must
        # belong to different subnets or one of the listed local address could be
        # used for 'add_addr' subflow
-       reset
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       pm_nl_set_limits $ns1 0 2
-       pm_nl_set_limits $ns2 1 2
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "subflow and signal" 2 2 2
-       chk_add_nr 1 1
+       if reset "subflow and signal"; then
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               pm_nl_set_limits $ns1 0 2
+               pm_nl_set_limits $ns2 1 2
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 2 2 2
+               chk_add_nr 1 1
+       fi
 
        # accept and use add_addr with additional subflows
-       reset
-       pm_nl_set_limits $ns1 0 3
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       pm_nl_set_limits $ns2 1 3
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "multiple subflows and signal" 3 3 3
-       chk_add_nr 1 1
+       if reset "multiple subflows and signal"; then
+               pm_nl_set_limits $ns1 0 3
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               pm_nl_set_limits $ns2 1 3
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 3 3 3
+               chk_add_nr 1 1
+       fi
 
        # signal addresses
-       reset
-       pm_nl_set_limits $ns1 3 3
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
-       pm_nl_add_endpoint $ns1 10.0.4.1 flags signal
-       pm_nl_set_limits $ns2 3 3
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "signal addresses" 3 3 3
-       chk_add_nr 3 3
+       if reset "signal addresses"; then
+               pm_nl_set_limits $ns1 3 3
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
+               pm_nl_add_endpoint $ns1 10.0.4.1 flags signal
+               pm_nl_set_limits $ns2 3 3
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 3 3 3
+               chk_add_nr 3 3
+       fi
 
        # signal invalid addresses
-       reset
-       pm_nl_set_limits $ns1 3 3
-       pm_nl_add_endpoint $ns1 10.0.12.1 flags signal
-       pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
-       pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
-       pm_nl_set_limits $ns2 3 3
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "signal invalid addresses" 1 1 1
-       chk_add_nr 3 3
+       if reset "signal invalid addresses"; then
+               pm_nl_set_limits $ns1 3 3
+               pm_nl_add_endpoint $ns1 10.0.12.1 flags signal
+               pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
+               pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
+               pm_nl_set_limits $ns2 3 3
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 1 1 1
+               chk_add_nr 3 3
+       fi
 
        # signal addresses race test
-       reset
-       pm_nl_set_limits $ns1 4 4
-       pm_nl_set_limits $ns2 4 4
-       pm_nl_add_endpoint $ns1 10.0.1.1 flags signal
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
-       pm_nl_add_endpoint $ns1 10.0.4.1 flags signal
-       pm_nl_add_endpoint $ns2 10.0.1.2 flags signal
-       pm_nl_add_endpoint $ns2 10.0.2.2 flags signal
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags signal
-       pm_nl_add_endpoint $ns2 10.0.4.2 flags signal
-
-       # the peer could possibly miss some addr notification, allow retransmission
-       ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=1
-       run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
-       chk_join_nr "signal addresses race test" 3 3 3
-
-       # the server will not signal the address terminating
-       # the MPC subflow
-       chk_add_nr 3 3
+       if reset "signal addresses race test"; then
+               pm_nl_set_limits $ns1 4 4
+               pm_nl_set_limits $ns2 4 4
+               pm_nl_add_endpoint $ns1 10.0.1.1 flags signal
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
+               pm_nl_add_endpoint $ns1 10.0.4.1 flags signal
+               pm_nl_add_endpoint $ns2 10.0.1.2 flags signal
+               pm_nl_add_endpoint $ns2 10.0.2.2 flags signal
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags signal
+               pm_nl_add_endpoint $ns2 10.0.4.2 flags signal
+
+               # the peer could possibly miss some addr notifications; allow retransmission
+               ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=1
+               run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+               chk_join_nr 3 3 3
+
+               # the server will not signal the address terminating
+               # the MPC subflow
+               chk_add_nr 3 3
+       fi
 }
 
 link_failure_tests()
 {
        # accept and use add_addr with additional subflows and link loss
-       reset
-
-       # without any b/w limit each veth could spool the packets and get
-       # them acked at xmit time, so that the corresponding subflow will
-       # have almost always no outstanding pkts, the scheduler will pick
-       # always the first subflow and we will have hard time testing
-       # active backup and link switch-over.
-       # Let's set some arbitrary (low) virtual link limits.
-       init_shapers
-       pm_nl_set_limits $ns1 0 3
-       pm_nl_add_endpoint $ns1 10.0.2.1 dev ns1eth2 flags signal
-       pm_nl_set_limits $ns2 1 3
-       pm_nl_add_endpoint $ns2 10.0.3.2 dev ns2eth3 flags subflow
-       pm_nl_add_endpoint $ns2 10.0.4.2 dev ns2eth4 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1 1
-       chk_join_nr "multiple flows, signal, link failure" 3 3 3
-       chk_add_nr 1 1
-       chk_stale_nr $ns2 1 5 1
+       if reset "multiple flows, signal, link failure"; then
+               # without any b/w limit each veth could spool the packets and get
+               # them acked at xmit time, so that the corresponding subflow would
+               # almost always have no outstanding pkts; the scheduler would then
+               # always pick the first subflow and we would have a hard time testing
+               # active backup and link switch-over.
+               # Let's set some arbitrary (low) virtual link limits.
+               init_shapers
+               pm_nl_set_limits $ns1 0 3
+               pm_nl_add_endpoint $ns1 10.0.2.1 dev ns1eth2 flags signal
+               pm_nl_set_limits $ns2 1 3
+               pm_nl_add_endpoint $ns2 10.0.3.2 dev ns2eth3 flags subflow
+               pm_nl_add_endpoint $ns2 10.0.4.2 dev ns2eth4 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1 1
+               chk_join_nr 3 3 3
+               chk_add_nr 1 1
+               chk_stale_nr $ns2 1 5 1
+       fi
 
        # accept and use add_addr with additional subflows and link loss
        # for bidirectional transfer
-       reset
-       init_shapers
-       pm_nl_set_limits $ns1 0 3
-       pm_nl_add_endpoint $ns1 10.0.2.1 dev ns1eth2 flags signal
-       pm_nl_set_limits $ns2 1 3
-       pm_nl_add_endpoint $ns2 10.0.3.2 dev ns2eth3 flags subflow
-       pm_nl_add_endpoint $ns2 10.0.4.2 dev ns2eth4 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1 2
-       chk_join_nr "multi flows, signal, bidi, link fail" 3 3 3
-       chk_add_nr 1 1
-       chk_stale_nr $ns2 1 -1 1
+       if reset "multi flows, signal, bidi, link fail"; then
+               init_shapers
+               pm_nl_set_limits $ns1 0 3
+               pm_nl_add_endpoint $ns1 10.0.2.1 dev ns1eth2 flags signal
+               pm_nl_set_limits $ns2 1 3
+               pm_nl_add_endpoint $ns2 10.0.3.2 dev ns2eth3 flags subflow
+               pm_nl_add_endpoint $ns2 10.0.4.2 dev ns2eth4 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1 2
+               chk_join_nr 3 3 3
+               chk_add_nr 1 1
+               chk_stale_nr $ns2 1 -1 1
+       fi
 
        # 2 subflows plus 1 backup subflow with a lossy link, backup
        # will never be used
-       reset
-       init_shapers
-       pm_nl_set_limits $ns1 0 2
-       pm_nl_add_endpoint $ns1 10.0.2.1 dev ns1eth2 flags signal
-       pm_nl_set_limits $ns2 1 2
-       export FAILING_LINKS="1"
-       pm_nl_add_endpoint $ns2 10.0.3.2 dev ns2eth3 flags subflow,backup
-       run_tests $ns1 $ns2 10.0.1.1 1
-       chk_join_nr "backup subflow unused, link failure" 2 2 2
-       chk_add_nr 1 1
-       chk_link_usage $ns2 ns2eth3 $cinsent 0
+       if reset "backup subflow unused, link failure"; then
+               init_shapers
+               pm_nl_set_limits $ns1 0 2
+               pm_nl_add_endpoint $ns1 10.0.2.1 dev ns1eth2 flags signal
+               pm_nl_set_limits $ns2 1 2
+               FAILING_LINKS="1"
+               pm_nl_add_endpoint $ns2 10.0.3.2 dev ns2eth3 flags subflow,backup
+               run_tests $ns1 $ns2 10.0.1.1 1
+               chk_join_nr 2 2 2
+               chk_add_nr 1 1
+               chk_link_usage $ns2 ns2eth3 $cinsent 0
+       fi
 
        # 2 lossy links after half transfer, backup will get half of
        # the traffic
-       reset
-       init_shapers
-       pm_nl_set_limits $ns1 0 2
-       pm_nl_add_endpoint $ns1 10.0.2.1 dev ns1eth2 flags signal
-       pm_nl_set_limits $ns2 1 2
-       pm_nl_add_endpoint $ns2 10.0.3.2 dev ns2eth3 flags subflow,backup
-       export FAILING_LINKS="1 2"
-       run_tests $ns1 $ns2 10.0.1.1 1
-       chk_join_nr "backup flow used, multi links fail" 2 2 2
-       chk_add_nr 1 1
-       chk_stale_nr $ns2 2 4 2
-       chk_link_usage $ns2 ns2eth3 $cinsent 50
+       if reset "backup flow used, multi links fail"; then
+               init_shapers
+               pm_nl_set_limits $ns1 0 2
+               pm_nl_add_endpoint $ns1 10.0.2.1 dev ns1eth2 flags signal
+               pm_nl_set_limits $ns2 1 2
+               pm_nl_add_endpoint $ns2 10.0.3.2 dev ns2eth3 flags subflow,backup
+               FAILING_LINKS="1 2"
+               run_tests $ns1 $ns2 10.0.1.1 1
+               chk_join_nr 2 2 2
+               chk_add_nr 1 1
+               chk_stale_nr $ns2 2 4 2
+               chk_link_usage $ns2 ns2eth3 $cinsent 50
+       fi
 
        # use a backup subflow with the first subflow on a lossy link
        # for bidirectional transfer
-       reset
-       init_shapers
-       pm_nl_set_limits $ns1 0 2
-       pm_nl_add_endpoint $ns1 10.0.2.1 dev ns1eth2 flags signal
-       pm_nl_set_limits $ns2 1 3
-       pm_nl_add_endpoint $ns2 10.0.3.2 dev ns2eth3 flags subflow,backup
-       run_tests $ns1 $ns2 10.0.1.1 2
-       chk_join_nr "backup flow used, bidi, link failure" 2 2 2
-       chk_add_nr 1 1
-       chk_stale_nr $ns2 1 -1 2
-       chk_link_usage $ns2 ns2eth3 $cinsent 50
+       if reset "backup flow used, bidi, link failure"; then
+               init_shapers
+               pm_nl_set_limits $ns1 0 2
+               pm_nl_add_endpoint $ns1 10.0.2.1 dev ns1eth2 flags signal
+               pm_nl_set_limits $ns2 1 3
+               pm_nl_add_endpoint $ns2 10.0.3.2 dev ns2eth3 flags subflow,backup
+               FAILING_LINKS="1 2"
+               run_tests $ns1 $ns2 10.0.1.1 2
+               chk_join_nr 2 2 2
+               chk_add_nr 1 1
+               chk_stale_nr $ns2 1 -1 2
+               chk_link_usage $ns2 ns2eth3 $cinsent 50
+       fi
 }
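
The 'export' on FAILING_LINKS is dropped: the variable is presumably consumed
by the link-failure handling inside this same script, where a plain shell
assignment is already visible, and the bidirectional test now sets
FAILING_LINKS="1 2" explicitly instead of inheriting the value exported by the
previous test. A sketch of the visibility rule:

        FAILING_LINKS="1 2"                # no export needed
        uses_it() { echo "$FAILING_LINKS"; }
        uses_it                            # prints "1 2": functions in the same
                                           # shell see unexported variables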
 
 add_addr_timeout_tests()
 {
        # add_addr timeout
-       reset_with_add_addr_timeout
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 1 1
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
-       chk_join_nr "signal address, ADD_ADDR timeout" 1 1 1
-       chk_add_nr 4 0
+       if reset_with_add_addr_timeout "signal address, ADD_ADDR timeout"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 1 1
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+               chk_join_nr 1 1 1
+               chk_add_nr 4 0
+       fi
 
        # add_addr timeout IPv6
-       reset_with_add_addr_timeout 6
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 1 1
-       pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
-       run_tests $ns1 $ns2 dead:beef:1::1 0 0 0 slow
-       chk_join_nr "signal address, ADD_ADDR6 timeout" 1 1 1
-       chk_add_nr 4 0
+       if reset_with_add_addr_timeout "signal address, ADD_ADDR6 timeout" 6; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 1 1
+               pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
+               run_tests $ns1 $ns2 dead:beef:1::1 0 0 0 slow
+               chk_join_nr 1 1 1
+               chk_add_nr 4 0
+       fi
 
        # signal addresses timeout
-       reset_with_add_addr_timeout
-       pm_nl_set_limits $ns1 2 2
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
-       pm_nl_set_limits $ns2 2 2
-       run_tests $ns1 $ns2 10.0.1.1 0 0 0 speed_10
-       chk_join_nr "signal addresses, ADD_ADDR timeout" 2 2 2
-       chk_add_nr 8 0
+       if reset_with_add_addr_timeout "signal addresses, ADD_ADDR timeout"; then
+               pm_nl_set_limits $ns1 2 2
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
+               pm_nl_set_limits $ns2 2 2
+               run_tests $ns1 $ns2 10.0.1.1 0 0 0 speed_10
+               chk_join_nr 2 2 2
+               chk_add_nr 8 0
+       fi
 
        # signal invalid addresses timeout
-       reset_with_add_addr_timeout
-       pm_nl_set_limits $ns1 2 2
-       pm_nl_add_endpoint $ns1 10.0.12.1 flags signal
-       pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
-       pm_nl_set_limits $ns2 2 2
-       run_tests $ns1 $ns2 10.0.1.1 0 0 0 speed_10
-       chk_join_nr "invalid address, ADD_ADDR timeout" 1 1 1
-       chk_add_nr 8 0
+       if reset_with_add_addr_timeout "invalid address, ADD_ADDR timeout"; then
+               pm_nl_set_limits $ns1 2 2
+               pm_nl_add_endpoint $ns1 10.0.12.1 flags signal
+               pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
+               pm_nl_set_limits $ns2 2 2
+               run_tests $ns1 $ns2 10.0.1.1 0 0 0 speed_10
+               chk_join_nr 1 1 1
+               chk_add_nr 8 0
+       fi
 }
 
 remove_tests()
 {
        # single subflow, remove
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1 0 0 -1 slow
-       chk_join_nr "remove single subflow" 1 1 1
-       chk_rm_nr 1 1
+       if reset "remove single subflow"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1 0 0 -1 slow
+               chk_join_nr 1 1 1
+               chk_rm_nr 1 1
+       fi
 
        # multiple subflows, remove
-       reset
-       pm_nl_set_limits $ns1 0 2
-       pm_nl_set_limits $ns2 0 2
-       pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1 0 0 -2 slow
-       chk_join_nr "remove multiple subflows" 2 2 2
-       chk_rm_nr 2 2
+       if reset "remove multiple subflows"; then
+               pm_nl_set_limits $ns1 0 2
+               pm_nl_set_limits $ns2 0 2
+               pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1 0 0 -2 slow
+               chk_join_nr 2 2 2
+               chk_rm_nr 2 2
+       fi
 
        # single address, remove
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       pm_nl_set_limits $ns2 1 1
-       run_tests $ns1 $ns2 10.0.1.1 0 -1 0 slow
-       chk_join_nr "remove single address" 1 1 1
-       chk_add_nr 1 1
-       chk_rm_nr 1 1 invert
+       if reset "remove single address"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               pm_nl_set_limits $ns2 1 1
+               run_tests $ns1 $ns2 10.0.1.1 0 -1 0 slow
+               chk_join_nr 1 1 1
+               chk_add_nr 1 1
+               chk_rm_nr 1 1 invert
+       fi
 
        # subflow and signal, remove
-       reset
-       pm_nl_set_limits $ns1 0 2
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       pm_nl_set_limits $ns2 1 2
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1 0 -1 -1 slow
-       chk_join_nr "remove subflow and signal" 2 2 2
-       chk_add_nr 1 1
-       chk_rm_nr 1 1
+       if reset "remove subflow and signal"; then
+               pm_nl_set_limits $ns1 0 2
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               pm_nl_set_limits $ns2 1 2
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1 0 -1 -1 slow
+               chk_join_nr 2 2 2
+               chk_add_nr 1 1
+               chk_rm_nr 1 1
+       fi
 
        # subflows and signal, remove
-       reset
-       pm_nl_set_limits $ns1 0 3
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       pm_nl_set_limits $ns2 1 3
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1 0 -1 -2 slow
-       chk_join_nr "remove subflows and signal" 3 3 3
-       chk_add_nr 1 1
-       chk_rm_nr 2 2
+       if reset "remove subflows and signal"; then
+               pm_nl_set_limits $ns1 0 3
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               pm_nl_set_limits $ns2 1 3
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1 0 -1 -2 slow
+               chk_join_nr 3 3 3
+               chk_add_nr 1 1
+               chk_rm_nr 2 2
+       fi
 
        # addresses remove
-       reset
-       pm_nl_set_limits $ns1 3 3
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal id 250
-       pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
-       pm_nl_add_endpoint $ns1 10.0.4.1 flags signal
-       pm_nl_set_limits $ns2 3 3
-       run_tests $ns1 $ns2 10.0.1.1 0 -3 0 slow
-       chk_join_nr "remove addresses" 3 3 3
-       chk_add_nr 3 3
-       chk_rm_nr 3 3 invert
+       if reset "remove addresses"; then
+               pm_nl_set_limits $ns1 3 3
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal id 250
+               pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
+               pm_nl_add_endpoint $ns1 10.0.4.1 flags signal
+               pm_nl_set_limits $ns2 3 3
+               run_tests $ns1 $ns2 10.0.1.1 0 -3 0 slow
+               chk_join_nr 3 3 3
+               chk_add_nr 3 3
+               chk_rm_nr 3 3 invert
+       fi
 
        # invalid addresses remove
-       reset
-       pm_nl_set_limits $ns1 3 3
-       pm_nl_add_endpoint $ns1 10.0.12.1 flags signal
-       pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
-       pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
-       pm_nl_set_limits $ns2 3 3
-       run_tests $ns1 $ns2 10.0.1.1 0 -3 0 slow
-       chk_join_nr "remove invalid addresses" 1 1 1
-       chk_add_nr 3 3
-       chk_rm_nr 3 1 invert
+       if reset "remove invalid addresses"; then
+               pm_nl_set_limits $ns1 3 3
+               pm_nl_add_endpoint $ns1 10.0.12.1 flags signal
+               pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
+               pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
+               pm_nl_set_limits $ns2 3 3
+               run_tests $ns1 $ns2 10.0.1.1 0 -3 0 slow
+               chk_join_nr 1 1 1
+               chk_add_nr 3 3
+               chk_rm_nr 3 1 invert
+       fi
 
        # subflows and signal, flush
-       reset
-       pm_nl_set_limits $ns1 0 3
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       pm_nl_set_limits $ns2 1 3
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1 0 -8 -8 slow
-       chk_join_nr "flush subflows and signal" 3 3 3
-       chk_add_nr 1 1
-       chk_rm_nr 2 2
+       if reset "flush subflows and signal"; then
+               pm_nl_set_limits $ns1 0 3
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               pm_nl_set_limits $ns2 1 3
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1 0 -8 -8 slow
+               chk_join_nr 3 3 3
+               chk_add_nr 1 1
+               chk_rm_nr 1 3 invert simult
+       fi
 
        # subflows flush
-       reset
-       pm_nl_set_limits $ns1 3 3
-       pm_nl_set_limits $ns2 3 3
-       pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow id 150
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1 0 -8 -8 slow
-       chk_join_nr "flush subflows" 3 3 3
-       chk_rm_nr 3 3
+       if reset "flush subflows"; then
+               pm_nl_set_limits $ns1 3 3
+               pm_nl_set_limits $ns2 3 3
+               pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow id 150
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1 0 -8 -8 slow
+               chk_join_nr 3 3 3
+               chk_rm_nr 0 3 simult
+       fi
 
        # addresses flush
-       reset
-       pm_nl_set_limits $ns1 3 3
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal id 250
-       pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
-       pm_nl_add_endpoint $ns1 10.0.4.1 flags signal
-       pm_nl_set_limits $ns2 3 3
-       run_tests $ns1 $ns2 10.0.1.1 0 -8 -8 slow
-       chk_join_nr "flush addresses" 3 3 3
-       chk_add_nr 3 3
-       chk_rm_nr 3 3 invert
+       if reset "flush addresses"; then
+               pm_nl_set_limits $ns1 3 3
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal id 250
+               pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
+               pm_nl_add_endpoint $ns1 10.0.4.1 flags signal
+               pm_nl_set_limits $ns2 3 3
+               run_tests $ns1 $ns2 10.0.1.1 0 -8 -8 slow
+               chk_join_nr 3 3 3
+               chk_add_nr 3 3
+               chk_rm_nr 3 3 invert simult
+       fi
 
        # invalid addresses flush
-       reset
-       pm_nl_set_limits $ns1 3 3
-       pm_nl_add_endpoint $ns1 10.0.12.1 flags signal
-       pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
-       pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
-       pm_nl_set_limits $ns2 3 3
-       run_tests $ns1 $ns2 10.0.1.1 0 -8 0 slow
-       chk_join_nr "flush invalid addresses" 1 1 1
-       chk_add_nr 3 3
-       chk_rm_nr 3 1 invert
+       if reset "flush invalid addresses"; then
+               pm_nl_set_limits $ns1 3 3
+               pm_nl_add_endpoint $ns1 10.0.12.1 flags signal
+               pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
+               pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
+               pm_nl_set_limits $ns2 3 3
+               run_tests $ns1 $ns2 10.0.1.1 0 -8 0 slow
+               chk_join_nr 1 1 1
+               chk_add_nr 3 3
+               chk_rm_nr 3 1 invert
+       fi
 
        # remove id 0 subflow
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1 0 0 -9 slow
-       chk_join_nr "remove id 0 subflow" 1 1 1
-       chk_rm_nr 1 1
+       if reset "remove id 0 subflow"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1 0 0 -9 slow
+               chk_join_nr 1 1 1
+               chk_rm_nr 1 1
+       fi
 
        # remove id 0 address
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       pm_nl_set_limits $ns2 1 1
-       run_tests $ns1 $ns2 10.0.1.1 0 -9 0 slow
-       chk_join_nr "remove id 0 address" 1 1 1
-       chk_add_nr 1 1
-       chk_rm_nr 1 1 invert
+       if reset "remove id 0 address"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               pm_nl_set_limits $ns2 1 1
+               run_tests $ns1 $ns2 10.0.1.1 0 -9 0 slow
+               chk_join_nr 1 1 1
+               chk_add_nr 1 1
+               chk_rm_nr 1 1 invert
+       fi
 }
 
 add_tests()
 {
        # add single subflow
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       run_tests $ns1 $ns2 10.0.1.1 0 0 1 slow
-       chk_join_nr "add single subflow" 1 1 1
+       if reset "add single subflow"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               run_tests $ns1 $ns2 10.0.1.1 0 0 1 slow
+               chk_join_nr 1 1 1
+       fi
 
        # add signal address
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 1 1
-       run_tests $ns1 $ns2 10.0.1.1 0 1 0 slow
-       chk_join_nr "add signal address" 1 1 1
-       chk_add_nr 1 1
+       if reset "add signal address"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 1 1
+               run_tests $ns1 $ns2 10.0.1.1 0 1 0 slow
+               chk_join_nr 1 1 1
+               chk_add_nr 1 1
+       fi
 
        # add multiple subflows
-       reset
-       pm_nl_set_limits $ns1 0 2
-       pm_nl_set_limits $ns2 0 2
-       run_tests $ns1 $ns2 10.0.1.1 0 0 2 slow
-       chk_join_nr "add multiple subflows" 2 2 2
+       if reset "add multiple subflows"; then
+               pm_nl_set_limits $ns1 0 2
+               pm_nl_set_limits $ns2 0 2
+               run_tests $ns1 $ns2 10.0.1.1 0 0 2 slow
+               chk_join_nr 2 2 2
+       fi
 
        # add multiple subflows IPv6
-       reset
-       pm_nl_set_limits $ns1 0 2
-       pm_nl_set_limits $ns2 0 2
-       run_tests $ns1 $ns2 dead:beef:1::1 0 0 2 slow
-       chk_join_nr "add multiple subflows IPv6" 2 2 2
+       if reset "add multiple subflows IPv6"; then
+               pm_nl_set_limits $ns1 0 2
+               pm_nl_set_limits $ns2 0 2
+               run_tests $ns1 $ns2 dead:beef:1::1 0 0 2 slow
+               chk_join_nr 2 2 2
+       fi
 
        # add multiple addresses IPv6
-       reset
-       pm_nl_set_limits $ns1 0 2
-       pm_nl_set_limits $ns2 2 2
-       run_tests $ns1 $ns2 dead:beef:1::1 0 2 0 slow
-       chk_join_nr "add multiple addresses IPv6" 2 2 2
-       chk_add_nr 2 2
+       if reset "add multiple addresses IPv6"; then
+               pm_nl_set_limits $ns1 0 2
+               pm_nl_set_limits $ns2 2 2
+               run_tests $ns1 $ns2 dead:beef:1::1 0 2 0 slow
+               chk_join_nr 2 2 2
+               chk_add_nr 2 2
+       fi
 }
 
 ipv6_tests()
 {
        # subflow IPv6
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       pm_nl_add_endpoint $ns2 dead:beef:3::2 dev ns2eth3 flags subflow
-       run_tests $ns1 $ns2 dead:beef:1::1 0 0 0 slow
-       chk_join_nr "single subflow IPv6" 1 1 1
+       if reset "single subflow IPv6"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               pm_nl_add_endpoint $ns2 dead:beef:3::2 dev ns2eth3 flags subflow
+               run_tests $ns1 $ns2 dead:beef:1::1 0 0 0 slow
+               chk_join_nr 1 1 1
+       fi
 
        # add_address, unused IPv6
-       reset
-       pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
-       run_tests $ns1 $ns2 dead:beef:1::1 0 0 0 slow
-       chk_join_nr "unused signal address IPv6" 0 0 0
-       chk_add_nr 1 1
+       if reset "unused signal address IPv6"; then
+               pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
+               run_tests $ns1 $ns2 dead:beef:1::1 0 0 0 slow
+               chk_join_nr 0 0 0
+               chk_add_nr 1 1
+       fi
 
        # signal address IPv6
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
-       pm_nl_set_limits $ns2 1 1
-       run_tests $ns1 $ns2 dead:beef:1::1 0 0 0 slow
-       chk_join_nr "single address IPv6" 1 1 1
-       chk_add_nr 1 1
+       if reset "single address IPv6"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
+               pm_nl_set_limits $ns2 1 1
+               run_tests $ns1 $ns2 dead:beef:1::1 0 0 0 slow
+               chk_join_nr 1 1 1
+               chk_add_nr 1 1
+       fi
 
        # single address IPv6, remove
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
-       pm_nl_set_limits $ns2 1 1
-       run_tests $ns1 $ns2 dead:beef:1::1 0 -1 0 slow
-       chk_join_nr "remove single address IPv6" 1 1 1
-       chk_add_nr 1 1
-       chk_rm_nr 1 1 invert
+       if reset "remove single address IPv6"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
+               pm_nl_set_limits $ns2 1 1
+               run_tests $ns1 $ns2 dead:beef:1::1 0 -1 0 slow
+               chk_join_nr 1 1 1
+               chk_add_nr 1 1
+               chk_rm_nr 1 1 invert
+       fi
 
        # subflow and signal IPv6, remove
-       reset
-       pm_nl_set_limits $ns1 0 2
-       pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
-       pm_nl_set_limits $ns2 1 2
-       pm_nl_add_endpoint $ns2 dead:beef:3::2 dev ns2eth3 flags subflow
-       run_tests $ns1 $ns2 dead:beef:1::1 0 -1 -1 slow
-       chk_join_nr "remove subflow and signal IPv6" 2 2 2
-       chk_add_nr 1 1
-       chk_rm_nr 1 1
+       if reset "remove subflow and signal IPv6"; then
+               pm_nl_set_limits $ns1 0 2
+               pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
+               pm_nl_set_limits $ns2 1 2
+               pm_nl_add_endpoint $ns2 dead:beef:3::2 dev ns2eth3 flags subflow
+               run_tests $ns1 $ns2 dead:beef:1::1 0 -1 -1 slow
+               chk_join_nr 2 2 2
+               chk_add_nr 1 1
+               chk_rm_nr 1 1
+       fi
 }
 
 v4mapped_tests()
 {
        # subflow IPv4-mapped to IPv4-mapped
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       pm_nl_add_endpoint $ns2 "::ffff:10.0.3.2" flags subflow
-       run_tests $ns1 $ns2 "::ffff:10.0.1.1"
-       chk_join_nr "single subflow IPv4-mapped" 1 1 1
+       if reset "single subflow IPv4-mapped"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               pm_nl_add_endpoint $ns2 "::ffff:10.0.3.2" flags subflow
+               run_tests $ns1 $ns2 "::ffff:10.0.1.1"
+               chk_join_nr 1 1 1
+       fi
 
        # signal address IPv4-mapped with IPv4-mapped sk
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 1 1
-       pm_nl_add_endpoint $ns1 "::ffff:10.0.2.1" flags signal
-       run_tests $ns1 $ns2 "::ffff:10.0.1.1"
-       chk_join_nr "signal address IPv4-mapped" 1 1 1
-       chk_add_nr 1 1
+       if reset "signal address IPv4-mapped"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 1 1
+               pm_nl_add_endpoint $ns1 "::ffff:10.0.2.1" flags signal
+               run_tests $ns1 $ns2 "::ffff:10.0.1.1"
+               chk_join_nr 1 1 1
+               chk_add_nr 1 1
+       fi
 
        # subflow v4-map-v6
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       run_tests $ns1 $ns2 "::ffff:10.0.1.1"
-       chk_join_nr "single subflow v4-map-v6" 1 1 1
+       if reset "single subflow v4-map-v6"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               run_tests $ns1 $ns2 "::ffff:10.0.1.1"
+               chk_join_nr 1 1 1
+       fi
 
        # signal address v4-map-v6
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 1 1
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       run_tests $ns1 $ns2 "::ffff:10.0.1.1"
-       chk_join_nr "signal address v4-map-v6" 1 1 1
-       chk_add_nr 1 1
+       if reset "signal address v4-map-v6"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 1 1
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               run_tests $ns1 $ns2 "::ffff:10.0.1.1"
+               chk_join_nr 1 1 1
+               chk_add_nr 1 1
+       fi
 
        # subflow v6-map-v4
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       pm_nl_add_endpoint $ns2 "::ffff:10.0.3.2" flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "single subflow v6-map-v4" 1 1 1
+       if reset "single subflow v6-map-v4"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               pm_nl_add_endpoint $ns2 "::ffff:10.0.3.2" flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 1 1 1
+       fi
 
        # signal address v6-map-v4
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 1 1
-       pm_nl_add_endpoint $ns1 "::ffff:10.0.2.1" flags signal
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "signal address v6-map-v4" 1 1 1
-       chk_add_nr 1 1
+       if reset "signal address v6-map-v4"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 1 1
+               pm_nl_add_endpoint $ns1 "::ffff:10.0.2.1" flags signal
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 1 1 1
+               chk_add_nr 1 1
+       fi
 
        # no subflow IPv6 to v4 address
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       pm_nl_add_endpoint $ns2 dead:beef:2::2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "no JOIN with diff families v4-v6" 0 0 0
+       if reset "no JOIN with diff families v4-v6"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               pm_nl_add_endpoint $ns2 dead:beef:2::2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 0 0 0
+       fi
 
        # no subflow IPv6 to v4 address even if v6 has a valid v4 at the end
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       pm_nl_add_endpoint $ns2 dead:beef:2::10.0.3.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "no JOIN with diff families v4-v6-2" 0 0 0
+       if reset "no JOIN with diff families v4-v6-2"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               pm_nl_add_endpoint $ns2 dead:beef:2::10.0.3.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 0 0 0
+       fi
 
        # no subflow IPv4 to v6 address, no need to slow down too then
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       run_tests $ns1 $ns2 dead:beef:1::1
-       chk_join_nr "no JOIN with diff families v6-v4" 0 0 0
+       if reset "no JOIN with diff families v6-v4"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               run_tests $ns1 $ns2 dead:beef:1::1
+               chk_join_nr 0 0 0
+       fi
 }
 
 backup_tests()
 {
        # single subflow, backup
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,backup
-       run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow nobackup
-       chk_join_nr "single subflow, backup" 1 1 1
-       chk_prio_nr 0 1
+       if reset "single subflow, backup"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,backup
+               run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow nobackup
+               chk_join_nr 1 1 1
+               chk_prio_nr 0 1
+       fi
 
        # single address, backup
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       pm_nl_set_limits $ns2 1 1
-       run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
-       chk_join_nr "single address, backup" 1 1 1
-       chk_add_nr 1 1
-       chk_prio_nr 1 0
+       if reset "single address, backup"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               pm_nl_set_limits $ns2 1 1
+               run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+               chk_join_nr 1 1 1
+               chk_add_nr 1 1
+               chk_prio_nr 1 1
+       fi
 
        # single address with port, backup
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
-       pm_nl_set_limits $ns2 1 1
-       run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
-       chk_join_nr "single address with port, backup" 1 1 1
-       chk_add_nr 1 1
-       chk_prio_nr 1 0
+       if reset "single address with port, backup"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
+               pm_nl_set_limits $ns2 1 1
+               run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+               chk_join_nr 1 1 1
+               chk_add_nr 1 1
+               chk_prio_nr 1 1
+       fi
 }
 
 add_addr_ports_tests()
 {
        # signal address with port
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 1 1
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "signal address with port" 1 1 1
-       chk_add_nr 1 1 1
+       if reset "signal address with port"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 1 1
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 1 1 1
+               chk_add_nr 1 1 1
+       fi
 
        # subflow and signal with port
-       reset
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
-       pm_nl_set_limits $ns1 0 2
-       pm_nl_set_limits $ns2 1 2
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "subflow and signal with port" 2 2 2
-       chk_add_nr 1 1 1
+       if reset "subflow and signal with port"; then
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
+               pm_nl_set_limits $ns1 0 2
+               pm_nl_set_limits $ns2 1 2
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 2 2 2
+               chk_add_nr 1 1 1
+       fi
 
        # single address with port, remove
-       reset
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
-       pm_nl_set_limits $ns2 1 1
-       run_tests $ns1 $ns2 10.0.1.1 0 -1 0 slow
-       chk_join_nr "remove single address with port" 1 1 1
-       chk_add_nr 1 1 1
-       chk_rm_nr 1 1 invert
+       if reset "remove single address with port"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
+               pm_nl_set_limits $ns2 1 1
+               run_tests $ns1 $ns2 10.0.1.1 0 -1 0 slow
+               chk_join_nr 1 1 1
+               chk_add_nr 1 1 1
+               chk_rm_nr 1 1 invert
+       fi
 
        # subflow and signal with port, remove
-       reset
-       pm_nl_set_limits $ns1 0 2
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
-       pm_nl_set_limits $ns2 1 2
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1 0 -1 -1 slow
-       chk_join_nr "remove subflow and signal with port" 2 2 2
-       chk_add_nr 1 1 1
-       chk_rm_nr 1 1
+       if reset "remove subflow and signal with port"; then
+               pm_nl_set_limits $ns1 0 2
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
+               pm_nl_set_limits $ns2 1 2
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1 0 -1 -1 slow
+               chk_join_nr 2 2 2
+               chk_add_nr 1 1 1
+               chk_rm_nr 1 1
+       fi
 
        # subflows and signal with port, flush
-       reset
-       pm_nl_set_limits $ns1 0 3
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
-       pm_nl_set_limits $ns2 1 3
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1 0 -8 -2 slow
-       chk_join_nr "flush subflows and signal with port" 3 3 3
-       chk_add_nr 1 1
-       chk_rm_nr 2 2
+       if reset "flush subflows and signal with port"; then
+               pm_nl_set_limits $ns1 0 3
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
+               pm_nl_set_limits $ns2 1 3
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1 0 -8 -2 slow
+               chk_join_nr 3 3 3
+               chk_add_nr 1 1
+               chk_rm_nr 1 3 invert simult
+       fi
 
        # multiple addresses with port
-       reset
-       pm_nl_set_limits $ns1 2 2
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
-       pm_nl_add_endpoint $ns1 10.0.3.1 flags signal port 10100
-       pm_nl_set_limits $ns2 2 2
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "multiple addresses with port" 2 2 2
-       chk_add_nr 2 2 2
+       if reset "multiple addresses with port"; then
+               pm_nl_set_limits $ns1 2 2
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
+               pm_nl_add_endpoint $ns1 10.0.3.1 flags signal port 10100
+               pm_nl_set_limits $ns2 2 2
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 2 2 2
+               chk_add_nr 2 2 2
+       fi
 
        # multiple addresses with ports
-       reset
-       pm_nl_set_limits $ns1 2 2
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
-       pm_nl_add_endpoint $ns1 10.0.3.1 flags signal port 10101
-       pm_nl_set_limits $ns2 2 2
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "multiple addresses with ports" 2 2 2
-       chk_add_nr 2 2 2
+       if reset "multiple addresses with ports"; then
+               pm_nl_set_limits $ns1 2 2
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
+               pm_nl_add_endpoint $ns1 10.0.3.1 flags signal port 10101
+               pm_nl_set_limits $ns2 2 2
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 2 2 2
+               chk_add_nr 2 2 2
+       fi
 }
 
 syncookies_tests()
 {
        # single subflow, syncookies
-       reset_with_cookies
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "single subflow with syn cookies" 1 1 1
+       if reset_with_cookies "single subflow with syn cookies"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 1 1 1
+       fi
 
        # multiple subflows with syn cookies
-       reset_with_cookies
-       pm_nl_set_limits $ns1 0 2
-       pm_nl_set_limits $ns2 0 2
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "multiple subflows with syn cookies" 2 2 2
+       if reset_with_cookies "multiple subflows with syn cookies"; then
+               pm_nl_set_limits $ns1 0 2
+               pm_nl_set_limits $ns2 0 2
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 2 2 2
+       fi
 
        # multiple subflows limited by server
-       reset_with_cookies
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 2
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "subflows limited by server w cookies" 2 1 1
+       if reset_with_cookies "subflows limited by server w cookies"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 2
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 2 1 1
+       fi
 
        # test signal address with cookies
-       reset_with_cookies
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 1 1
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "signal address with syn cookies" 1 1 1
-       chk_add_nr 1 1
+       if reset_with_cookies "signal address with syn cookies"; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 1 1
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 1 1 1
+               chk_add_nr 1 1
+       fi
 
        # test cookie with subflow and signal
-       reset_with_cookies
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       pm_nl_set_limits $ns1 0 2
-       pm_nl_set_limits $ns2 1 2
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "subflow and signal w cookies" 2 2 2
-       chk_add_nr 1 1
+       if reset_with_cookies "subflow and signal w cookies"; then
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               pm_nl_set_limits $ns1 0 2
+               pm_nl_set_limits $ns2 1 2
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 2 2 2
+               chk_add_nr 1 1
+       fi
 
        # accept and use add_addr with additional subflows
-       reset_with_cookies
-       pm_nl_set_limits $ns1 0 3
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       pm_nl_set_limits $ns2 1 3
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "subflows and signal w. cookies" 3 3 3
-       chk_add_nr 1 1
+       if reset_with_cookies "subflows and signal w. cookies"; then
+               pm_nl_set_limits $ns1 0 3
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               pm_nl_set_limits $ns2 1 3
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 3 3 3
+               chk_add_nr 1 1
+       fi
 }
 
 checksum_tests()
 {
        # checksum test 0 0
-       reset_with_checksum 0 0
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_csum_nr "checksum test 0 0"
+       if reset_with_checksum 0 0; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 0 0 0
+       fi
 
        # checksum test 1 1
-       reset_with_checksum 1 1
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_csum_nr "checksum test 1 1"
+       if reset_with_checksum 1 1; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 0 0 0
+       fi
 
        # checksum test 0 1
-       reset_with_checksum 0 1
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_csum_nr "checksum test 0 1"
+       if reset_with_checksum 0 1; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 0 0 0
+       fi
 
        # checksum test 1 0
-       reset_with_checksum 1 0
-       pm_nl_set_limits $ns1 0 1
-       pm_nl_set_limits $ns2 0 1
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_csum_nr "checksum test 1 0"
+       if reset_with_checksum 1 0; then
+               pm_nl_set_limits $ns1 0 1
+               pm_nl_set_limits $ns2 0 1
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 0 0 0
+       fi
 }
 
 deny_join_id0_tests()
 {
        # subflow allow join id0 ns1
-       reset_with_allow_join_id0 1 0
-       pm_nl_set_limits $ns1 1 1
-       pm_nl_set_limits $ns2 1 1
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "single subflow allow join id0 ns1" 1 1 1
+       if reset_with_allow_join_id0 "single subflow allow join id0 ns1" 1 0; then
+               pm_nl_set_limits $ns1 1 1
+               pm_nl_set_limits $ns2 1 1
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 1 1 1
+       fi
 
        # subflow allow join id0 ns2
-       reset_with_allow_join_id0 0 1
-       pm_nl_set_limits $ns1 1 1
-       pm_nl_set_limits $ns2 1 1
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "single subflow allow join id0 ns2" 0 0 0
+       if reset_with_allow_join_id0 "single subflow allow join id0 ns2" 0 1; then
+               pm_nl_set_limits $ns1 1 1
+               pm_nl_set_limits $ns2 1 1
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 0 0 0
+       fi
 
        # signal address allow join id0 ns1
        # ADD_ADDRs are not affected by allow_join_id0 value.
-       reset_with_allow_join_id0 1 0
-       pm_nl_set_limits $ns1 1 1
-       pm_nl_set_limits $ns2 1 1
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "signal address allow join id0 ns1" 1 1 1
-       chk_add_nr 1 1
+       if reset_with_allow_join_id0 "signal address allow join id0 ns1" 1 0; then
+               pm_nl_set_limits $ns1 1 1
+               pm_nl_set_limits $ns2 1 1
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 1 1 1
+               chk_add_nr 1 1
+       fi
 
        # signal address allow join id0 ns2
        # ADD_ADDRs are not affected by allow_join_id0 value.
-       reset_with_allow_join_id0 0 1
-       pm_nl_set_limits $ns1 1 1
-       pm_nl_set_limits $ns2 1 1
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "signal address allow join id0 ns2" 1 1 1
-       chk_add_nr 1 1
+       if reset_with_allow_join_id0 "signal address allow join id0 ns2" 0 1; then
+               pm_nl_set_limits $ns1 1 1
+               pm_nl_set_limits $ns2 1 1
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 1 1 1
+               chk_add_nr 1 1
+       fi
 
        # subflow and address allow join id0 ns1
-       reset_with_allow_join_id0 1 0
-       pm_nl_set_limits $ns1 2 2
-       pm_nl_set_limits $ns2 2 2
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "subflow and address allow join id0 1" 2 2 2
+       if reset_with_allow_join_id0 "subflow and address allow join id0 1" 1 0; then
+               pm_nl_set_limits $ns1 2 2
+               pm_nl_set_limits $ns2 2 2
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 2 2 2
+       fi
 
        # subflow and address allow join id0 ns2
-       reset_with_allow_join_id0 0 1
-       pm_nl_set_limits $ns1 2 2
-       pm_nl_set_limits $ns2 2 2
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
-       run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "subflow and address allow join id0 2" 1 1 1
+       if reset_with_allow_join_id0 "subflow and address allow join id0 2" 0 1; then
+               pm_nl_set_limits $ns1 2 2
+               pm_nl_set_limits $ns2 2 2
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+               run_tests $ns1 $ns2 10.0.1.1
+               chk_join_nr 1 1 1
+       fi
 }
 
 fullmesh_tests()
@@ -2149,115 +2456,128 @@ fullmesh_tests()
        # fullmesh 1
        # 2 fullmesh addrs in ns2, added before the connection,
        # 1 non-fullmesh addr in ns1, added during the connection.
-       reset
-       pm_nl_set_limits $ns1 0 4
-       pm_nl_set_limits $ns2 1 4
-       pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow,fullmesh
-       pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,fullmesh
-       run_tests $ns1 $ns2 10.0.1.1 0 1 0 slow
-       chk_join_nr "fullmesh test 2x1" 4 4 4
-       chk_add_nr 1 1
+       if reset "fullmesh test 2x1"; then
+               pm_nl_set_limits $ns1 0 4
+               pm_nl_set_limits $ns2 1 4
+               pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow,fullmesh
+               pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,fullmesh
+               run_tests $ns1 $ns2 10.0.1.1 0 1 0 slow
+               chk_join_nr 4 4 4
+               chk_add_nr 1 1
+       fi
 
        # fullmesh 2
        # 1 non-fullmesh addr in ns1, added before the connection,
        # 1 fullmesh addr in ns2, added during the connection.
-       reset
-       pm_nl_set_limits $ns1 1 3
-       pm_nl_set_limits $ns2 1 3
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       run_tests $ns1 $ns2 10.0.1.1 0 0 fullmesh_1 slow
-       chk_join_nr "fullmesh test 1x1" 3 3 3
-       chk_add_nr 1 1
+       if reset "fullmesh test 1x1"; then
+               pm_nl_set_limits $ns1 1 3
+               pm_nl_set_limits $ns2 1 3
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               run_tests $ns1 $ns2 10.0.1.1 0 0 fullmesh_1 slow
+               chk_join_nr 3 3 3
+               chk_add_nr 1 1
+       fi
 
        # fullmesh 3
        # 1 non-fullmesh addr in ns1, added before the connection,
        # 2 fullmesh addrs in ns2, added during the connection.
-       reset
-       pm_nl_set_limits $ns1 2 5
-       pm_nl_set_limits $ns2 1 5
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       run_tests $ns1 $ns2 10.0.1.1 0 0 fullmesh_2 slow
-       chk_join_nr "fullmesh test 1x2" 5 5 5
-       chk_add_nr 1 1
+       if reset "fullmesh test 1x2"; then
+               pm_nl_set_limits $ns1 2 5
+               pm_nl_set_limits $ns2 1 5
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               run_tests $ns1 $ns2 10.0.1.1 0 0 fullmesh_2 slow
+               chk_join_nr 5 5 5
+               chk_add_nr 1 1
+       fi
 
        # fullmesh 4
        # 1 non-fullmesh addr in ns1, added before the connection,
        # 2 fullmesh addrs in ns2, added during the connection,
        # limit max_subflows to 4.
-       reset
-       pm_nl_set_limits $ns1 2 4
-       pm_nl_set_limits $ns2 1 4
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
-       run_tests $ns1 $ns2 10.0.1.1 0 0 fullmesh_2 slow
-       chk_join_nr "fullmesh test 1x2, limited" 4 4 4
-       chk_add_nr 1 1
+       if reset "fullmesh test 1x2, limited"; then
+               pm_nl_set_limits $ns1 2 4
+               pm_nl_set_limits $ns2 1 4
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               run_tests $ns1 $ns2 10.0.1.1 0 0 fullmesh_2 slow
+               chk_join_nr 4 4 4
+               chk_add_nr 1 1
+       fi
 
        # set fullmesh flag
-       reset
-       pm_nl_set_limits $ns1 4 4
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags subflow
-       pm_nl_set_limits $ns2 4 4
-       run_tests $ns1 $ns2 10.0.1.1 0 0 1 slow fullmesh
-       chk_join_nr "set fullmesh flag test" 2 2 2
-       chk_rm_nr 0 1
+       if reset "set fullmesh flag test"; then
+               pm_nl_set_limits $ns1 4 4
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags subflow
+               pm_nl_set_limits $ns2 4 4
+               run_tests $ns1 $ns2 10.0.1.1 0 0 1 slow fullmesh
+               chk_join_nr 2 2 2
+               chk_rm_nr 0 1
+       fi
 
        # set nofullmesh flag
-       reset
-       pm_nl_set_limits $ns1 4 4
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags subflow,fullmesh
-       pm_nl_set_limits $ns2 4 4
-       run_tests $ns1 $ns2 10.0.1.1 0 0 fullmesh_1 slow nofullmesh
-       chk_join_nr "set nofullmesh flag test" 2 2 2
-       chk_rm_nr 0 1
+       if reset "set nofullmesh flag test"; then
+               pm_nl_set_limits $ns1 4 4
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags subflow,fullmesh
+               pm_nl_set_limits $ns2 4 4
+               run_tests $ns1 $ns2 10.0.1.1 0 0 fullmesh_1 slow nofullmesh
+               chk_join_nr 2 2 2
+               chk_rm_nr 0 1
+       fi
 
        # set backup,fullmesh flags
-       reset
-       pm_nl_set_limits $ns1 4 4
-       pm_nl_add_endpoint $ns1 10.0.2.1 flags subflow
-       pm_nl_set_limits $ns2 4 4
-       run_tests $ns1 $ns2 10.0.1.1 0 0 1 slow backup,fullmesh
-       chk_join_nr "set backup,fullmesh flags test" 2 2 2
-       chk_prio_nr 0 1
-       chk_rm_nr 0 1
+       if reset "set backup,fullmesh flags test"; then
+               pm_nl_set_limits $ns1 4 4
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags subflow
+               pm_nl_set_limits $ns2 4 4
+               run_tests $ns1 $ns2 10.0.1.1 0 0 1 slow backup,fullmesh
+               chk_join_nr 2 2 2
+               chk_prio_nr 0 1
+               chk_rm_nr 0 1
+       fi
 
        # set nobackup,nofullmesh flags
-       reset
-       pm_nl_set_limits $ns1 4 4
-       pm_nl_set_limits $ns2 4 4
-       pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow,backup,fullmesh
-       run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow nobackup,nofullmesh
-       chk_join_nr "set nobackup,nofullmesh flags test" 2 2 2
-       chk_prio_nr 0 1
-       chk_rm_nr 0 1
+       if reset "set nobackup,nofullmesh flags test"; then
+               pm_nl_set_limits $ns1 4 4
+               pm_nl_set_limits $ns2 4 4
+               pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow,backup,fullmesh
+               run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow nobackup,nofullmesh
+               chk_join_nr 2 2 2
+               chk_prio_nr 0 1
+               chk_rm_nr 0 1
+       fi
 }
 
 fastclose_tests()
 {
-       reset
-       run_tests $ns1 $ns2 10.0.1.1 1024 0 fastclose_2
-       chk_join_nr "fastclose test" 0 0 0
-       chk_fclose_nr 1 1
-       chk_rst_nr 1 1 invert
+       if reset "fastclose test"; then
+               run_tests $ns1 $ns2 10.0.1.1 1024 0 fastclose_2
+               chk_join_nr 0 0 0
+               chk_fclose_nr 1 1
+               chk_rst_nr 1 1 invert
+       fi
 }
 
-all_tests()
+implicit_tests()
 {
-       subflows_tests
-       subflows_error_tests
-       signal_address_tests
-       link_failure_tests
-       add_addr_timeout_tests
-       remove_tests
-       add_tests
-       ipv6_tests
-       v4mapped_tests
-       backup_tests
-       add_addr_ports_tests
-       syncookies_tests
-       checksum_tests
-       deny_join_id0_tests
-       fullmesh_tests
-       fastclose_tests
+       # implicit endpoint: creation, locked id and flag updates
+       if reset "implicit EP"; then
+               pm_nl_set_limits $ns1 2 2
+               pm_nl_set_limits $ns2 2 2
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+               run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow &
+
+               wait_mpj $ns1
+               pm_nl_check_endpoint 1 "creation" \
+                       $ns2 10.0.2.2 id 1 flags implicit
+
+               pm_nl_add_endpoint $ns2 10.0.2.2 id 33
+               pm_nl_check_endpoint 0 "ID change is prevented" \
+                       $ns2 10.0.2.2 id 1 flags implicit
+
+               pm_nl_add_endpoint $ns2 10.0.2.2 flags signal
+               pm_nl_check_endpoint 0 "modification is allowed" \
+                       $ns2 10.0.2.2 id 1 flags signal
+               wait
+       fi
 }
 
 # [$1: error message]
@@ -2269,108 +2589,69 @@ usage()
        fi
 
        echo "mptcp_join usage:"
-       echo "  -f subflows_tests"
-       echo "  -e subflows_error_tests"
-       echo "  -s signal_address_tests"
-       echo "  -l link_failure_tests"
-       echo "  -t add_addr_timeout_tests"
-       echo "  -r remove_tests"
-       echo "  -a add_tests"
-       echo "  -6 ipv6_tests"
-       echo "  -4 v4mapped_tests"
-       echo "  -b backup_tests"
-       echo "  -p add_addr_ports_tests"
-       echo "  -k syncookies_tests"
-       echo "  -S checksum_tests"
-       echo "  -d deny_join_id0_tests"
-       echo "  -m fullmesh_tests"
-       echo "  -z fastclose_tests"
+
+       local key
+       for key in "${!all_tests[@]}"; do
+               echo "  -${key} ${all_tests[${key}]}"
+       done
+
        echo "  -c capture pcap files"
        echo "  -C enable data checksum"
        echo "  -i use ip mptcp"
        echo "  -h help"
 
+       echo "[test ids|names]"
+
        exit ${ret}
 }
 
-for arg in "$@"; do
-       # check for "capture/checksum" args before launching tests
-       if [[ "${arg}" =~ ^"-"[0-9a-zA-Z]*"c"[0-9a-zA-Z]*$ ]]; then
-               capture=1
-       fi
-       if [[ "${arg}" =~ ^"-"[0-9a-zA-Z]*"C"[0-9a-zA-Z]*$ ]]; then
-               checksum=1
-       fi
-       if [[ "${arg}" =~ ^"-"[0-9a-zA-Z]*"i"[0-9a-zA-Z]*$ ]]; then
-               ip_mptcp=1
-       fi
 
-       # exception for the capture/checksum/ip_mptcp options, the rest means: a part of the tests
-       if [ "${arg}" != "-c" ] && [ "${arg}" != "-C" ] && [ "${arg}" != "-i" ]; then
-               do_all_tests=0
-       fi
+# Use a "simple" array to force a specific order we cannot have with an associative one
+all_tests_sorted=(
+       f@subflows_tests
+       e@subflows_error_tests
+       s@signal_address_tests
+       l@link_failure_tests
+       t@add_addr_timeout_tests
+       r@remove_tests
+       a@add_tests
+       6@ipv6_tests
+       4@v4mapped_tests
+       b@backup_tests
+       p@add_addr_ports_tests
+       k@syncookies_tests
+       S@checksum_tests
+       d@deny_join_id0_tests
+       m@fullmesh_tests
+       z@fastclose_tests
+       I@implicit_tests
+)
+
+all_tests_args=""
+all_tests_names=()
+for subtests in "${all_tests_sorted[@]}"; do
+       key="${subtests%@*}"
+       value="${subtests#*@}"
+
+       all_tests_args+="${key}"
+       all_tests_names+=("${value}")
+       all_tests[${key}]="${value}"
 done
 
-if [ $do_all_tests -eq 1 ]; then
-       all_tests
-       exit $ret
-fi
-
-while getopts 'fesltra64bpkdmchzCSi' opt; do
+tests=()
+while getopts "${all_tests_args}cCih" opt; do
        case $opt in
-               f)
-                       subflows_tests
-                       ;;
-               e)
-                       subflows_error_tests
-                       ;;
-               s)
-                       signal_address_tests
-                       ;;
-               l)
-                       link_failure_tests
-                       ;;
-               t)
-                       add_addr_timeout_tests
-                       ;;
-               r)
-                       remove_tests
-                       ;;
-               a)
-                       add_tests
-                       ;;
-               6)
-                       ipv6_tests
-                       ;;
-               4)
-                       v4mapped_tests
-                       ;;
-               b)
-                       backup_tests
-                       ;;
-               p)
-                       add_addr_ports_tests
-                       ;;
-               k)
-                       syncookies_tests
-                       ;;
-               S)
-                       checksum_tests
-                       ;;
-               d)
-                       deny_join_id0_tests
-                       ;;
-               m)
-                       fullmesh_tests
-                       ;;
-               z)
-                       fastclose_tests
+               ["${all_tests_args}"])
+                       tests+=("${all_tests[${opt}]}")
                        ;;
                c)
+                       capture=1
                        ;;
                C)
+                       checksum=1
                        ;;
                i)
+                       ip_mptcp=1
                        ;;
                h)
                        usage
@@ -2381,4 +2662,31 @@ while getopts 'fesltra64bpkdmchzCSi' opt; do
        esac
 done
 
+shift $((OPTIND - 1))
+
+for arg in "${@}"; do
+       if [[ "${arg}" =~ ^[0-9]+$ ]]; then
+               only_tests_ids+=("${arg}")
+       else
+               only_tests_names+=("${arg}")
+       fi
+done
+
+if [ ${#tests[@]} -eq 0 ]; then
+       tests=("${all_tests_names[@]}")
+fi
+
+for subtests in "${tests[@]}"; do
+       "${subtests}"
+done
+
+if [ ${ret} -ne 0 ]; then
+       echo
+       echo "${#failed_tests[@]} failure(s) detected:"
+       for i in $(get_failed_tests_ids); do
+               echo -e "\t- ${i}: ${failed_tests[${i}]}"
+       done
+       echo
+fi
+
 exit $ret
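
The machinery that makes the "if reset <name>; then ... fi" pattern work sits
mostly outside these hunks: reset() and the reset_with_*() wrappers now take
the test name as their first argument and return non-zero when the current
test was not selected, so a deselected test body is skipped wholesale. A
minimal sketch of such a gate, where only_tests_ids and only_tests_names come
from the hunks above while skip_test(), TEST_COUNT and init_env_for_test()
are illustrative assumptions, not the actual implementation:

	# Return 0 ("skip") unless the test matches a selected id or name.
	skip_test()
	{
		local i
		# nothing was selected on the command line: run everything
		if [ ${#only_tests_ids[@]} -eq 0 ] && \
		   [ ${#only_tests_names[@]} -eq 0 ]; then
			return 1
		fi
		for i in "${only_tests_ids[@]}"; do
			[ "${TEST_COUNT}" -eq "${i}" ] && return 1
		done
		for i in "${only_tests_names[@]}"; do
			[ "${1}" = "${i}" ] && return 1
		done
		return 0
	}

	reset()
	{
		TEST_COUNT=$((TEST_COUNT + 1))
		skip_test "${1}" && return 1   # caller's if-block not entered
		init_env_for_test              # netns setup, counters, capture
		return 0
	}

Note also the key@value encoding in all_tests_sorted: "${subtests%@*}" drops
the shortest suffix matching "@*" and leaves the option letter, while
"${subtests#*@}" drops the shortest prefix matching "*@" and leaves the
function name. With all of this in place, "./mptcp_join.sh -r" still runs all
of remove_tests, while "./mptcp_join.sh 5" or "./mptcp_join.sh 'remove single
address'" selects a single test by id or name.
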
index 22a5ec1..a75a68a 100644 (file)
@@ -436,6 +436,13 @@ static void print_addr(struct rtattr *attrs, int len)
                                        printf(",");
                        }
 
+                       if (flags & MPTCP_PM_ADDR_FLAG_IMPLICIT) {
+                               printf("implicit");
+                               flags &= ~MPTCP_PM_ADDR_FLAG_IMPLICIT;
+                               if (flags)
+                                       printf(",");
+                       }
+
                        /* bump unknown flags, if any */
                        if (flags)
                                printf("0x%x", flags);
index 543ad75..694732e 100755 (executable)
@@ -374,6 +374,16 @@ run_cmd() {
        return $rc
 }
 
+run_cmd_bg() {
+       cmd="$*"
+
+       if [ "$VERBOSE" = "1" ]; then
+               printf "    COMMAND: %s &\n" "${cmd}"
+       fi
+
+       $cmd 2>&1 &
+}
+
 # Find the auto-generated name for this namespace
 nsname() {
        eval echo \$NS_$1
@@ -670,10 +680,10 @@ setup_nettest_xfrm() {
        [ ${1} -eq 6 ] && proto="-6" || proto=""
        port=${2}
 
-       run_cmd ${ns_a} nettest ${proto} -q -D -s -x -p ${port} -t 5 &
+       run_cmd_bg "${ns_a}" nettest "${proto}" -q -D -s -x -p "${port}" -t 5
        nettest_pids="${nettest_pids} $!"
 
-       run_cmd ${ns_b} nettest ${proto} -q -D -s -x -p ${port} -t 5 &
+       run_cmd_bg "${ns_b}" nettest "${proto}" -q -D -s -x -p "${port}" -t 5
        nettest_pids="${nettest_pids} $!"
 }
 
@@ -865,7 +875,6 @@ setup_ovs_bridge() {
 setup() {
        [ "$(id -u)" -ne 0 ] && echo "  need to run as root" && return $ksft_skip
 
-       cleanup
        for arg do
                eval setup_${arg} || { echo "  ${arg} not supported"; return 1; }
        done
@@ -876,7 +885,7 @@ trace() {
 
        for arg do
                [ "${ns_cmd}" = "" ] && ns_cmd="${arg}" && continue
-               ${ns_cmd} tcpdump -s 0 -i "${arg}" -w "${name}_${arg}.pcap" 2> /dev/null &
+               ${ns_cmd} tcpdump --immediate-mode -s 0 -i "${arg}" -w "${name}_${arg}.pcap" 2> /dev/null &
                tcpdump_pids="${tcpdump_pids} $!"
                ns_cmd=
        done
@@ -1836,6 +1845,10 @@ run_test() {
 
        unset IFS
 
+       # Since cleanup() relies on variables modified by this subshell, it
+       # has to run in this context.
+       trap cleanup EXIT
+
        if [ "$VERBOSE" = "1" ]; then
                printf "\n##########################################################################\n\n"
        fi
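
The new run_cmd_bg() helper exists because the old "run_cmd ... &" form
backgrounds the entire helper call in a subshell: "$!" then records the
subshell's pid rather than nettest's, so the pids collected in nettest_pids
would not reliably identify the server processes that cleanup later needs to
kill. Backgrounding only the command inside the helper keeps "$!" pointing at
the command itself. This reading of the motivation is inferred from the hunks
above; a minimal usage sketch:

	# old: $! is the pid of the subshell wrapping run_cmd
	run_cmd "${ns_a}" nettest -q -D -s &

	# new: $! is the pid of the nettest process itself
	run_cmd_bg "${ns_a}" nettest -q -D -s
	nettest_pids="${nettest_pids} $!"
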
index 3653d64..1a736f7 100644 (file)
@@ -53,6 +53,7 @@
 #include <unistd.h>
 
 #include "psock_lib.h"
+#include "../kselftest.h"
 
 #define RING_NUM_FRAMES                        20
 
@@ -117,7 +118,7 @@ static void sock_fanout_set_cbpf(int fd)
        struct sock_fprog bpf_prog;
 
        bpf_prog.filter = bpf_filter;
-       bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
+       bpf_prog.len = ARRAY_SIZE(bpf_filter);
 
        if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &bpf_prog,
                       sizeof(bpf_prog))) {
@@ -162,7 +163,7 @@ static void sock_fanout_set_ebpf(int fd)
        memset(&attr, 0, sizeof(attr));
        attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
        attr.insns = (unsigned long) prog;
-       attr.insn_cnt = sizeof(prog) / sizeof(prog[0]);
+       attr.insn_cnt = ARRAY_SIZE(prog);
        attr.license = (unsigned long) "GPL";
        attr.log_buf = (unsigned long) log_buf,
        attr.log_size = sizeof(log_buf),
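
The open-coded sizeof division is replaced by the ARRAY_SIZE() macro provided
by the newly included ../kselftest.h. It yields the element count without
repeating the element type (sizeof(struct sock_filter) above), which can
silently go stale if the array's type ever changes. The conventional
definition, which kselftest.h is expected to supply, is:

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

The same substitution recurs in the hunks that follow (attach_bpf() and the
toeplitz filter setup).
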
index b2eebf6..c9ba36a 100644 (file)
@@ -86,7 +86,7 @@ static void attach_bpf(int fd)
 
        memset(&attr, 0, sizeof(attr));
        attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
-       attr.insn_cnt = sizeof(prog) / sizeof(prog[0]);
+       attr.insn_cnt = ARRAY_SIZE(prog);
        attr.insns = (unsigned long) &prog;
        attr.license = (unsigned long) &bpf_license;
        attr.log_buf = (unsigned long) &bpf_log_buf;
index c548934..90026a2 100644 (file)
@@ -52,6 +52,8 @@
 #include <sys/types.h>
 #include <unistd.h>
 
+#include "../kselftest.h"
+
 #define TOEPLITZ_KEY_MIN_LEN   40
 #define TOEPLITZ_KEY_MAX_LEN   60
 
@@ -295,7 +297,7 @@ static void __set_filter(int fd, int off_proto, uint8_t proto, int off_dport)
        struct sock_fprog prog = {};
 
        prog.filter = filter;
-       prog.len = sizeof(filter) / sizeof(struct sock_filter);
+       prog.len = ARRAY_SIZE(filter);
        if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)))
                error(1, errno, "setsockopt filter");
 }
@@ -324,7 +326,7 @@ static void set_filter_null(int fd)
        struct sock_fprog prog = {};
 
        prog.filter = filter;
-       prog.len = sizeof(filter) / sizeof(struct sock_filter);
+       prog.len = ARRAY_SIZE(filter);
        if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)))
                error(1, errno, "setsockopt filter");
 }
index fabb1d5..10f2fde 100644 (file)
@@ -161,7 +161,7 @@ static void validate_timestamp(struct timespec *cur, int min_delay)
        max_delay = min_delay + cfg_delay_tolerance_usec;
 
        if (cur64 < start64 + min_delay || cur64 > start64 + max_delay) {
-               fprintf(stderr, "ERROR: %lu us expected between %d and %d\n",
+               fprintf(stderr, "ERROR: %" PRId64 " us expected between %d and %d\n",
                                cur64 - start64, min_delay, max_delay);
                test_failed = true;
        }
@@ -170,9 +170,9 @@ static void validate_timestamp(struct timespec *cur, int min_delay)
 static void __print_ts_delta_formatted(int64_t ts_delta)
 {
        if (cfg_print_nsec)
-               fprintf(stderr, "%lu ns", ts_delta);
+               fprintf(stderr, "%" PRId64 " ns", ts_delta);
        else
-               fprintf(stderr, "%lu us", ts_delta / NSEC_PER_USEC);
+               fprintf(stderr, "%" PRId64 " us", ts_delta / NSEC_PER_USEC);
 }
 
 static void __print_timestamp(const char *name, struct timespec *cur,
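
Here int64_t values were printed with "%lu", which only happens to match
where long is 64 bits wide and is a width and signedness mismatch on 32-bit
builds. PRId64 from <inttypes.h> expands to the correct conversion for
int64_t on every ABI. A self-contained illustration:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t delta = -42;

		/* PRId64 expands to "ld" or "lld" as the ABI requires */
		printf("delta: %" PRId64 " us\n", delta);
		return 0;
	}
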
index 79fe627..eb8543b 100755 (executable)
@@ -880,9 +880,8 @@ EOF
                return $ksft_skip
        fi
 
-       # test default behaviour. Packet from ns1 to ns0 is not redirected
-       # due to automatic port translation.
-       test_port_shadow "default" "ROUTER"
+       # test default behaviour. Packet from ns1 to ns0 is redirected to ns2.
+       test_port_shadow "default" "CLIENT"
 
        # test packet filter based mitigation: prevent forwarding of
        # packets claiming to come from the service port.
index ea04f04..ccb0f06 100644 (file)
@@ -21,7 +21,7 @@ NAMES = {
           'BATCH_FILE': './batch.txt',
           'BATCH_DIR': 'tmp',
           # Length of time in seconds to wait before terminating a command
-          'TIMEOUT': 12,
+          'TIMEOUT': 24,
           # Name of the namespace to use
           'NS': 'tcut',
           # Directory containing eBPF test programs
index 1607322..a14b5b8 100644 (file)
@@ -1,6 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for vm selftests
 
+LOCAL_HDRS += $(selfdir)/vm/local_config.h $(top_srcdir)/mm/gup_test.h
+
 include local_config.mk
 
 uname_M := $(shell uname -m 2>/dev/null || echo not)
@@ -140,10 +142,6 @@ endif
 
 $(OUTPUT)/mlock-random-test $(OUTPUT)/memfd_secret: LDLIBS += -lcap
 
-$(OUTPUT)/gup_test: ../../../../mm/gup_test.h
-
-$(OUTPUT)/hmm-tests: local_config.h
-
 # HMM_EXTRA_LIBS may get set in local_config.mk, or it may be left empty.
 $(OUTPUT)/hmm-tests: LDLIBS += $(HMM_EXTRA_LIBS)
 
index 2a7c336..1d68908 100644 (file)
@@ -3,9 +3,10 @@
  * hugepage-mremap:
  *
  * Example of remapping huge page memory in a user application using the
- * mremap system call.  Code assumes a hugetlbfs filesystem is mounted
- * at './huge'.  The amount of memory used by this test is decided by a command
- * line argument in MBs. If missing, the default amount is 10MB.
+ * mremap system call.  The path to a file in a hugetlbfs filesystem must
+ * be passed as the last argument to this test.  The amount of memory used
+ * by this test in MBs can optionally be passed as an argument.  If no memory
+ * amount is passed, the default amount is 10MB.
  *
  * To make sure the test triggers pmd sharing and goes through the 'unshare'
  * path in the mremap code use 1GB (1024) or more.
@@ -25,7 +26,6 @@
 #define DEFAULT_LENGTH_MB 10UL
 #define MB_TO_BYTES(x) (x * 1024 * 1024)
 
-#define FILE_NAME "huge/hugepagefile"
 #define PROTECTION (PROT_READ | PROT_WRITE | PROT_EXEC)
 #define FLAGS (MAP_SHARED | MAP_ANONYMOUS)
 
@@ -107,17 +107,26 @@ static void register_region_with_uffd(char *addr, size_t len)
 
 int main(int argc, char *argv[])
 {
+       size_t length = 0;
+
+       if (argc != 2 && argc != 3) {
+               printf("Usage: %s [length_in_MB] <hugetlb_file>\n", argv[0]);
+               exit(1);
+       }
+
        /* Read memory length as the first arg if valid, otherwise fallback to
-        * the default length. Any additional args are ignored.
+        * the default length.
         */
-       size_t length = argc > 1 ? (size_t)atoi(argv[1]) : 0UL;
+       if (argc == 3)
+               length = (size_t)atoi(argv[1]);
 
        length = length > 0 ? length : DEFAULT_LENGTH_MB;
        length = MB_TO_BYTES(length);
 
        int ret = 0;
 
-       int fd = open(FILE_NAME, O_CREAT | O_RDWR, 0755);
+       /* last arg is the hugetlb file name */
+       int fd = open(argv[argc-1], O_CREAT | O_RDWR, 0755);
 
        if (fd < 0) {
                perror("Open failed");
@@ -169,5 +178,8 @@ int main(int argc, char *argv[])
 
        munmap(addr, length);
 
+       close(fd);
+       unlink(argv[argc-1]);
+
        return ret;
 }
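A side note on the new argument handling: atoi() returns 0 on non-numeric
input, which the code above silently turns into the 10MB default. A caller
that wants to reject malformed sizes outright could use strtoul(); a hedged
sketch (the helper name is illustrative, not from the patch):

    #include <errno.h>
    #include <stdlib.h>

    /* Parse a decimal size in MB; clears *ok and returns 0 on bad input. */
    static size_t parse_length_mb(const char *arg, int *ok)
    {
            char *end;
            unsigned long val;

            errno = 0;
            val = strtoul(arg, &end, 10);
            *ok = (errno == 0 && end != arg && *end == '\0');
            return *ok ? (size_t)val : 0;
    }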
index 75d4017..71d2dc1 100755 (executable)
@@ -111,13 +111,14 @@ fi
 echo "-----------------------"
 echo "running hugepage-mremap"
 echo "-----------------------"
-./hugepage-mremap 256
+./hugepage-mremap $mnt/huge_mremap
 if [ $? -ne 0 ]; then
        echo "[FAIL]"
        exitcode=1
 else
        echo "[PASS]"
 fi
+rm -f $mnt/huge_mremap
 
 echo "NOTE: The above hugetlb tests provide minimal coverage.  Use"
 echo "      https://github.com/libhugetlbfs/libhugetlbfs.git for"
index 2f49c9a..3fc1d2e 100644 (file)
@@ -46,6 +46,7 @@
 #include <signal.h>
 #include <poll.h>
 #include <string.h>
+#include <linux/mman.h>
 #include <sys/mman.h>
 #include <sys/syscall.h>
 #include <sys/ioctl.h>
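The added #include <linux/mman.h> follows a common selftest pattern: the
libc's <sys/mman.h> provides the mmap() prototype and core MAP_* flags,
while the kernel uapi header supplies newer definitions that older libcs
may not ship. An example of the kind of flag that lives there (chosen for
illustration, not taken from this hunk):

    #include <sys/mman.h>   /* mmap() prototype, core MAP_* flags */
    #include <linux/mman.h> /* uapi extras such as MAP_HUGE_2MB */
    #include <stdio.h>

    int main(void)
    {
            /* Request 2 MiB huge-page backing; fails cleanly when no
             * huge pages are reserved on the system. */
            void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
                           MAP_HUGE_2MB, -1, 0);
            if (p == MAP_FAILED)
                    perror("mmap(MAP_HUGETLB)");
            else
                    munmap(p, 2UL << 20);
            return 0;
    }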
index 2a3638c..dc57746 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/kernel.h>
 #include <sys/types.h>
 #include <sys/socket.h>
+#include <time.h>
+#include <sys/mman.h>
 
 #include "timeout.h"
 #include "control.h"
@@ -391,6 +393,209 @@ static void test_seqpacket_msg_trunc_server(const struct test_opts *opts)
        close(fd);
 }
 
+static time_t current_nsec(void)
+{
+       struct timespec ts;
+
+       if (clock_gettime(CLOCK_REALTIME, &ts)) {
+               perror("clock_gettime(3) failed");
+               exit(EXIT_FAILURE);
+       }
+
+       return (ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
+}
+
+#define RCVTIMEO_TIMEOUT_SEC 1
+#define READ_OVERHEAD_NSEC 250000000 /* 0.25 sec */
+
+static void test_seqpacket_timeout_client(const struct test_opts *opts)
+{
+       int fd;
+       struct timeval tv;
+       char dummy;
+       time_t read_enter_ns;
+       time_t read_overhead_ns;
+
+       fd = vsock_seqpacket_connect(opts->peer_cid, 1234);
+       if (fd < 0) {
+               perror("connect");
+               exit(EXIT_FAILURE);
+       }
+
+       tv.tv_sec = RCVTIMEO_TIMEOUT_SEC;
+       tv.tv_usec = 0;
+
+       if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, (void *)&tv, sizeof(tv)) == -1) {
+               perror("setsockopt 'SO_RCVTIMEO'");
+               exit(EXIT_FAILURE);
+       }
+
+       read_enter_ns = current_nsec();
+
+       if (read(fd, &dummy, sizeof(dummy)) != -1) {
+               fprintf(stderr,
+                       "expected 'dummy' read(2) failure\n");
+               exit(EXIT_FAILURE);
+       }
+
+       if (errno != EAGAIN) {
+               perror("EAGAIN expected");
+               exit(EXIT_FAILURE);
+       }
+
+       read_overhead_ns = current_nsec() - read_enter_ns -
+                       1000000000ULL * RCVTIMEO_TIMEOUT_SEC;
+
+       if (read_overhead_ns > READ_OVERHEAD_NSEC) {
+               fprintf(stderr,
+                       "too much time in read(2), %ld > %i ns\n",
+                       (long)read_overhead_ns, READ_OVERHEAD_NSEC);
+               exit(EXIT_FAILURE);
+       }
+
+       control_writeln("WAITDONE");
+       close(fd);
+}
+
+static void test_seqpacket_timeout_server(const struct test_opts *opts)
+{
+       int fd;
+
+       fd = vsock_seqpacket_accept(VMADDR_CID_ANY, 1234, NULL);
+       if (fd < 0) {
+               perror("accept");
+               exit(EXIT_FAILURE);
+       }
+
+       control_expectln("WAITDONE");
+       close(fd);
+}
+
+#define BUF_PATTERN_1 'a'
+#define BUF_PATTERN_2 'b'
+
+static void test_seqpacket_invalid_rec_buffer_client(const struct test_opts *opts)
+{
+       int fd;
+       unsigned char *buf1;
+       unsigned char *buf2;
+       int buf_size = getpagesize() * 3;
+
+       fd = vsock_seqpacket_connect(opts->peer_cid, 1234);
+       if (fd < 0) {
+               perror("connect");
+               exit(EXIT_FAILURE);
+       }
+
+       buf1 = malloc(buf_size);
+       if (!buf1) {
+               perror("'malloc()' for 'buf1'");
+               exit(EXIT_FAILURE);
+       }
+
+       buf2 = malloc(buf_size);
+       if (!buf2) {
+               perror("'malloc()' for 'buf2'");
+               exit(EXIT_FAILURE);
+       }
+
+       memset(buf1, BUF_PATTERN_1, buf_size);
+       memset(buf2, BUF_PATTERN_2, buf_size);
+
+       if (send(fd, buf1, buf_size, 0) != buf_size) {
+               perror("send failed");
+               exit(EXIT_FAILURE);
+       }
+
+       if (send(fd, buf2, buf_size, 0) != buf_size) {
+               perror("send failed");
+               exit(EXIT_FAILURE);
+       }
+
+       close(fd);
+}
+
+static void test_seqpacket_invalid_rec_buffer_server(const struct test_opts *opts)
+{
+       int fd;
+       unsigned char *broken_buf;
+       unsigned char *valid_buf;
+       int page_size = getpagesize();
+       int buf_size = page_size * 3;
+       ssize_t res;
+       int prot = PROT_READ | PROT_WRITE;
+       int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+       int i;
+
+       fd = vsock_seqpacket_accept(VMADDR_CID_ANY, 1234, NULL);
+       if (fd < 0) {
+               perror("accept");
+               exit(EXIT_FAILURE);
+       }
+
+       /* Setup first buffer. */
+       broken_buf = mmap(NULL, buf_size, prot, flags, -1, 0);
+       if (broken_buf == MAP_FAILED) {
+               perror("mmap for 'broken_buf'");
+               exit(EXIT_FAILURE);
+       }
+
+       /* Unmap "hole" in buffer. */
+       if (munmap(broken_buf + page_size, page_size)) {
+               perror("'broken_buf' setup");
+               exit(EXIT_FAILURE);
+       }
+
+       valid_buf = mmap(NULL, buf_size, prot, flags, -1, 0);
+       if (valid_buf == MAP_FAILED) {
+               perror("mmap for 'valid_buf'");
+               exit(EXIT_FAILURE);
+       }
+
+       /* Try to fill buffer with unmapped middle. */
+       res = read(fd, broken_buf, buf_size);
+       if (res != -1) {
+               fprintf(stderr,
+                       "expected 'broken_buf' read(2) failure, got %zi\n",
+                       res);
+               exit(EXIT_FAILURE);
+       }
+
+       if (errno != ENOMEM) {
+               perror("unexpected errno of 'broken_buf'");
+               exit(EXIT_FAILURE);
+       }
+
+       /* Try to fill valid buffer. */
+       res = read(fd, valid_buf, buf_size);
+       if (res < 0) {
+               perror("unexpected 'valid_buf' read(2) failure");
+               exit(EXIT_FAILURE);
+       }
+
+       if (res != buf_size) {
+               fprintf(stderr,
+                       "invalid 'valid_buf' read(2), expected %i, got %zi\n",
+                       buf_size, res);
+               exit(EXIT_FAILURE);
+       }
+
+       for (i = 0; i < buf_size; i++) {
+               if (valid_buf[i] != BUF_PATTERN_2) {
+                       fprintf(stderr,
+                               "invalid pattern for 'valid_buf' at %i, expected %hhX, got %hhX\n",
+                               i, BUF_PATTERN_2, valid_buf[i]);
+                       exit(EXIT_FAILURE);
+               }
+       }
+
+       /* Unmap buffers. */
+       munmap(broken_buf, page_size);
+       munmap(broken_buf + page_size * 2, page_size);
+       munmap(valid_buf, buf_size);
+       close(fd);
+}
+
 static struct test_case test_cases[] = {
        {
                .name = "SOCK_STREAM connection reset",
@@ -431,6 +636,16 @@ static struct test_case test_cases[] = {
                .run_client = test_seqpacket_msg_trunc_client,
                .run_server = test_seqpacket_msg_trunc_server,
        },
+       {
+               .name = "SOCK_SEQPACKET timeout",
+               .run_client = test_seqpacket_timeout_client,
+               .run_server = test_seqpacket_timeout_server,
+       },
+       {
+               .name = "SOCK_SEQPACKET invalid receive buffer",
+               .run_client = test_seqpacket_invalid_rec_buffer_client,
+               .run_server = test_seqpacket_invalid_rec_buffer_server,
+       },
        {},
 };
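One caveat on the SOCK_SEQPACKET timeout test above: current_nsec() samples
CLOCK_REALTIME, which can be stepped by NTP or settimeofday() while the
test is blocked in read(2), and it returns nanoseconds through time_t,
which assumes a 64-bit time_t. For pure interval measurement,
CLOCK_MONOTONIC sidesteps the first problem; a sketch of the alternative
(an observation on the test, not part of the patch):

    #include <stdint.h>
    #include <time.h>

    /* Monotonic timestamp in ns: immune to wall-clock steps, so interval
     * arithmetic such as "now - start" stays well defined. */
    static uint64_t monotonic_nsec(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
    }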
 
diff --git a/tools/virtio/linux/mm_types.h b/tools/virtio/linux/mm_types.h
new file mode 100644 (file)
index 0000000..356ba4d
--- /dev/null
@@ -0,0 +1,3 @@
+struct folio {
+       struct page page;
+};
index cb3f29c..23f142a 100644 (file)
@@ -130,6 +130,7 @@ static void vdev_info_init(struct vdev_info* dev, unsigned long long features)
        memset(dev, 0, sizeof *dev);
        dev->vdev.features = features;
        INIT_LIST_HEAD(&dev->vdev.vqs);
+       spin_lock_init(&dev->vdev.vqs_list_lock);
        dev->buf_size = 1024;
        dev->buf = malloc(dev->buf_size);
        assert(dev->buf);
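The spin_lock_init() addition matches the vqs_list_lock that the virtio
core takes while walking vdev.vqs; leaving a lock uninitialized before the
first path that can acquire it is the classic init-order bug this hunk
fixes. The same pattern sketched in plain pthreads (illustrative, not the
harness code):

    #include <pthread.h>
    #include <stddef.h>

    struct node;

    struct dev {
            struct node *vqs;        /* list guarded by vqs_lock */
            pthread_mutex_t vqs_lock;
    };

    static void dev_init(struct dev *d)
    {
            d->vqs = NULL;
            /* Initialize every lock that guards a list before any code
             * that can take it runs, as the hunk above does. */
            pthread_mutex_init(&d->vqs_lock, NULL);
    }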