Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
author: David S. Miller <davem@davemloft.net>
Sat, 2 Nov 2019 22:27:42 +0000 (15:27 -0700)
committer: David S. Miller <davem@davemloft.net>
Sat, 2 Nov 2019 22:29:58 +0000 (15:29 -0700)
Alexei Starovoitov says:

====================
pull-request: bpf-next 2019-11-02

The following pull-request contains BPF updates for your *net-next* tree.

We've added 30 non-merge commits during the last 7 day(s) which contain
a total of 41 files changed, 1864 insertions(+), 474 deletions(-).

The main changes are:

1) Fix long standing user vs kernel access issue by introducing
   bpf_probe_read_user() and bpf_probe_read_kernel() helpers, from Daniel.

2) Accelerated xskmap lookup, from Björn and Maciej.

3) Support for automatic map pinning in libbpf, from Toke.

4) Cleanup of BTF-enabled raw tracepoints, from Alexei.

5) Various fixes to libbpf and selftests.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
830 files changed:
.mailmap
Documentation/ABI/testing/sysfs-class-net-statistics
Documentation/arm64/silicon-errata.rst
Documentation/devicetree/bindings/arm/rockchip.yaml
Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt
Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
Documentation/devicetree/bindings/net/nfc/pn532.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt [deleted file]
Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
Documentation/devicetree/bindings/riscv/cpus.yaml
Documentation/networking/device_drivers/freescale/dpaa2/index.rst
Documentation/networking/device_drivers/freescale/dpaa2/mac-phy-support.rst [new file with mode: 0644]
Documentation/networking/device_drivers/intel/e100.rst
Documentation/networking/device_drivers/intel/e1000.rst
Documentation/networking/device_drivers/intel/e1000e.rst
Documentation/networking/device_drivers/intel/fm10k.rst
Documentation/networking/device_drivers/intel/i40e.rst
Documentation/networking/device_drivers/intel/iavf.rst
Documentation/networking/device_drivers/intel/ice.rst
Documentation/networking/device_drivers/intel/igb.rst
Documentation/networking/device_drivers/intel/igbvf.rst
Documentation/networking/device_drivers/intel/ixgbe.rst
Documentation/networking/device_drivers/intel/ixgbevf.rst
Documentation/networking/device_drivers/pensando/ionic.rst
Documentation/networking/devlink-params-mv88e6xxx.txt [new file with mode: 0644]
Documentation/networking/ip-sysctl.txt
MAINTAINERS
Makefile
arch/arc/boot/dts/hsdk.dts
arch/arc/configs/hsdk_defconfig
arch/arc/kernel/perf_event.c
arch/arm/boot/dts/am3874-iceboard.dts
arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
arch/arm/boot/dts/bcm2837-rpi-cm3.dtsi
arch/arm/boot/dts/imx6-logicpd-som.dtsi
arch/arm/boot/dts/imx7s.dtsi
arch/arm/boot/dts/logicpd-torpedo-som.dtsi
arch/arm/boot/dts/omap4-droid4-xt894.dts
arch/arm/boot/dts/omap4-panda-common.dtsi
arch/arm/boot/dts/omap4-sdp.dts
arch/arm/boot/dts/omap4-var-som-om44-wlan.dtsi
arch/arm/boot/dts/omap5-board-common.dtsi
arch/arm/boot/dts/omap54xx-clocks.dtsi
arch/arm/boot/dts/stm32mp157-pinctrl.dtsi
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/boot/dts/vf610-zii-scu4-aib.dts
arch/arm/configs/davinci_all_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/include/asm/domain.h
arch/arm/include/asm/uaccess.h
arch/arm/kernel/head-common.S
arch/arm/kernel/head-nommu.S
arch/arm/mach-davinci/dm365.c
arch/arm/mach-omap2/pdata-quirks.c
arch/arm/mm/alignment.c
arch/arm/mm/proc-v7m.S
arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
arch/arm64/boot/dts/broadcom/stingray/stingray-pinctrl.dtsi
arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
arch/arm64/boot/dts/freescale/imx8mm.dtsi
arch/arm64/boot/dts/freescale/imx8mn.dtsi
arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
arch/arm64/boot/dts/freescale/imx8mq.dtsi
arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts
arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/pgtable-prot.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kvm/sys_regs.c
arch/mips/bcm63xx/prom.c
arch/mips/include/asm/bmips.h
arch/mips/include/asm/vdso/gettimeofday.h
arch/mips/kernel/smp-bmips.c
arch/mips/mm/tlbex.c
arch/parisc/kernel/entry.S
arch/powerpc/kvm/book3s_xive.c
arch/powerpc/kvm/book3s_xive.h
arch/powerpc/kvm/book3s_xive_native.c
arch/riscv/include/asm/bug.h
arch/riscv/include/asm/io.h
arch/riscv/include/asm/irq.h
arch/riscv/include/asm/pgtable.h
arch/riscv/include/asm/switch_to.h
arch/riscv/kernel/cpufeature.c
arch/riscv/kernel/head.h [new file with mode: 0644]
arch/riscv/kernel/irq.c
arch/riscv/kernel/module-sections.c
arch/riscv/kernel/process.c
arch/riscv/kernel/ptrace.c
arch/riscv/kernel/reset.c
arch/riscv/kernel/setup.c
arch/riscv/kernel/signal.c
arch/riscv/kernel/smp.c
arch/riscv/kernel/smpboot.c
arch/riscv/kernel/syscall_table.c
arch/riscv/kernel/time.c
arch/riscv/kernel/traps.c
arch/riscv/kernel/vdso.c
arch/riscv/mm/context.c
arch/riscv/mm/fault.c
arch/riscv/mm/init.c
arch/riscv/mm/sifive_l2_cache.c
arch/s390/boot/startup.c
arch/s390/kernel/machine_kexec_reloc.c
arch/um/drivers/ubd_kern.c
arch/x86/boot/compressed/acpi.c
arch/x86/boot/compressed/eboot.c
arch/x86/boot/compressed/misc.c
arch/x86/events/amd/ibs.c
arch/x86/events/intel/pt.c
arch/x86/events/intel/uncore.c
arch/x86/events/intel/uncore.h
arch/x86/hyperv/hv_apic.c
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/vmware.h
arch/x86/kernel/apic/x2apic_cluster.c
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/head64.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/nested.h
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/xen/enlighten_pv.c
block/blk-iocost.c
drivers/acpi/nfit/core.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_perflib.c
drivers/acpi/processor_thermal.c
drivers/amba/bus.c
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/ata/libahci_platform.c
drivers/atm/firestream.c
drivers/base/power/qos.c
drivers/block/nbd.c
drivers/bus/fsl-mc/dprc-driver.c
drivers/bus/fsl-mc/dprc.c
drivers/bus/fsl-mc/fsl-mc-bus.c
drivers/bus/fsl-mc/fsl-mc-private.h
drivers/bus/ti-sysc.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
drivers/cpuidle/cpuidle-haltpoll.c
drivers/crypto/chelsio/chtls/chtls_cm.c
drivers/crypto/chelsio/chtls/chtls_io.c
drivers/dma/imx-sdma.c
drivers/dma/qcom/bam_dma.c
drivers/dma/sprd-dma.c
drivers/dma/tegra210-adma.c
drivers/dma/ti/cppi41.c
drivers/dma/xilinx/xilinx_dma.c
drivers/edac/ghes_edac.c
drivers/firmware/broadcom/Kconfig
drivers/firmware/broadcom/Makefile
drivers/firmware/broadcom/tee_bnxt_fw.c [new file with mode: 0644]
drivers/firmware/efi/Kconfig
drivers/firmware/efi/efi.c
drivers/firmware/efi/libstub/Makefile
drivers/firmware/efi/libstub/arm32-stub.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/firmware/efi/test/efi_test.c
drivers/firmware/efi/tpm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/display/dc/calcs/Makefile
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
drivers/gpu/drm/amd/display/dc/dcn20/Makefile
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/Makefile
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
drivers/gpu/drm/amd/display/dc/dsc/Makefile
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/arm/display/komeda/komeda_kms.c
drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
drivers/gpu/drm/etnaviv/etnaviv_dump.c
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_dpll_mgr.c
drivers/gpu/drm/i915/display/intel_dpll_mgr.h
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/panfrost/panfrost_drv.c
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/gpu/drm/v3d/v3d_gem.c
drivers/hid/hid-axff.c
drivers/hid/hid-core.c
drivers/hid/hid-dr.c
drivers/hid/hid-emsff.c
drivers/hid/hid-gaff.c
drivers/hid/hid-google-hammer.c
drivers/hid/hid-holtekff.c
drivers/hid/hid-ids.h
drivers/hid/hid-lg2ff.c
drivers/hid/hid-lg3ff.c
drivers/hid/hid-lg4ff.c
drivers/hid/hid-lgff.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-microsoft.c
drivers/hid/hid-prodikeys.c
drivers/hid/hid-sony.c
drivers/hid/hid-tmff.c
drivers/hid/hid-zpff.c
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
drivers/i2c/busses/i2c-aspeed.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/busses/i2c-stm32f7.c
drivers/infiniband/core/core_priv.h
drivers/infiniband/core/device.c
drivers/infiniband/core/iwcm.c
drivers/infiniband/core/netlink.c
drivers/infiniband/core/nldev.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/hfi1/sdma.c
drivers/infiniband/hw/hfi1/tid_rdma.c
drivers/infiniband/hw/hfi1/verbs.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/qedr/main.c
drivers/infiniband/sw/siw/siw_qp.c
drivers/infiniband/sw/siw/siw_verbs.c
drivers/input/touchscreen/st1232.c
drivers/iommu/amd_iommu_quirks.c
drivers/iommu/intel-iommu.c
drivers/iommu/ipmmu-vmsa.c
drivers/irqchip/irq-al-fic.c
drivers/irqchip/irq-atmel-aic5.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-sifive-plic.c
drivers/isdn/capi/capi.c
drivers/isdn/hardware/mISDN/hfcsusb.h
drivers/macintosh/windfarm_cpufreq_clamp.c
drivers/mfd/mt6397-core.c
drivers/mmc/host/cqhci.c
drivers/mmc/host/mxs-mmc.c
drivers/mmc/host/sdhci-omap.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/Kconfig
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/b53/b53_priv.h
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/chip.h
drivers/net/dsa/mv88e6xxx/global1.h
drivers/net/dsa/mv88e6xxx/global1_atu.c
drivers/net/dsa/qca8k.c
drivers/net/dsa/sja1105/Kconfig
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/arc/emac_rockchip.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
drivers/net/ethernet/chelsio/cxgb4/l2t.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/cortina/gemini.h
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
drivers/net/ethernet/freescale/dpaa2/Makefile
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c [new file with mode: 0644]
drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h [new file with mode: 0644]
drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h [new file with mode: 0644]
drivers/net/ethernet/freescale/dpaa2/dpmac.c [new file with mode: 0644]
drivers/net/ethernet/freescale/dpaa2/dpmac.h [new file with mode: 0644]
drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
drivers/net/ethernet/freescale/dpaa2/dprtc.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/google/gve/gve_rx.c
drivers/net/ethernet/google/gve/gve_tx.c
drivers/net/ethernet/hisilicon/hip04_eth.c
drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
drivers/net/ethernet/hisilicon/hns3/hnae3.c
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
drivers/net/ethernet/hp/Kconfig [deleted file]
drivers/net/ethernet/hp/Makefile [deleted file]
drivers/net/ethernet/hp/hp100.c [deleted file]
drivers/net/ethernet/hp/hp100.h [deleted file]
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/hw.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/ptp.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_dcb.c
drivers/net/ethernet/intel/i40e/i40e_dcb.h
drivers/net/ethernet/intel/i40e/i40e_devids.h
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_prototype.h
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_82575.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/mvneta_bm.h
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/mediatek/mtk_eth_path.c
drivers/net/ethernet/mediatek/mtk_sgmii.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/core.h
drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
drivers/net/ethernet/mellanox/mlxsw/port.h
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/resources.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/pensando/ionic/ionic_main.c
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
drivers/net/ethernet/qlogic/qed/qed_int.h
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qed/qed_sriov.h
drivers/net/ethernet/qlogic/qede/qede_filter.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/fjes/fjes_main.c
drivers/net/hamradio/bpqether.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/macsec.c
drivers/net/macvlan.c
drivers/net/netdevsim/dev.c
drivers/net/phy/dp83867.c
drivers/net/phy/marvell.c
drivers/net/phy/phylink.c
drivers/net/phy/smsc.c
drivers/net/ppp/ppp_generic.c
drivers/net/team/team.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/lan78xx.c
drivers/net/usb/r8152.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wimax/i2400m/debugfs.c
drivers/net/wimax/i2400m/op-rfkill.c
drivers/net/wimax/i2400m/usb.c
drivers/net/wireless/ath/ath9k/ar9003_hw.c
drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
drivers/net/wireless/intel/iwlwifi/fw/file.h
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
drivers/net/wireless/intel/iwlwifi/iwl-prph.h
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
drivers/net/wireless/intersil/hostap/hostap_hw.c
drivers/net/wireless/mediatek/mt76/Makefile
drivers/net/wireless/mediatek/mt76/dma.c
drivers/net/wireless/mediatek/mt76/mt76.h
drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
drivers/net/wireless/mediatek/mt76/pci.c [new file with mode: 0644]
drivers/net/wireless/realtek/rtlwifi/pci.c
drivers/net/wireless/realtek/rtlwifi/ps.c
drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
drivers/net/wireless/ti/wlcore/spi.c
drivers/net/wireless/virt_wifi.c
drivers/nfc/pn533/Kconfig
drivers/nfc/pn533/Makefile
drivers/nfc/pn533/i2c.c
drivers/nfc/pn533/pn533.c
drivers/nfc/pn533/pn533.h
drivers/nfc/pn533/uart.c [new file with mode: 0644]
drivers/nfc/pn533/usb.c
drivers/nvme/host/multipath.c
drivers/nvme/host/tcp.c
drivers/of/of_reserved_mem.c
drivers/of/unittest.c
drivers/opp/core.c
drivers/opp/of.c
drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
drivers/pinctrl/aspeed/pinmux-aspeed.h
drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
drivers/pinctrl/bcm/pinctrl-ns2-mux.c
drivers/pinctrl/berlin/pinctrl-as370.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
drivers/pinctrl/pinctrl-stmfx.c
drivers/regulator/core.c
drivers/regulator/da9062-regulator.c
drivers/regulator/fixed.c
drivers/regulator/lochnagar-regulator.c
drivers/regulator/of_regulator.c
drivers/regulator/pfuze100-regulator.c
drivers/regulator/qcom-rpmh-regulator.c
drivers/regulator/ti-abb-regulator.c
drivers/s390/cio/qdio.h
drivers/s390/cio/qdio_main.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_mpc.h
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3.h
drivers/s390/net/qeth_l3_main.c
drivers/scsi/Kconfig
drivers/scsi/ch.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/hpsa.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sni_53c710.c
drivers/soc/fsl/qbman/qman.c
drivers/soc/imx/soc-imx-scu.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/hp/Kconfig [new file with mode: 0644]
drivers/staging/hp/Makefile [new file with mode: 0644]
drivers/staging/hp/hp100.c [new file with mode: 0644]
drivers/staging/hp/hp100.h [new file with mode: 0644]
drivers/staging/wlan-ng/cfg80211.c
drivers/target/target_core_device.c
drivers/thermal/cpu_cooling.c
drivers/tty/serial/8250/8250_men_mcb.c
drivers/usb/cdns3/core.c
drivers/usb/cdns3/gadget.c
drivers/usb/class/usblp.c
drivers/usb/gadget/udc/lpc32xx_udc.c
drivers/usb/misc/ldusb.c
drivers/usb/misc/legousbtower.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/vringh.c
drivers/virtio/virtio_ring.c
fs/btrfs/block-group.c
fs/btrfs/ctree.h
fs/btrfs/delalloc-space.c
fs/btrfs/disk-io.c
fs/btrfs/file.c
fs/btrfs/inode-map.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/relocation.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/smb1ops.c
fs/cifs/smb2file.c
fs/cifs/transport.c
fs/dax.c
fs/fuse/Makefile
fs/fuse/dev.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/fuse/virtio_fs.c
fs/gfs2/ops_fstype.c
fs/io_uring.c
fs/nfs/delegation.c
fs/nfs/delegation.h
fs/nfs/nfs4proc.c
include/acpi/processor.h
include/linux/cpufreq.h
include/linux/dynamic_debug.h
include/linux/efi.h
include/linux/export.h
include/linux/filter.h
include/linux/firmware/broadcom/tee_bnxt_fw.h [new file with mode: 0644]
include/linux/fsl/mc.h
include/linux/gfp.h
include/linux/if_macvlan.h
include/linux/if_team.h
include/linux/if_vlan.h
include/linux/mlx5/mlx5_ifc.h
include/linux/netdevice.h
include/linux/perf_event.h
include/linux/platform_data/dma-imx-sdma.h
include/linux/pm_qos.h
include/linux/security.h
include/linux/skbuff.h
include/linux/socket.h
include/linux/sunrpc/bc_xprt.h
include/linux/sysfs.h
include/linux/virtio_vsock.h
include/net/act_api.h
include/net/bonding.h
include/net/busy_poll.h
include/net/cfg80211.h
include/net/dsa.h
include/net/flow_dissector.h
include/net/fq.h
include/net/fq_impl.h
include/net/hwbm.h
include/net/ip.h
include/net/ip_vs.h
include/net/net_namespace.h
include/net/sch_generic.h
include/net/sock.h
include/net/vxlan.h
include/rdma/ib_verbs.h
include/soc/fsl/qman.h
include/sound/simple_card_utils.h
include/trace/events/bridge.h
include/trace/events/btrfs.h
include/uapi/linux/dcbnl.h
include/uapi/linux/fuse.h
include/uapi/linux/pkt_cls.h
include/uapi/linux/tipc.h
kernel/bpf/core.c
kernel/bpf/devmap.c
kernel/bpf/syscall.c
kernel/cgroup/cpuset.c
kernel/events/core.c
kernel/gen_kheaders.sh
kernel/power/qos.c
kernel/sched/topology.c
kernel/stop_machine.c
kernel/time/hrtimer.c
kernel/time/posix-cpu-timers.c
kernel/time/sched_clock.c
kernel/trace/trace_event_perf.c
kernel/trace/trace_events_hist.c
lib/vdso/gettimeofday.c
net/8021q/vlan.c
net/8021q/vlan_dev.c
net/atm/common.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_v_ogm.c
net/batman-adv/hard-interface.c
net/batman-adv/soft-interface.c
net/batman-adv/types.h
net/bluetooth/6lowpan.c
net/bluetooth/af_bluetooth.c
net/bridge/br_device.c
net/bridge/br_fdb.c
net/bridge/br_input.c
net/bridge/br_private.h
net/bridge/br_switchdev.c
net/bridge/netfilter/nf_conntrack_bridge.c
net/caif/caif_socket.c
net/core/datagram.c
net/core/dev.c
net/core/dev_addr_lists.c
net/core/ethtool.c
net/core/flow_dissector.c
net/core/lwt_bpf.c
net/core/net_namespace.c
net/core/rtnetlink.c
net/core/sock.c
net/dccp/ipv4.c
net/decnet/af_decnet.c
net/dsa/dsa.c
net/dsa/dsa2.c
net/dsa/master.c
net/dsa/slave.c
net/dsa/tag_8021q.c
net/ieee802154/6lowpan/core.c
net/ipv4/datagram.c
net/ipv4/fib_frontend.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_output.c
net/ipv4/tcp.c
net/ipv4/tcp_ipv4.c
net/ipv4/udp.c
net/ipv6/addrconf_core.c
net/ipv6/inet6_hashtables.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_input.c
net/ipv6/udp.c
net/l2tp/l2tp_eth.c
net/netfilter/ipvs/ip_vs_app.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_ovf.c
net/netfilter/ipvs/ip_vs_pe.c
net/netfilter/ipvs/ip_vs_sched.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_tables_offload.c
net/netfilter/nft_payload.c
net/netrom/af_netrom.c
net/nfc/llcp_sock.c
net/openvswitch/datapath.c
net/openvswitch/vport-internal_dev.c
net/phonet/socket.c
net/qrtr/tun.c
net/rose/af_rose.c
net/rxrpc/ar-internal.h
net/rxrpc/recvmsg.c
net/sched/act_api.c
net/sched/act_bpf.c
net/sched/act_connmark.c
net/sched/act_csum.c
net/sched/act_ct.c
net/sched/act_ctinfo.c
net/sched/act_gact.c
net/sched/act_ife.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_mpls.c
net/sched/act_nat.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/act_sample.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/act_skbmod.c
net/sched/act_tunnel_key.c
net/sched/act_vlan.c
net/sched/cls_bpf.c
net/sched/sch_generic.c
net/sched/sch_hhf.c
net/sched/sch_sfb.c
net/sched/sch_sfq.c
net/sched/sch_taprio.c
net/sctp/socket.c
net/smc/af_smc.c
net/smc/smc_core.c
net/smc/smc_pnet.c
net/sunrpc/backchannel_rqst.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/backchannel.c
net/tipc/core.c
net/tipc/core.h
net/tipc/discover.c
net/tipc/link.c
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_distr.c
net/tipc/node.c
net/tipc/node.h
net/tipc/socket.c
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/wireless/chan.c
net/wireless/nl80211.c
net/wireless/reg.h
net/wireless/util.c
net/xdp/xdp_umem.c
scripts/mod/modpost.c
scripts/mod/modpost.h
scripts/nsdeps
scripts/setlocalversion
security/lockdown/lockdown.c
sound/core/timer.c
sound/firewire/bebob/bebob_stream.c
sound/hda/hdac_controller.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/max98373.c
sound/soc/codecs/msm8916-wcd-digital.c
sound/soc/codecs/rt5651.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm_adsp.c
sound/soc/intel/boards/sof_rt5682.c
sound/soc/rockchip/rockchip_i2s.c
sound/soc/samsung/arndale_rt5631.c
sound/soc/sh/rcar/core.c
sound/soc/soc-pcm.c
sound/soc/soc-topology.c
sound/soc/sof/control.c
sound/soc/sof/intel/Kconfig
sound/soc/sof/intel/bdw.c
sound/soc/sof/intel/byt.c
sound/soc/sof/intel/hda-ctrl.c
sound/soc/sof/intel/hda-loader.c
sound/soc/sof/intel/hda-stream.c
sound/soc/sof/intel/hda.c
sound/soc/sof/intel/hda.h
sound/soc/sof/loader.c
sound/soc/sof/pcm.c
sound/soc/sof/topology.c
sound/soc/stm/stm32_sai_sub.c
sound/usb/quirks.c
sound/usb/validate.c
tools/arch/x86/include/uapi/asm/svm.h
tools/arch/x86/include/uapi/asm/vmx.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/sched.h
tools/perf/builtin-c2c.c
tools/perf/builtin-kmem.c
tools/perf/jvmti/Build
tools/perf/util/annotate.c
tools/perf/util/copyfile.c
tools/perf/util/evlist.c
tools/perf/util/header.c
tools/perf/util/util.c
tools/testing/selftests/bpf/test_offload.py
tools/testing/selftests/bpf/test_tc_edt.sh
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/include/x86_64/vmx.h
tools/testing/selftests/kvm/lib/x86_64/vmx.c
tools/testing/selftests/kvm/x86_64/sync_regs_test.c
tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/l2tp.sh [changed mode: 0644->0755]
tools/testing/selftests/net/reuseport_dualstack.c
tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
tools/testing/selftests/tc-testing/tc-tests/actions/ct.json
tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json
tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
virt/kvm/arm/pmu.c
virt/kvm/kvm_main.c

index edcac87..83d7e75 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -196,7 +196,8 @@ Oleksij Rempel <linux@rempel-privat.de> <o.rempel@pengutronix.de>
 Oleksij Rempel <linux@rempel-privat.de> <ore@pengutronix.de>
 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 Patrick Mochel <mochel@digitalimplant.org>
-Paul Burton <paul.burton@mips.com> <paul.burton@imgtec.com>
+Paul Burton <paulburton@kernel.org> <paul.burton@imgtec.com>
+Paul Burton <paulburton@kernel.org> <paul.burton@mips.com>
 Peter A Jonsson <pj@ludd.ltu.se>
 Peter Oruba <peter@oruba.de>
 Peter Oruba <peter.oruba@amd.com>
@@ -229,6 +230,7 @@ Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com>
 Shuah Khan <shuah@kernel.org> <shuah.khan@hp.com>
 Shuah Khan <shuah@kernel.org> <shuahkh@osg.samsung.com>
 Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com>
+Simon Arlott <simon@octiron.net> <simon@fire.lp0.eu>
 Simon Kelley <simon@thekelleys.org.uk>
 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
 Stephen Hemminger <shemminger@osdl.org>
index 397118d..55db278 100644 (file)
@@ -51,6 +51,14 @@ Description:
                packet processing. See the network driver for the exact
                meaning of this value.
 
+What:          /sys/class/<iface>/statistics/rx_errors
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of receive errors on this network device.
+               See the network driver for the exact meaning of this value.
+
 What:          /sys/class/<iface>/statistics/rx_fifo_errors
 Date:          April 2005
 KernelVersion: 2.6.12
@@ -88,6 +96,14 @@ Description:
                due to lack of capacity in the receive side. See the network
                driver for the exact meaning of this value.
 
+What:          /sys/class/<iface>/statistics/rx_nohandler
+Date:          February 2016
+KernelVersion: 4.6
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of received packets that were dropped on
+               an inactive device by the network core.
+
 What:          /sys/class/<iface>/statistics/rx_over_errors
 Date:          April 2005
 KernelVersion: 2.6.12
index ab7ed2f..5a09661 100644 (file)
@@ -91,6 +91,11 @@ stable kernels.
 | ARM            | MMU-500         | #841119,826419  | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
+| Broadcom       | Brahma-B53      | N/A             | ARM64_ERRATUM_845719        |
++----------------+-----------------+-----------------+-----------------------------+
+| Broadcom       | Brahma-B53      | N/A             | ARM64_ERRATUM_843419        |
++----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX ITS    | #22375,24313    | CAVIUM_ERRATUM_22375        |
 +----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX ITS    | #23144          | CAVIUM_ERRATUM_23144        |
@@ -126,7 +131,7 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Qualcomm Tech. | Kryo/Falkor v1  | E1003           | QCOM_FALKOR_ERRATUM_1003    |
 +----------------+-----------------+-----------------+-----------------------------+
-| Qualcomm Tech. | Falkor v1       | E1009           | QCOM_FALKOR_ERRATUM_1009    |
+| Qualcomm Tech. | Kryo/Falkor v1  | E1009           | QCOM_FALKOR_ERRATUM_1009    |
 +----------------+-----------------+-----------------+-----------------------------+
 | Qualcomm Tech. | QDF2400 ITS     | E0065           | QCOM_QDF2400_ERRATUM_0065   |
 +----------------+-----------------+-----------------+-----------------------------+
index c82c5e5..9c7e703 100644 (file)
@@ -496,12 +496,12 @@ properties:
 
       - description: Theobroma Systems RK3368-uQ7 with Haikou baseboard
         items:
-          - const: tsd,rk3368-uq7-haikou
+          - const: tsd,rk3368-lion-haikou
           - const: rockchip,rk3368
 
       - description: Theobroma Systems RK3399-Q7 with Haikou baseboard
         items:
-          - const: tsd,rk3399-q7-haikou
+          - const: tsd,rk3399-puma-haikou
           - const: rockchip,rk3399
 
       - description: Tronsmart Orion R68 Meta
index f4c5d34..7079d44 100644 (file)
@@ -1,8 +1,11 @@
 * Advanced Interrupt Controller (AIC)
 
 Required properties:
-- compatible: Should be "atmel,<chip>-aic"
-  <chip> can be "at91rm9200", "sama5d2", "sama5d3" or "sama5d4"
+- compatible: Should be:
+    - "atmel,<chip>-aic" where  <chip> can be "at91rm9200", "sama5d2",
+      "sama5d3" or "sama5d4"
+    - "microchip,<chip>-aic" where <chip> can be "sam9x60"
+
 - interrupt-controller: Identifies the node as an interrupt controller.
 - #interrupt-cells: The number of cells to define the interrupts. It should be 3.
   The first cell is the IRQ number (aka "Peripheral IDentifier" on datasheet).
index 27f38ee..d3e423f 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/arm/allwinner,sun4i-a10-csi.yaml#
+$id: http://devicetree.org/schemas/media/allwinner,sun4i-a10-csi.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Allwinner A10 CMOS Sensor Interface (CSI) Device Tree Bindings
@@ -27,14 +27,12 @@ properties:
   clocks:
     items:
       - description: The CSI interface clock
-      - description: The CSI module clock
       - description: The CSI ISP clock
       - description: The CSI DRAM clock
 
   clock-names:
     items:
       - const: bus
-      - const: mod
       - const: isp
       - const: ram
 
@@ -89,9 +87,8 @@ examples:
         compatible = "allwinner,sun7i-a20-csi0";
         reg = <0x01c09000 0x1000>;
         interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
-        clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI0>,
-                 <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
-        clock-names = "bus", "mod", "isp", "ram";
+        clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
+        clock-names = "bus", "isp", "ram";
         resets = <&ccu RST_CSI0>;
 
         port {
diff --git a/Documentation/devicetree/bindings/net/nfc/pn532.txt b/Documentation/devicetree/bindings/net/nfc/pn532.txt
new file mode 100644 (file)
index 0000000..a5507dc
--- /dev/null
@@ -0,0 +1,46 @@
+* NXP Semiconductors PN532 NFC Controller
+
+Required properties:
+- compatible: Should be
+    - "nxp,pn532" Place a node with this inside the devicetree node of the bus
+                  where the NFC chip is connected to.
+                  Currently the kernel has phy bindings for uart and i2c.
+    - "nxp,pn532-i2c" (DEPRECATED) only works for the i2c binding.
+    - "nxp,pn533-i2c" (DEPRECATED) only works for the i2c binding.
+
+Required properties if connected on i2c:
+- clock-frequency: I²C work frequency.
+- reg: for the I²C bus address. This is fixed at 0x24 for the PN532.
+- interrupts: GPIO interrupt to which the chip is connected
+
+Optional SoC Specific Properties:
+- pinctrl-names: Contains only one value - "default".
+- pinctrl-0: Specifies the pin control groups used for this controller.
+
+Example (for ARM-based BeagleBone with PN532 on I2C2):
+
+&i2c2 {
+
+
+       pn532: nfc@24 {
+
+               compatible = "nxp,pn532";
+
+               reg = <0x24>;
+               clock-frequency = <400000>;
+
+               interrupt-parent = <&gpio1>;
+               interrupts = <17 IRQ_TYPE_EDGE_FALLING>;
+
+       };
+};
+
+Example (for PN532 connected via uart):
+
+uart4: serial@49042000 {
+        compatible = "ti,omap3-uart";
+
+        pn532: nfc {
+                compatible = "nxp,pn532";
+        };
+};
diff --git a/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt b/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt
deleted file mode 100644 (file)
index 2efe388..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-* NXP Semiconductors PN532 NFC Controller
-
-Required properties:
-- compatible: Should be "nxp,pn532-i2c" or "nxp,pn533-i2c".
-- clock-frequency: I²C work frequency.
-- reg: address on the bus
-- interrupts: GPIO interrupt to which the chip is connected
-
-Optional SoC Specific Properties:
-- pinctrl-names: Contains only one value - "default".
-- pintctrl-0: Specifies the pin control groups used for this controller.
-
-Example (for ARM-based BeagleBone with PN532 on I2C2):
-
-&i2c2 {
-
-
-       pn532: pn532@24 {
-
-               compatible = "nxp,pn532-i2c";
-
-               reg = <0x24>;
-               clock-frequency = <400000>;
-
-               interrupt-parent = <&gpio1>;
-               interrupts = <17 IRQ_TYPE_EDGE_FALLING>;
-
-       };
-};
index f83d888..064b7df 100644 (file)
@@ -33,13 +33,13 @@ patternProperties:
           allOf:
             - $ref: "/schemas/types.yaml#/definitions/string"
             - enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15,
-              ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, ESPI,
-              ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP, GPIT0, GPIT1,
-              GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1, GPIU2,
-              GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, I2C1, I2C10, I2C11, I2C12,
-              I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5, I2C6, I2C7,
-              I2C8, I2C9, I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ, LPC,
-              LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2,
+              ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMC,
+              ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP, GPIT0,
+              GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
+              GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, I2C1, I2C10, I2C11,
+              I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5, I2C6,
+              I2C7, I2C8, I2C9, I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ,
+              LPC, LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2,
               MACLINK3, MACLINK4, MDIO1, MDIO2, MDIO3, MDIO4, NCTS1, NCTS2,
               NCTS3, NCTS4, NDCD1, NDCD2, NDCD3, NDCD4, NDSR1, NDSR2, NDSR3,
               NDSR4, NDTR1, NDTR2, NDTR3, NDTR4, NRI1, NRI2, NRI3, NRI4, NRTS1,
@@ -48,47 +48,45 @@ patternProperties:
               PWM8, PWM9, RGMII1, RGMII2, RGMII3, RGMII4, RMII1, RMII2, RMII3,
               RMII4, RXD1, RXD2, RXD3, RXD4, SALT1, SALT10, SALT11, SALT12,
               SALT13, SALT14, SALT15, SALT16, SALT2, SALT3, SALT4, SALT5,
-              SALT6, SALT7, SALT8, SALT9, SD1, SD2, SD3, SD3DAT4, SD3DAT5,
-              SD3DAT6, SD3DAT7, SGPM1, SGPS1, SIOONCTRL, SIOPBI, SIOPBO,
-              SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1, SPI1ABR, SPI1CS1,
-              SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1, TACH10, TACH11,
-              TACH12, TACH13, TACH14, TACH15, TACH2, TACH3, TACH4, TACH5,
-              TACH6, TACH7, TACH8, TACH9, THRU0, THRU1, THRU2, THRU3, TXD1,
-              TXD2, TXD3, TXD4, UART10, UART11, UART12, UART13, UART6, UART7,
-              UART8, UART9, VB, VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3,
-              WDTRST4, ]
+              SALT6, SALT7, SALT8, SALT9, SD1, SD2, SGPM1, SGPS1, SIOONCTRL,
+              SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1,
+              SPI1ABR, SPI1CS1, SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1,
+              TACH10, TACH11, TACH12, TACH13, TACH14, TACH15, TACH2, TACH3,
+              TACH4, TACH5, TACH6, TACH7, TACH8, TACH9, THRU0, THRU1, THRU2,
+              THRU3, TXD1, TXD2, TXD3, TXD4, UART10, UART11, UART12, UART13,
+              UART6, UART7, UART8, UART9, VB, VGAHS, VGAVS, WDTRST1, WDTRST2,
+              WDTRST3, WDTRST4, ]
         groups:
           allOf:
             - $ref: "/schemas/types.yaml#/definitions/string"
             - enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15,
-              ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, ESPI,
-              ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWQSPID, FWSPIWP, GPIT0,
-              GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
-              GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, HVI3C3, HVI3C4, I2C1,
-              I2C10, I2C11, I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3,
-              I2C4, I2C5, I2C6, I2C7, I2C8, I2C9, I3C3, I3C4, I3C5, I3C6,
-              JTAGM, LHPD, LHSIRQ, LPC, LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ,
-              MACLINK1, MACLINK2, MACLINK3, MACLINK4, MDIO1, MDIO2, MDIO3,
-              MDIO4, NCTS1, NCTS2, NCTS3, NCTS4, NDCD1, NDCD2, NDCD3, NDCD4,
-              NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2, NDTR3, NDTR4, NRI1,
-              NRI2, NRI3, NRI4, NRTS1, NRTS2, NRTS3, NRTS4, OSCCLK, PEWAKE,
-              PWM0, PWM1, PWM10G0, PWM10G1, PWM11G0, PWM11G1, PWM12G0, PWM12G1,
-              PWM13G0, PWM13G1, PWM14G0, PWM14G1, PWM15G0, PWM15G1, PWM2, PWM3,
-              PWM4, PWM5, PWM6, PWM7, PWM8G0, PWM8G1, PWM9G0, PWM9G1, QSPI1,
-              QSPI2, RGMII1, RGMII2, RGMII3, RGMII4, RMII1, RMII2, RMII3,
-              RMII4, RXD1, RXD2, RXD3, RXD4, SALT1, SALT10G0, SALT10G1,
-              SALT11G0, SALT11G1, SALT12G0, SALT12G1, SALT13G0, SALT13G1,
-              SALT14G0, SALT14G1, SALT15G0, SALT15G1, SALT16G0, SALT16G1,
-              SALT2, SALT3, SALT4, SALT5, SALT6, SALT7, SALT8, SALT9G0,
-              SALT9G1, SD1, SD2, SD3, SD3DAT4, SD3DAT5, SD3DAT6, SD3DAT7,
-              SGPM1, SGPS1, SIOONCTRL, SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD,
-              SIOS3, SIOS5, SIOSCI, SPI1, SPI1ABR, SPI1CS1, SPI1WP, SPI2,
-              SPI2CS1, SPI2CS2, TACH0, TACH1, TACH10, TACH11, TACH12, TACH13,
-              TACH14, TACH15, TACH2, TACH3, TACH4, TACH5, TACH6, TACH7, TACH8,
-              TACH9, THRU0, THRU1, THRU2, THRU3, TXD1, TXD2, TXD3, TXD4,
-              UART10, UART11, UART12G0, UART12G1, UART13G0, UART13G1, UART6,
-              UART7, UART8, UART9, VB, VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3,
-              WDTRST4, ]
+              ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMCG1,
+              EMMCG4, EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID,
+              FWQSPID, FWSPIWP, GPIT0, GPIT1, GPIT2, GPIT3, GPIT4, GPIT5,
+              GPIT6, GPIT7, GPIU0, GPIU1, GPIU2, GPIU3, GPIU4, GPIU5, GPIU6,
+              GPIU7, HVI3C3, HVI3C4, I2C1, I2C10, I2C11, I2C12, I2C13, I2C14,
+              I2C15, I2C16, I2C2, I2C3, I2C4, I2C5, I2C6, I2C7, I2C8, I2C9,
+              I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ, LPC, LPCHC, LPCPD,
+              LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2, MACLINK3, MACLINK4,
+              MDIO1, MDIO2, MDIO3, MDIO4, NCTS1, NCTS2, NCTS3, NCTS4, NDCD1,
+              NDCD2, NDCD3, NDCD4, NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2,
+              NDTR3, NDTR4, NRI1, NRI2, NRI3, NRI4, NRTS1, NRTS2, NRTS3, NRTS4,
+              OSCCLK, PEWAKE, PWM0, PWM1, PWM10G0, PWM10G1, PWM11G0, PWM11G1,
+              PWM12G0, PWM12G1, PWM13G0, PWM13G1, PWM14G0, PWM14G1, PWM15G0,
+              PWM15G1, PWM2, PWM3, PWM4, PWM5, PWM6, PWM7, PWM8G0, PWM8G1,
+              PWM9G0, PWM9G1, QSPI1, QSPI2, RGMII1, RGMII2, RGMII3, RGMII4,
+              RMII1, RMII2, RMII3, RMII4, RXD1, RXD2, RXD3, RXD4, SALT1,
+              SALT10G0, SALT10G1, SALT11G0, SALT11G1, SALT12G0, SALT12G1,
+              SALT13G0, SALT13G1, SALT14G0, SALT14G1, SALT15G0, SALT15G1,
+              SALT16G0, SALT16G1, SALT2, SALT3, SALT4, SALT5, SALT6, SALT7,
+              SALT8, SALT9G0, SALT9G1, SD1, SD2, SD3, SGPM1, SGPS1, SIOONCTRL,
+              SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1,
+              SPI1ABR, SPI1CS1, SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1,
+              TACH10, TACH11, TACH12, TACH13, TACH14, TACH15, TACH2, TACH3,
+              TACH4, TACH5, TACH6, TACH7, TACH8, TACH9, THRU0, THRU1, THRU2,
+              THRU3, TXD1, TXD2, TXD3, TXD4, UART10, UART11, UART12G0,
+              UART12G1, UART13G0, UART13G1, UART6, UART7, UART8, UART9, VB,
+              VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3, WDTRST4, ]
 
 required:
   - compatible
index a78150c..f324169 100644 (file)
@@ -30,8 +30,8 @@ if:
 properties:
   compatible:
     enum:
-      - const: regulator-fixed
-      - const: regulator-fixed-clock
+      - regulator-fixed
+      - regulator-fixed-clock
 
   regulator-name: true
 
index b261a30..04819ad 100644 (file)
@@ -24,15 +24,17 @@ description: |
 
 properties:
   compatible:
-    items:
-      - enum:
-          - sifive,rocket0
-          - sifive,e5
-          - sifive,e51
-          - sifive,u54-mc
-          - sifive,u54
-          - sifive,u5
-      - const: riscv
+    oneOf:
+      - items:
+          - enum:
+              - sifive,rocket0
+              - sifive,e5
+              - sifive,e51
+              - sifive,u54-mc
+              - sifive,u54
+              - sifive,u5
+          - const: riscv
+      - const: riscv    # Simulator only
     description:
       Identifies that the hart uses the RISC-V instruction set
       and identifies the type of the hart.
@@ -66,12 +68,8 @@ properties:
       insensitive, letters in the riscv,isa string must be all
       lowercase to simplify parsing.
 
-  timebase-frequency:
-    type: integer
-    minimum: 1
-    description:
-      Specifies the clock frequency of the system timer in Hz.
-      This value is common to all harts on a single system image.
+  # RISC-V requires 'timebase-frequency' in /cpus, so disallow it here
+  timebase-frequency: false
 
   interrupt-controller:
     type: object
@@ -93,7 +91,6 @@ properties:
 
 required:
   - riscv,isa
-  - timebase-frequency
   - interrupt-controller
 
 examples:
diff --git a/Documentation/networking/device_drivers/freescale/dpaa2/mac-phy-support.rst b/Documentation/networking/device_drivers/freescale/dpaa2/mac-phy-support.rst
new file mode 100644 (file)
index 0000000..51e6624
--- /dev/null
@@ -0,0 +1,191 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+=======================
+DPAA2 MAC / PHY support
+=======================
+
+:Copyright: |copy| 2019 NXP
+
+Overview
+--------
+
+The DPAA2 MAC / PHY support consists of a set of APIs that help DPAA2 network
+drivers (dpaa2-eth, dpaa2-ethsw) interact with the PHY library.
+
+DPAA2 Software Architecture
+---------------------------
+
+Among other DPAA2 objects, the fsl-mc bus exports DPNI objects (abstracting a
+network interface) and DPMAC objects (abstracting a MAC). The dpaa2-eth driver
+probes on the DPNI object and connects to and configures a DPMAC object with
+the help of phylink.
+
+Data connections may be established between a DPNI and a DPMAC, or between two
+DPNIs. Depending on the connection type, the netif_carrier_[on/off] is handled
+directly by the dpaa2-eth driver or by phylink.
+
+.. code-block:: none
+
+  Sources of abstracted link state information presented by the MC firmware
+
+                                               +--------------------------------------+
+  +------------+                  +---------+  |                           xgmac_mdio |
+  | net_device |                  | phylink |--|  +-----+  +-----+  +-----+  +-----+  |
+  +------------+                  +---------+  |  | PHY |  | PHY |  | PHY |  | PHY |  |
+        |                             |        |  +-----+  +-----+  +-----+  +-----+  |
+      +------------------------------------+   |                    External MDIO bus |
+      |            dpaa2-eth               |   +--------------------------------------+
+      +------------------------------------+
+        |                             |                                           Linux
+  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+        |                             |                                     MC firmware
+        |              /|             V
+  +----------+        / |       +----------+
+  |          |       /  |       |          |
+  |          |       |  |       |          |
+  |   DPNI   |<------|  |<------|   DPMAC  |
+  |          |       |  |       |          |
+  |          |       \  |<---+  |          |
+  +----------+        \ |    |  +----------+
+                       \|    |
+                             |
+           +--------------------------------------+
+           | MC firmware polling MAC PCS for link |
+           |  +-----+  +-----+  +-----+  +-----+  |
+           |  | PCS |  | PCS |  | PCS |  | PCS |  |
+           |  +-----+  +-----+  +-----+  +-----+  |
+           |                    Internal MDIO bus |
+           +--------------------------------------+
+
+
+Depending on an MC firmware configuration setting, each MAC may be in one of two modes:
+
+- DPMAC_LINK_TYPE_FIXED: the link state management is handled exclusively by
+  the MC firmware by polling the MAC PCS. Without the need to register a
+  phylink instance, the dpaa2-eth driver will not bind to the connected dpmac
+  object at all.
+
+- DPMAC_LINK_TYPE_PHY: The MC firmware is left waiting for link state update
+  events, but those are in fact passed strictly between the dpaa2-mac (based on
+  phylink) and its attached net_device driver (dpaa2-eth, dpaa2-ethsw),
+  effectively bypassing the firmware.
+
+Implementation
+--------------
+
+At probe time or when a DPNI's endpoint is dynamically changed, the dpaa2-eth
+is responsible to find out if the peer object is a DPMAC and if this is the
+case, to integrate it with PHYLINK using the dpaa2_mac_connect() API, which
+will do the following:
+
+ - look up the device tree for a PHYLINK-compatible binding (phy-handle)
+ - will create a PHYLINK instance associated with the received net_device
+ - connect to the PHY using phylink_of_phy_connect()
+
+The following phylink_mac_ops callbacks are implemented:
+
+ - .validate() will populate the supported linkmodes with the MAC capabilities
+   only when the phy_interface_t is RGMII_* (at the moment, this is the only
+   link type supported by the driver).
+
+ - .mac_config() will configure the MAC in the new configuration using the
+   dpmac_set_link_state() MC firmware API.
+
+ - .mac_link_up() / .mac_link_down() will update the MAC link using the same
+   API described above.
+
+At driver unbind() or when the DPNI object is disconnected from the DPMAC, the
+dpaa2-eth driver calls dpaa2_mac_disconnect() which will, in turn, disconnect
+from the PHY and destroy the PHYLINK instance.
+
+In case of a DPNI-DPMAC connection, an 'ip link set dev eth0 up' would start
+the following sequence of operations:
+
+(1) phylink_start() called from .dev_open().
+(2) The .mac_config() and .mac_link_up() callbacks are called by PHYLINK.
+(3) In order to configure the HW MAC, the MC Firmware API
+    dpmac_set_link_state() is called.
+(4) The firmware will eventually setup the HW MAC in the new configuration.
+(5) A netif_carrier_on() call is made directly from PHYLINK on the associated
+    net_device.
+(6) The dpaa2-eth driver handles the LINK_STATE_CHANGE irq in order to
+    enable/disable Rx taildrop based on the pause frame settings.
+
+.. code-block:: none
+
+  +---------+               +---------+
+  | PHYLINK |-------------->|  eth0   |
+  +---------+           (5) +---------+
+  (1) ^  |
+      |  |
+      |  v (2)
+  +-----------------------------------+
+  |             dpaa2-eth             |
+  +-----------------------------------+
+         |                    ^ (6)
+         |                    |
+         v (3)                |
+  +---------+---------------+---------+
+  |  DPMAC  |               |  DPNI   |
+  +---------+               +---------+
+  |            MC Firmware            |
+  +-----------------------------------+
+         |
+         |
+         v (4)
+  +-----------------------------------+
+  |             HW MAC                |
+  +-----------------------------------+
+
+In case of a DPNI-DPNI connection, a usual sequence of operations looks like
+the following:
+
+(1) ip link set dev eth0 up
+(2) The dpni_enable() MC API called on the associated fsl_mc_device.
+(3) ip link set dev eth1 up
+(4) The dpni_enable() MC API called on the associated fsl_mc_device.
+(5) The LINK_STATE_CHANGED irq is received by both instances of the dpaa2-eth
+    driver because now the operational link state is up.
+(6) The netif_carrier_on() is called on the exported net_device from
+    link_state_update().
+
+.. code-block:: none
+
+  +---------+               +---------+
+  |  eth0   |               |  eth1   |
+  +---------+               +---------+
+      |  ^                     ^  |
+      |  |                     |  |
+  (1) v  | (6)             (6) |  v (3)
+  +---------+               +---------+
+  |dpaa2-eth|               |dpaa2-eth|
+  +---------+               +---------+
+      |  ^                     ^  |
+      |  |                     |  |
+  (2) v  | (5)             (5) |  v (4)
+  +---------+---------------+---------+
+  |  DPNI   |               |  DPNI   |
+  +---------+               +---------+
+  |            MC Firmware            |
+  +-----------------------------------+
+
+
+Exported API
+------------
+
+Any DPAA2 driver that drives endpoints of DPMAC objects should service its
+_EVENT_ENDPOINT_CHANGED irq and connect/disconnect from the associated DPMAC
+when necessary using the below listed API::
+
+ - int dpaa2_mac_connect(struct dpaa2_mac *mac);
+ - void dpaa2_mac_disconnect(struct dpaa2_mac *mac);
+
+A phylink integration is necessary only when the partner DPMAC is not of TYPE_FIXED.
+One can check for this condition using the below API::
+
+ - bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev, struct fsl_mc_io *mc_io);
+
+Before connection to a MAC, the caller must allocate and populate the
+dpaa2_mac structure with the associated net_device, a pointer to the MC portal
+to be used and the actual fsl_mc_device structure of the DPMAC.
index 2b9f488..caf023c 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==============================================================
-Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters
-==============================================================
+=============================================================
+Linux Base Driver for the Intel(R) PRO/100 Family of Adapters
+=============================================================
 
 June 1, 2018
 
@@ -21,7 +21,7 @@ Contents
 In This Release
 ===============
 
-This file describes the Linux* Base Driver for the Intel(R) PRO/100 Family of
+This file describes the Linux Base Driver for the Intel(R) PRO/100 Family of
 Adapters. This driver includes support for Itanium(R)2-based systems.
 
 For questions related to hardware requirements, refer to the documentation
@@ -138,9 +138,9 @@ version 1.6 or later is required for this functionality.
 The latest release of ethtool can be found from
 https://www.kernel.org/pub/software/network/ethtool/
 
-Enabling Wake on LAN* (WoL)
----------------------------
-WoL is provided through the ethtool* utility.  For instructions on
+Enabling Wake on LAN (WoL)
+--------------------------
+WoL is provided through the ethtool utility.  For instructions on
 enabling WoL with ethtool, refer to the ethtool man page.  WoL will be
 enabled on the system during the next shut down or reboot.  For this
 driver version, in order to enable WoL, the e100 driver must be loaded
index 956560b..4aaae0f 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-===========================================================
-Linux* Base Driver for Intel(R) Ethernet Network Connection
-===========================================================
+==========================================================
+Linux Base Driver for Intel(R) Ethernet Network Connection
+==========================================================
 
 Intel Gigabit Linux driver.
 Copyright(c) 1999 - 2013 Intel Corporation.
@@ -438,10 +438,10 @@ ethtool
   The latest release of ethtool can be found from
   https://www.kernel.org/pub/software/network/ethtool/
 
-Enabling Wake on LAN* (WoL)
----------------------------
+Enabling Wake on LAN (WoL)
+--------------------------
 
-  WoL is configured through the ethtool* utility.
+  WoL is configured through the ethtool utility.
 
   WoL will be enabled on the system during the next shut down or reboot.
   For this driver version, in order to enable WoL, the e1000 driver must be
index 01999f0..f49cd37 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-======================================================
-Linux* Driver for Intel(R) Ethernet Network Connection
-======================================================
+=====================================================
+Linux Driver for Intel(R) Ethernet Network Connection
+=====================================================
 
 Intel Gigabit Linux driver.
 Copyright(c) 2008-2018 Intel Corporation.
@@ -338,7 +338,7 @@ and higher cannot be forced. Use the autonegotiation advertising setting to
 manually set devices for 1 Gbps and higher.
 
 Speed, duplex, and autonegotiation advertising are configured through the
-ethtool* utility.
+ethtool utility.
 
 Caution: Only experienced network administrators should force speed and duplex
 or change autonegotiation advertising manually. The settings at the switch must
@@ -351,9 +351,9 @@ will not attempt to auto-negotiate with its link partner since those adapters
 operate only in full duplex and only at their native speed.
 
 
-Enabling Wake on LAN* (WoL)
----------------------------
-WoL is configured through the ethtool* utility.
+Enabling Wake on LAN (WoL)
+--------------------------
+WoL is configured through the ethtool utility.
 
 WoL will be enabled on the system during the next shut down or reboot. For
 this driver version, in order to enable WoL, the e1000e driver must be loaded
index ac3269e..4d279e6 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==============================================================
-Linux* Base Driver for Intel(R) Ethernet Multi-host Controller
-==============================================================
+=============================================================
+Linux Base Driver for Intel(R) Ethernet Multi-host Controller
+=============================================================
 
 August 20, 2018
 Copyright(c) 2015-2018 Intel Corporation.
@@ -120,8 +120,8 @@ rx-flow-hash tcp4|udp4|ah4|esp4|sctp4|tcp6|udp6|ah6|esp6|sctp6 m|v|t|s|d|f|n|r
 Known Issues/Troubleshooting
 ============================
 
-Enabling SR-IOV in a 64-bit Microsoft* Windows Server* 2012/R2 guest OS under Linux KVM
----------------------------------------------------------------------------------------
+Enabling SR-IOV in a 64-bit Microsoft Windows Server 2012/R2 guest OS under Linux KVM
+-------------------------------------------------------------------------------------
 KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM. This
 includes traditional PCIe devices, as well as SR-IOV-capable devices based on
 the Intel Ethernet Controller XL710.
index 848fd38..8a9b185 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==================================================================
-Linux* Base Driver for the Intel(R) Ethernet Controller 700 Series
-==================================================================
+=================================================================
+Linux Base Driver for the Intel(R) Ethernet Controller 700 Series
+=================================================================
 
 Intel 40 Gigabit Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
@@ -384,7 +384,7 @@ NOTE: You cannot set the speed for devices based on the Intel(R) Ethernet
 Network Adapter XXV710 based devices.
 
 Speed, duplex, and autonegotiation advertising are configured through the
-ethtool* utility.
+ethtool utility.
 
 Caution: Only experienced network administrators should force speed and duplex
 or change autonegotiation advertising manually. The settings at the switch must
index cfc0884..84ac7e7 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==================================================================
-Linux* Base Driver for Intel(R) Ethernet Adaptive Virtual Function
-==================================================================
+=================================================================
+Linux Base Driver for Intel(R) Ethernet Adaptive Virtual Function
+=================================================================
 
 Intel Ethernet Adaptive Virtual Function Linux driver.
 Copyright(c) 2013-2018 Intel Corporation.
@@ -19,7 +19,7 @@ Contents
 Overview
 ========
 
-This file describes the iavf Linux* Base Driver. This driver was formerly
+This file describes the iavf Linux Base Driver. This driver was formerly
 called i40evf.
 
 The iavf driver supports the below mentioned virtual function devices and
index c220aa2..ee43ea5 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-===================================================================
-Linux* Base Driver for the Intel(R) Ethernet Connection E800 Series
-===================================================================
+==================================================================
+Linux Base Driver for the Intel(R) Ethernet Connection E800 Series
+==================================================================
 
 Intel ice Linux driver.
 Copyright(c) 2018 Intel Corporation.
index fc8cfaa..87e560f 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-===========================================================
-Linux* Base Driver for Intel(R) Ethernet Network Connection
-===========================================================
+==========================================================
+Linux Base Driver for Intel(R) Ethernet Network Connection
+==========================================================
 
 Intel Gigabit Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
@@ -129,9 +129,9 @@ version is required for this functionality. Download it at:
 https://www.kernel.org/pub/software/network/ethtool/
 
 
-Enabling Wake on LAN* (WoL)
----------------------------
-WoL is configured through the ethtool* utility.
+Enabling Wake on LAN (WoL)
+--------------------------
+WoL is configured through the ethtool utility.
 
 WoL will be enabled on the system during the next shut down or reboot. For
 this driver version, in order to enable WoL, the igb driver must be loaded
index 9cddabe..557fc02 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-============================================================
-Linux* Base Virtual Function Driver for Intel(R) 1G Ethernet
-============================================================
+===========================================================
+Linux Base Virtual Function Driver for Intel(R) 1G Ethernet
+===========================================================
 
 Intel Gigabit Virtual Function Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
index c7d2548..f1d5233 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-=============================================================================
-Linux* Base Driver for the Intel(R) Ethernet 10 Gigabit PCI Express Adapters
-=============================================================================
+===========================================================================
+Linux Base Driver for the Intel(R) Ethernet 10 Gigabit PCI Express Adapters
+===========================================================================
 
 Intel 10 Gigabit Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
@@ -519,8 +519,8 @@ The offload is also supported for ixgbe's VFs, but the VF must be set as
 Known Issues/Troubleshooting
 ============================
 
-Enabling SR-IOV in a 64-bit Microsoft* Windows Server* 2012/R2 guest OS
------------------------------------------------------------------------
+Enabling SR-IOV in a 64-bit Microsoft Windows Server 2012/R2 guest OS
+---------------------------------------------------------------------
 Linux KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM.
 This includes traditional PCIe devices, as well as SR-IOV-capable devices based
 on the Intel Ethernet Controller XL710.
index 5d49773..76bbde7 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-=============================================================
-Linux* Base Virtual Function Driver for Intel(R) 10G Ethernet
-=============================================================
+============================================================
+Linux Base Virtual Function Driver for Intel(R) 10G Ethernet
+============================================================
 
 Intel 10 Gigabit Virtual Function Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
index 1393589..c17d680 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==========================================================
-Linux* Driver for the Pensando(R) Ethernet adapter family
-==========================================================
+========================================================
+Linux Driver for the Pensando(R) Ethernet adapter family
+========================================================
 
 Pensando Linux Ethernet driver.
 Copyright(c) 2019 Pensando Systems, Inc
diff --git a/Documentation/networking/devlink-params-mv88e6xxx.txt b/Documentation/networking/devlink-params-mv88e6xxx.txt
new file mode 100644 (file)
index 0000000..21c4b35
--- /dev/null
@@ -0,0 +1,7 @@
+ATU_hash               [DEVICE, DRIVER-SPECIFIC]
+                       Select one of four possible hashing algorithms for
+                       MAC addresses in the Address Translation Unit.
+                       A value of 3 seems to work better than the default of
+                       1 when many MAC addresses have the same OUI.
+                       Configuration mode: runtime
+                       Type: u8. 0-3 valid.
index 49e95f4..8d4ad1d 100644 (file)
@@ -207,8 +207,8 @@ TCP variables:
 
 somaxconn - INTEGER
        Limit of socket listen() backlog, known in userspace as SOMAXCONN.
-       Defaults to 128.  See also tcp_max_syn_backlog for additional tuning
-       for TCP sockets.
+       Defaults to 4096. (Was 128 before linux-5.4)
+       See also tcp_max_syn_backlog for additional tuning for TCP sockets.
 
 tcp_abort_on_overflow - BOOLEAN
        If listening service is too slow to accept new connections,
@@ -408,11 +408,14 @@ tcp_max_orphans - INTEGER
        up to ~64K of unswappable memory.
 
 tcp_max_syn_backlog - INTEGER
-       Maximal number of remembered connection requests, which have not
-       received an acknowledgment from connecting client.
+       Maximal number of remembered connection requests (SYN_RECV),
+       which have not received an acknowledgment from connecting client.
+       This is a per-listener limit.
        The minimal value is 128 for low memory machines, and it will
        increase in proportion to the memory of machine.
        If server suffers from overload, try increasing this number.
+       Remember to also check /proc/sys/net/core/somaxconn
+       A SYN_RECV request socket consumes about 304 bytes of memory.
 
 tcp_max_tw_buckets - INTEGER
        Maximal number of timewait sockets held by system simultaneously.
index 7fc0746..c0024b2 100644 (file)
@@ -2330,11 +2330,13 @@ F:      drivers/edac/altera_edac.
 
 ARM/SPREADTRUM SoC SUPPORT
 M:     Orson Zhai <orsonzhai@gmail.com>
-M:     Baolin Wang <baolin.wang@linaro.org>
+M:     Baolin Wang <baolin.wang7@gmail.com>
 M:     Chunyan Zhang <zhang.lyra@gmail.com>
 S:     Maintained
 F:     arch/arm64/boot/dts/sprd
 N:     sprd
+N:     sc27xx
+N:     sc2731
 
 ARM/STI ARCHITECTURE
 M:     Patrice Chotard <patrice.chotard@st.com>
@@ -3103,7 +3105,7 @@ S:        Supported
 F:     arch/arm64/net/
 
 BPF JIT for MIPS (32-BIT AND 64-BIT)
-M:     Paul Burton <paul.burton@mips.com>
+M:     Paul Burton <paulburton@kernel.org>
 L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
 S:     Maintained
@@ -3190,7 +3192,7 @@ N:        bcm216*
 N:     kona
 F:     arch/arm/mach-bcm/
 
-BROADCOM BCM2835 ARM ARCHITECTURE
+BROADCOM BCM2711/BCM2835 ARM ARCHITECTURE
 M:     Eric Anholt <eric@anholt.net>
 M:     Stefan Wahren <wahrenst@gmx.net>
 L:     bcm-kernel-feedback-list@broadcom.com
@@ -3198,6 +3200,7 @@ L:        linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:     git git://github.com/anholt/linux
 S:     Maintained
+N:     bcm2711
 N:     bcm2835
 F:     drivers/staging/vc04_services
 
@@ -3244,8 +3247,6 @@ S:        Maintained
 F:     drivers/usb/gadget/udc/bcm63xx_udc.*
 
 BROADCOM BCM7XXX ARM ARCHITECTURE
-M:     Brian Norris <computersforpeace@gmail.com>
-M:     Gregory Fong <gregory.0xf0@gmail.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
 M:     bcm-kernel-feedback-list@broadcom.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -5053,10 +5054,14 @@ M:      Ioana Radulescu <ruxandra.radulescu@nxp.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/freescale/dpaa2/dpaa2-eth*
+F:     drivers/net/ethernet/freescale/dpaa2/dpaa2-mac*
 F:     drivers/net/ethernet/freescale/dpaa2/dpni*
+F:     drivers/net/ethernet/freescale/dpaa2/dpmac*
 F:     drivers/net/ethernet/freescale/dpaa2/dpkg.h
 F:     drivers/net/ethernet/freescale/dpaa2/Makefile
 F:     drivers/net/ethernet/freescale/dpaa2/Kconfig
+F:     Documentation/networking/device_drivers/freescale/dpaa2/ethernet-driver.rst
+F:     Documentation/networking/device_drivers/freescale/dpaa2/mac-phy-support.rst
 
 DPAA2 ETHERNET SWITCH DRIVER
 M:     Ioana Radulescu <ruxandra.radulescu@nxp.com>
@@ -7450,8 +7455,8 @@ F:        drivers/platform/x86/tc1100-wmi.c
 
 HP100: Driver for HP 10/100 Mbit/s Voice Grade Network Adapter Series
 M:     Jaroslav Kysela <perex@perex.cz>
-S:     Maintained
-F:     drivers/net/ethernet/hp/hp100.*
+S:     Obsolete
+F:     drivers/staging/hp/hp100.*
 
 HPET:  High Precision Event Timers driver
 M:     Clemens Ladisch <clemens@ladisch.de>
@@ -8008,7 +8013,7 @@ S:        Maintained
 F:     drivers/usb/atm/ueagle-atm.c
 
 IMGTEC ASCII LCD DRIVER
-M:     Paul Burton <paul.burton@mips.com>
+M:     Paul Burton <paulburton@kernel.org>
 S:     Maintained
 F:     Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt
 F:     drivers/auxdisplay/img-ascii-lcd.c
@@ -9744,6 +9749,7 @@ S:        Maintained
 F:     drivers/net/dsa/mv88e6xxx/
 F:     include/linux/platform_data/mv88e6xxx.h
 F:     Documentation/devicetree/bindings/net/dsa/marvell.txt
+F:     Documentation/networking/devlink-params-mv88e6xxx.txt
 
 MARVELL ARMADA DRM SUPPORT
 M:     Russell King <linux@armlinux.org.uk>
@@ -10835,7 +10841,7 @@ F:      drivers/usb/image/microtek.*
 
 MIPS
 M:     Ralf Baechle <ralf@linux-mips.org>
-M:     Paul Burton <paul.burton@mips.com>
+M:     Paul Burton <paulburton@kernel.org>
 M:     James Hogan <jhogan@kernel.org>
 L:     linux-mips@vger.kernel.org
 W:     http://www.linux-mips.org/
@@ -10849,7 +10855,7 @@ F:      arch/mips/
 F:     drivers/platform/mips/
 
 MIPS BOSTON DEVELOPMENT BOARD
-M:     Paul Burton <paul.burton@mips.com>
+M:     Paul Burton <paulburton@kernel.org>
 L:     linux-mips@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/clock/img,boston-clock.txt
@@ -10859,7 +10865,7 @@ F:      drivers/clk/imgtec/clk-boston.c
 F:     include/dt-bindings/clock/boston-clock.h
 
 MIPS GENERIC PLATFORM
-M:     Paul Burton <paul.burton@mips.com>
+M:     Paul Burton <paulburton@kernel.org>
 L:     linux-mips@vger.kernel.org
 S:     Supported
 F:     Documentation/devicetree/bindings/power/mti,mips-cpc.txt
@@ -11414,7 +11420,6 @@ F:      include/trace/events/tcp.h
 NETWORKING [TLS]
 M:     Boris Pismenny <borisp@mellanox.com>
 M:     Aviad Yehezkel <aviadye@mellanox.com>
-M:     Dave Watson <davejwatson@fb.com>
 M:     John Fastabend <john.fastabend@gmail.com>
 M:     Daniel Borkmann <daniel@iogearbox.net>
 M:     Jakub Kicinski <jakub.kicinski@netronome.com>
@@ -13912,7 +13917,7 @@ F:      drivers/mtd/nand/raw/r852.h
 
 RISC-V ARCHITECTURE
 M:     Paul Walmsley <paul.walmsley@sifive.com>
-M:     Palmer Dabbelt <palmer@sifive.com>
+M:     Palmer Dabbelt <palmer@dabbelt.com>
 M:     Albert Ou <aou@eecs.berkeley.edu>
 L:     linux-riscv@lists.infradead.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git
@@ -14789,7 +14794,7 @@ F:      drivers/media/usb/siano/
 F:     drivers/media/mmc/siano/
 
 SIFIVE DRIVERS
-M:     Palmer Dabbelt <palmer@sifive.com>
+M:     Palmer Dabbelt <palmer@dabbelt.com>
 M:     Paul Walmsley <paul.walmsley@sifive.com>
 L:     linux-riscv@lists.infradead.org
 T:     git git://github.com/sifive/riscv-linux.git
@@ -14799,7 +14804,7 @@ N:      sifive
 
 SIFIVE FU540 SYSTEM-ON-CHIP
 M:     Paul Walmsley <paul.walmsley@sifive.com>
-M:     Palmer Dabbelt <palmer@sifive.com>
+M:     Palmer Dabbelt <palmer@dabbelt.com>
 L:     linux-riscv@lists.infradead.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pjw/sifive.git
 S:     Supported
index ffd7a91..79be70b 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
 VERSION = 5
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
-NAME = Nesting Opossum
+EXTRAVERSION = -rc5
+NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -1037,7 +1037,7 @@ export KBUILD_VMLINUX_OBJS := $(head-y) $(init-y) $(core-y) $(libs-y2) \
 export KBUILD_VMLINUX_LIBS := $(libs-y1)
 export KBUILD_LDS          := arch/$(SRCARCH)/kernel/vmlinux.lds
 export LDFLAGS_vmlinux
-# used by scripts/package/Makefile
+# used by scripts/Makefile.package
 export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) LICENSES arch include scripts tools)
 
 vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_OBJS) $(KBUILD_VMLINUX_LIBS)
index bfc7f5f..9acbeba 100644 (file)
                clock-frequency = <33333333>;
        };
 
+       reg_5v0: regulator-5v0 {
+               compatible = "regulator-fixed";
+
+               regulator-name = "5v0-supply";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+       };
+
        cpu_intc: cpu-interrupt-controller {
                compatible = "snps,archs-intc";
                interrupt-controller;
                        clocks = <&input_clk>;
                        cs-gpios = <&creg_gpio 0 GPIO_ACTIVE_LOW>,
                                   <&creg_gpio 1 GPIO_ACTIVE_LOW>;
+
+                       spi-flash@0 {
+                               compatible = "sst26wf016b", "jedec,spi-nor";
+                               reg = <0>;
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               spi-max-frequency = <4000000>;
+                       };
+
+                       adc@1 {
+                               compatible = "ti,adc108s102";
+                               reg = <1>;
+                               vref-supply = <&reg_5v0>;
+                               spi-max-frequency = <1000000>;
+                       };
                };
 
                creg_gpio: gpio@14b0 {
index 9b9a744..0974226 100644 (file)
@@ -32,6 +32,8 @@ CONFIG_INET=y
 CONFIG_DEVTMPFS=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_MTD=y
+CONFIG_MTD_SPI_NOR=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_NETDEVICES=y
@@ -55,6 +57,8 @@ CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_DWAPB=y
 CONFIG_GPIO_SNPS_CREG=y
 # CONFIG_HWMON is not set
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_DRM=y
 # CONFIG_DRM_FBDEV_EMULATION is not set
 CONFIG_DRM_UDL=y
@@ -72,6 +76,8 @@ CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_DW=y
 CONFIG_DMADEVICES=y
 CONFIG_DW_AXI_DMAC=y
+CONFIG_IIO=y
+CONFIG_TI_ADC108S102=y
 CONFIG_EXT3_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
index 861a8ae..661fd84 100644 (file)
@@ -614,8 +614,8 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
        /* loop thru all available h/w condition indexes */
        for (i = 0; i < cc_bcr.c; i++) {
                write_aux_reg(ARC_REG_CC_INDEX, i);
-               cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
-               cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
+               cc_name.indiv.word0 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME0));
+               cc_name.indiv.word1 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME1));
 
                arc_pmu_map_hw_event(i, cc_name.str);
                arc_pmu_add_raw_event_attr(i, cc_name.str);
index 883fb85..1b4b2b0 100644 (file)
                reg = <0x70>;
                #address-cells = <1>;
                #size-cells = <0>;
+               i2c-mux-idle-disconnect;
 
                i2c@0 {
                        /* FMC A */
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <0>;
-                       i2c-mux-idle-disconnect;
                };
 
                i2c@1 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <1>;
-                       i2c-mux-idle-disconnect;
                };
 
                i2c@2 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <2>;
-                       i2c-mux-idle-disconnect;
                };
 
                i2c@3 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <3>;
-                       i2c-mux-idle-disconnect;
                };
 
                i2c@4 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <4>;
-                       i2c-mux-idle-disconnect;
                };
 
                i2c@5 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <5>;
-                       i2c-mux-idle-disconnect;
 
                        ina230@40 { compatible = "ti,ina230"; reg = <0x40>; shunt-resistor = <5000>; };
                        ina230@41 { compatible = "ti,ina230"; reg = <0x41>; shunt-resistor = <5000>; };
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <6>;
-                       i2c-mux-idle-disconnect;
                };
 
                i2c@7 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <7>;
-                       i2c-mux-idle-disconnect;
 
                        u41: pca9575@20 {
                                compatible = "nxp,pca9575";
index 09a088f..b75af21 100644 (file)
        #address-cells = <1>;
        #size-cells = <0>;
        pinctrl-0 = <&emmc_gpio34 &gpclk2_gpio43>;
+       bus-width = <4>;
        mmc-pwrseq = <&wifi_pwrseq>;
        non-removable;
        status = "okay";
index 7c3cb7e..925cb37 100644 (file)
@@ -9,6 +9,14 @@
                reg = <0 0x40000000>;
        };
 
+       leds {
+               /*
+                * Since there is no upstream GPIO driver yet,
+                * remove the incomplete node.
+                */
+               /delete-node/ act;
+       };
+
        reg_3v3: fixed-regulator {
                compatible = "regulator-fixed";
                regulator-name = "3V3";
index 7ceae35..547fb14 100644 (file)
        vin-supply = <&sw1c_reg>;
 };
 
+&snvs_poweroff {
+       status = "okay";
+};
+
 &iomuxc {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_hog>;
index 710f850..e2e604d 100644 (file)
                                compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
                                reg = <0x302d0000 0x10000>;
                                interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clks IMX7D_CLK_DUMMY>,
+                               clocks = <&clks IMX7D_GPT1_ROOT_CLK>,
                                         <&clks IMX7D_GPT1_ROOT_CLK>;
                                clock-names = "ipg", "per";
                        };
                                compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
                                reg = <0x302e0000 0x10000>;
                                interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clks IMX7D_CLK_DUMMY>,
+                               clocks = <&clks IMX7D_GPT2_ROOT_CLK>,
                                         <&clks IMX7D_GPT2_ROOT_CLK>;
                                clock-names = "ipg", "per";
                                status = "disabled";
                                compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
                                reg = <0x302f0000 0x10000>;
                                interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clks IMX7D_CLK_DUMMY>,
+                               clocks = <&clks IMX7D_GPT3_ROOT_CLK>,
                                         <&clks IMX7D_GPT3_ROOT_CLK>;
                                clock-names = "ipg", "per";
                                status = "disabled";
                                compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
                                reg = <0x30300000 0x10000>;
                                interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clks IMX7D_CLK_DUMMY>,
+                               clocks = <&clks IMX7D_GPT4_ROOT_CLK>,
                                         <&clks IMX7D_GPT4_ROOT_CLK>;
                                clock-names = "ipg", "per";
                                status = "disabled";
index 3fdd0a7..506b118 100644 (file)
 &twl_gpio {
        ti,use-leds;
 };
+
+&twl_keypad {
+       status = "disabled";
+};
index 4454449..a40fe8d 100644 (file)
                compatible = "ti,wl1285", "ti,wl1283";
                reg = <2>;
                /* gpio_100 with gpmc_wait2 pad as wakeirq */
-               interrupts-extended = <&gpio4 4 IRQ_TYPE_EDGE_RISING>,
+               interrupts-extended = <&gpio4 4 IRQ_TYPE_LEVEL_HIGH>,
                                      <&omap4_pmx_core 0x4e>;
                interrupt-names = "irq", "wakeup";
                ref-clock-frequency = <26000000>;
index 14be2ec..55ea8b6 100644 (file)
                compatible = "ti,wl1271";
                reg = <2>;
                /* gpio_53 with gpmc_ncs3 pad as wakeup */
-               interrupts-extended = <&gpio2 21 IRQ_TYPE_EDGE_RISING>,
+               interrupts-extended = <&gpio2 21 IRQ_TYPE_LEVEL_HIGH>,
                                      <&omap4_pmx_core 0x3a>;
                interrupt-names = "irq", "wakeup";
                ref-clock-frequency = <38400000>;
index 3c27496..91480ac 100644 (file)
                compatible = "ti,wl1281";
                reg = <2>;
                interrupt-parent = <&gpio1>;
-               interrupts = <21 IRQ_TYPE_EDGE_RISING>; /* gpio 53 */
+               interrupts = <21 IRQ_TYPE_LEVEL_HIGH>; /* gpio 53 */
                ref-clock-frequency = <26000000>;
                tcxo-clock-frequency = <26000000>;
        };
index 6dbbc9b..d003221 100644 (file)
@@ -69,7 +69,7 @@
                compatible = "ti,wl1271";
                reg = <2>;
                interrupt-parent = <&gpio2>;
-               interrupts = <9 IRQ_TYPE_EDGE_RISING>; /* gpio 41 */
+               interrupts = <9 IRQ_TYPE_LEVEL_HIGH>; /* gpio 41 */
                ref-clock-frequency = <38400000>;
        };
 };
index 7fff555..68ac046 100644 (file)
                pinctrl-names = "default";
                pinctrl-0 = <&wlcore_irq_pin>;
                interrupt-parent = <&gpio1>;
-               interrupts = <14 IRQ_TYPE_EDGE_RISING>; /* gpio 14 */
+               interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;  /* gpio 14 */
                ref-clock-frequency = <26000000>;
        };
 };
index fac2e57..4791834 100644 (file)
                };
        };
 
-       gpu_cm: clock-controller@1500 {
+       gpu_cm: gpu_cm@1500 {
                compatible = "ti,omap4-cm";
                reg = <0x1500 0x100>;
                #address-cells = <1>;
index e4a0d51..0a3a7d6 100644 (file)
                                                 <STM32_PINMUX('F', 6, AF9)>; /* QSPI_BK1_IO3 */
                                        bias-disable;
                                        drive-push-pull;
-                                       slew-rate = <3>;
+                                       slew-rate = <1>;
                                };
                                pins2 {
                                        pinmux = <STM32_PINMUX('B', 6, AF10)>; /* QSPI_BK1_NCS */
                                        bias-pull-up;
                                        drive-push-pull;
-                                       slew-rate = <3>;
+                                       slew-rate = <1>;
                                };
                        };
 
                                                 <STM32_PINMUX('G', 7, AF11)>; /* QSPI_BK2_IO3 */
                                        bias-disable;
                                        drive-push-pull;
-                                       slew-rate = <3>;
+                                       slew-rate = <1>;
                                };
                                pins2 {
                                        pinmux = <STM32_PINMUX('C', 0, AF10)>; /* QSPI_BK2_NCS */
                                        bias-pull-up;
                                        drive-push-pull;
-                                       slew-rate = <3>;
+                                       slew-rate = <1>;
                                };
                        };
 
index 874231b..8aebefd 100644 (file)
                        compatible = "allwinner,sun7i-a20-csi0";
                        reg = <0x01c09000 0x1000>;
                        interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI0>,
-                                <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
-                       clock-names = "bus", "mod", "isp", "ram";
+                       clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
+                       clock-names = "bus", "isp", "ram";
                        resets = <&ccu RST_CSI0>;
                        status = "disabled";
                };
index dc8a5f3..c8ebb23 100644 (file)
                #address-cells = <1>;
                #size-cells = <0>;
                reg = <0x70>;
+               i2c-mux-idle-disconnect;
 
                sff0_i2c: i2c@1 {
                        #address-cells = <1>;
                reg = <0x71>;
                #address-cells = <1>;
                #size-cells = <0>;
+               i2c-mux-idle-disconnect;
 
                sff5_i2c: i2c@1 {
                        #address-cells = <1>;
index 01e3c0f..231f897 100644 (file)
@@ -167,6 +167,7 @@ CONFIG_FB=y
 CONFIG_FIRMWARE_EDID=y
 CONFIG_FB_DA8XX=y
 CONFIG_BACKLIGHT_PWM=m
+CONFIG_BACKLIGHT_GPIO=m
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 CONFIG_SOUND=m
index 9bfffbe..0f7381e 100644 (file)
@@ -276,6 +276,7 @@ CONFIG_VIDEO_OV5640=m
 CONFIG_VIDEO_OV5645=m
 CONFIG_IMX_IPUV3_CORE=y
 CONFIG_DRM=y
+CONFIG_DRM_MSM=y
 CONFIG_DRM_PANEL_LVDS=y
 CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
index d3f5097..40d7f1a 100644 (file)
@@ -356,15 +356,15 @@ CONFIG_DRM_OMAP_CONNECTOR_HDMI=m
 CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV=m
 CONFIG_DRM_OMAP_PANEL_DPI=m
 CONFIG_DRM_OMAP_PANEL_DSI_CM=m
-CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM=m
-CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02=m
-CONFIG_DRM_OMAP_PANEL_SHARP_LS037V7DW01=m
-CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1=m
-CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1=m
-CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11=m
 CONFIG_DRM_TILCDC=m
 CONFIG_DRM_PANEL_SIMPLE=m
 CONFIG_DRM_TI_TFP410=m
+CONFIG_DRM_PANEL_LG_LB035Q02=m
+CONFIG_DRM_PANEL_NEC_NL8048HL11=m
+CONFIG_DRM_PANEL_SHARP_LS037V7DW01=m
+CONFIG_DRM_PANEL_SONY_ACX565AKM=m
+CONFIG_DRM_PANEL_TPO_TD028TTEC1=m
+CONFIG_DRM_PANEL_TPO_TD043MTEA1=m
 CONFIG_FB=y
 CONFIG_FIRMWARE_EDID=y
 CONFIG_FB_MODE_HELPERS=y
index 567dbed..f1d0a78 100644 (file)
@@ -82,7 +82,7 @@
 #ifndef __ASSEMBLY__
 
 #ifdef CONFIG_CPU_CP15_MMU
-static inline unsigned int get_domain(void)
+static __always_inline unsigned int get_domain(void)
 {
        unsigned int domain;
 
@@ -94,7 +94,7 @@ static inline unsigned int get_domain(void)
        return domain;
 }
 
-static inline void set_domain(unsigned val)
+static __always_inline void set_domain(unsigned int val)
 {
        asm volatile(
        "mcr    p15, 0, %0, c3, c0      @ set domain"
@@ -102,12 +102,12 @@ static inline void set_domain(unsigned val)
        isb();
 }
 #else
-static inline unsigned int get_domain(void)
+static __always_inline unsigned int get_domain(void)
 {
        return 0;
 }
 
-static inline void set_domain(unsigned val)
+static __always_inline void set_domain(unsigned int val)
 {
 }
 #endif
index 303248e..98c6b91 100644 (file)
@@ -22,7 +22,7 @@
  * perform such accesses (eg, via list poison values) which could then
  * be exploited for priviledge escalation.
  */
-static inline unsigned int uaccess_save_and_enable(void)
+static __always_inline unsigned int uaccess_save_and_enable(void)
 {
 #ifdef CONFIG_CPU_SW_DOMAIN_PAN
        unsigned int old_domain = get_domain();
@@ -37,7 +37,7 @@ static inline unsigned int uaccess_save_and_enable(void)
 #endif
 }
 
-static inline void uaccess_restore(unsigned int flags)
+static __always_inline void uaccess_restore(unsigned int flags)
 {
 #ifdef CONFIG_CPU_SW_DOMAIN_PAN
        /* Restore the user access mask */
index a7810be..4a39828 100644 (file)
@@ -68,7 +68,7 @@ ENDPROC(__vet_atags)
  * The following fragment of code is executed with the MMU on in MMU mode,
  * and uses absolute addresses; this is not position independent.
  *
- *  r0  = cp#15 control register
+ *  r0  = cp#15 control register (exc_ret for M-class)
  *  r1  = machine ID
  *  r2  = atags/dtb pointer
  *  r9  = processor ID
@@ -137,7 +137,8 @@ __mmap_switched_data:
 #ifdef CONFIG_CPU_CP15
        .long   cr_alignment                    @ r3
 #else
-       .long   0                               @ r3
+M_CLASS(.long  exc_ret)                        @ r3
+AR_CLASS(.long 0)                              @ r3
 #endif
        .size   __mmap_switched_data, . - __mmap_switched_data
 
index afa350f..0fc814b 100644 (file)
@@ -201,6 +201,8 @@ M_CLASS(streq       r3, [r12, #PMSAv8_MAIR1])
        bic     r0, r0, #V7M_SCB_CCR_IC
 #endif
        str     r0, [r12, V7M_SCB_CCR]
+       /* Pass exc_ret to __mmap_switched */
+       mov     r0, r10
 #endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
        ret     lr
 ENDPROC(__after_proc_init)
index 8062412..9fc5c73 100644 (file)
@@ -462,8 +462,8 @@ static s8 dm365_queue_priority_mapping[][2] = {
 };
 
 static const struct dma_slave_map dm365_edma_map[] = {
-       { "davinci-mcbsp.0", "tx", EDMA_FILTER_PARAM(0, 2) },
-       { "davinci-mcbsp.0", "rx", EDMA_FILTER_PARAM(0, 3) },
+       { "davinci-mcbsp", "tx", EDMA_FILTER_PARAM(0, 2) },
+       { "davinci-mcbsp", "rx", EDMA_FILTER_PARAM(0, 3) },
        { "davinci_voicecodec", "tx", EDMA_FILTER_PARAM(0, 2) },
        { "davinci_voicecodec", "rx", EDMA_FILTER_PARAM(0, 3) },
        { "spi_davinci.2", "tx", EDMA_FILTER_PARAM(0, 10) },
index d942a33..2efd18e 100644 (file)
@@ -89,6 +89,13 @@ static struct iommu_platform_data omap3_iommu_pdata = {
        .reset_name = "mmu",
        .assert_reset = omap_device_assert_hardreset,
        .deassert_reset = omap_device_deassert_hardreset,
+       .device_enable = omap_device_enable,
+       .device_idle = omap_device_idle,
+};
+
+static struct iommu_platform_data omap3_iommu_isp_pdata = {
+       .device_enable = omap_device_enable,
+       .device_idle = omap_device_idle,
 };
 
 static int omap3_sbc_t3730_twl_callback(struct device *dev,
@@ -424,6 +431,8 @@ static struct iommu_platform_data omap4_iommu_pdata = {
        .reset_name = "mmu_cache",
        .assert_reset = omap_device_assert_hardreset,
        .deassert_reset = omap_device_deassert_hardreset,
+       .device_enable = omap_device_enable,
+       .device_idle = omap_device_idle,
 };
 #endif
 
@@ -617,6 +626,8 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = {
 #ifdef CONFIG_ARCH_OMAP3
        OF_DEV_AUXDATA("ti,omap2-iommu", 0x5d000000, "5d000000.mmu",
                       &omap3_iommu_pdata),
+       OF_DEV_AUXDATA("ti,omap2-iommu", 0x480bd400, "480bd400.mmu",
+                      &omap3_iommu_isp_pdata),
        OF_DEV_AUXDATA("ti,omap3-smartreflex-core", 0x480cb000,
                       "480cb000.smartreflex", &omap_sr_pdata[OMAP_SR_CORE]),
        OF_DEV_AUXDATA("ti,omap3-smartreflex-mpu-iva", 0x480c9000,
index 04b3643..788c5cf 100644 (file)
@@ -324,7 +324,7 @@ union offset_union {
        __put32_unaligned_check("strbt", val, addr)
 
 static void
-do_alignment_finish_ldst(unsigned long addr, unsigned long instr, struct pt_regs *regs, union offset_union offset)
+do_alignment_finish_ldst(unsigned long addr, u32 instr, struct pt_regs *regs, union offset_union offset)
 {
        if (!LDST_U_BIT(instr))
                offset.un = -offset.un;
@@ -337,7 +337,7 @@ do_alignment_finish_ldst(unsigned long addr, unsigned long instr, struct pt_regs
 }
 
 static int
-do_alignment_ldrhstrh(unsigned long addr, unsigned long instr, struct pt_regs *regs)
+do_alignment_ldrhstrh(unsigned long addr, u32 instr, struct pt_regs *regs)
 {
        unsigned int rd = RD_BITS(instr);
 
@@ -386,8 +386,7 @@ do_alignment_ldrhstrh(unsigned long addr, unsigned long instr, struct pt_regs *r
 }
 
 static int
-do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
-                     struct pt_regs *regs)
+do_alignment_ldrdstrd(unsigned long addr, u32 instr, struct pt_regs *regs)
 {
        unsigned int rd = RD_BITS(instr);
        unsigned int rd2;
@@ -449,7 +448,7 @@ do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
 }
 
 static int
-do_alignment_ldrstr(unsigned long addr, unsigned long instr, struct pt_regs *regs)
+do_alignment_ldrstr(unsigned long addr, u32 instr, struct pt_regs *regs)
 {
        unsigned int rd = RD_BITS(instr);
 
@@ -498,7 +497,7 @@ do_alignment_ldrstr(unsigned long addr, unsigned long instr, struct pt_regs *reg
  * PU = 10             A                    B
  */
 static int
-do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *regs)
+do_alignment_ldmstm(unsigned long addr, u32 instr, struct pt_regs *regs)
 {
        unsigned int rd, rn, correction, nr_regs, regbits;
        unsigned long eaddr, newaddr;
@@ -539,7 +538,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *reg
         * processor for us.
         */
        if (addr != eaddr) {
-               pr_err("LDMSTM: PC = %08lx, instr = %08lx, "
+               pr_err("LDMSTM: PC = %08lx, instr = %08x, "
                        "addr = %08lx, eaddr = %08lx\n",
                         instruction_pointer(regs), instr, addr, eaddr);
                show_regs(regs);
@@ -716,10 +715,10 @@ thumb2arm(u16 tinstr)
  * 2. Register name Rt from ARMv7 is same as Rd from ARMv6 (Rd is Rt)
  */
 static void *
-do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
+do_alignment_t32_to_handler(u32 *pinstr, struct pt_regs *regs,
                            union offset_union *poffset)
 {
-       unsigned long instr = *pinstr;
+       u32 instr = *pinstr;
        u16 tinst1 = (instr >> 16) & 0xffff;
        u16 tinst2 = instr & 0xffff;
 
@@ -767,17 +766,48 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
        return NULL;
 }
 
+static int alignment_get_arm(struct pt_regs *regs, u32 *ip, u32 *inst)
+{
+       u32 instr = 0;
+       int fault;
+
+       if (user_mode(regs))
+               fault = get_user(instr, ip);
+       else
+               fault = probe_kernel_address(ip, instr);
+
+       *inst = __mem_to_opcode_arm(instr);
+
+       return fault;
+}
+
+static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst)
+{
+       u16 instr = 0;
+       int fault;
+
+       if (user_mode(regs))
+               fault = get_user(instr, ip);
+       else
+               fault = probe_kernel_address(ip, instr);
+
+       *inst = __mem_to_opcode_thumb16(instr);
+
+       return fault;
+}
+
 static int
 do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
        union offset_union uninitialized_var(offset);
-       unsigned long instr = 0, instrptr;
-       int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
+       unsigned long instrptr;
+       int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
        unsigned int type;
-       unsigned int fault;
+       u32 instr = 0;
        u16 tinstr = 0;
        int isize = 4;
        int thumb2_32b = 0;
+       int fault;
 
        if (interrupts_enabled(regs))
                local_irq_enable();
@@ -786,15 +816,14 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
        if (thumb_mode(regs)) {
                u16 *ptr = (u16 *)(instrptr & ~1);
-               fault = probe_kernel_address(ptr, tinstr);
-               tinstr = __mem_to_opcode_thumb16(tinstr);
+
+               fault = alignment_get_thumb(regs, ptr, &tinstr);
                if (!fault) {
                        if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
                            IS_T32(tinstr)) {
                                /* Thumb-2 32-bit */
-                               u16 tinst2 = 0;
-                               fault = probe_kernel_address(ptr + 1, tinst2);
-                               tinst2 = __mem_to_opcode_thumb16(tinst2);
+                               u16 tinst2;
+                               fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
                                instr = __opcode_thumb32_compose(tinstr, tinst2);
                                thumb2_32b = 1;
                        } else {
@@ -803,8 +832,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
                        }
                }
        } else {
-               fault = probe_kernel_address((void *)instrptr, instr);
-               instr = __mem_to_opcode_arm(instr);
+               fault = alignment_get_arm(regs, (void *)instrptr, &instr);
        }
 
        if (fault) {
@@ -926,7 +954,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
         * Oops, we didn't handle the instruction.
         */
        pr_err("Alignment trap: not handling instruction "
-               "%0*lx at [<%08lx>]\n",
+               "%0*x at [<%08lx>]\n",
                isize << 1,
                isize == 2 ? tinstr : instr, instrptr);
        ai_skipped += 1;
@@ -936,7 +964,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        ai_user += 1;
 
        if (ai_usermode & UM_WARN)
-               printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
+               printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*x "
                       "Address=0x%08lx FSR 0x%03x\n", current->comm,
                        task_pid_nr(current), instrptr,
                        isize << 1,
index 1448f14..1a49d50 100644 (file)
@@ -132,13 +132,11 @@ __v7m_setup_cont:
        dsb
        mov     r6, lr                  @ save LR
        ldr     sp, =init_thread_union + THREAD_START_SP
-       stmia   sp, {r0-r3, r12}
        cpsie   i
        svc     #0
 1:     cpsid   i
-       ldr     r0, =exc_ret
-       orr     lr, lr, #EXC_RET_THREADMODE_PROCESSSTACK
-       str     lr, [r0]
+       /* Calculate exc_ret */
+       orr     r10, lr, #EXC_RET_THREADMODE_PROCESSSTACK
        ldmia   sp, {r0-r3, r12}
        str     r5, [r12, #11 * 4]      @ restore the original SVC vector entry
        mov     lr, r6                  @ restore LR
index 24f1aac..d5b6e81 100644 (file)
                reg = <1>;
        };
 };
+
+&reg_dc1sw {
+       /*
+        * Ethernet PHY needs 30ms to properly power up and some more
+        * to initialize. 100ms should be plenty of time to finish
+        * whole process.
+        */
+       regulator-enable-ramp-delay = <100000>;
+};
index e6fb968..2509920 100644 (file)
 };
 
 &reg_dc1sw {
+       /*
+        * Ethernet PHY needs 30ms to properly power up and some more
+        * to initialize. 100ms should be plenty of time to finish
+        * whole process.
+        */
+       regulator-enable-ramp-delay = <100000>;
        regulator-name = "vcc-phy";
 };
 
index 3eccbdb..70f4cce 100644 (file)
                clock-output-names = "ext-osc32k";
        };
 
-       pmu {
-               compatible = "arm,cortex-a53-pmu";
-               interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
-                            <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
-                            <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
-                            <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
-               interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
-       };
-
        psci {
                compatible = "arm,psci-0.2";
                method = "smc";
index 8a3a770..56789cc 100644 (file)
 
                pinmux: pinmux@14029c {
                        compatible = "pinctrl-single";
-                       reg = <0x0014029c 0x250>;
+                       reg = <0x0014029c 0x26c>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        pinctrl-single,register-width = <32>;
                        pinctrl-single,function-mask = <0xf>;
                        pinctrl-single,gpio-range = <
-                               &range 0 154 MODE_GPIO
+                               &range 0  91 MODE_GPIO
+                               &range 95 60 MODE_GPIO
                                >;
                        range: gpio-range {
                                #pinctrl-single,gpio-range-cells = <3>;
index 71e2e34..0098dfd 100644 (file)
                                        <&pinmux 108 16 27>,
                                        <&pinmux 135 77 6>,
                                        <&pinmux 141 67 4>,
-                                       <&pinmux 145 149 6>,
-                                       <&pinmux 151 91 4>;
+                                       <&pinmux 145 149 6>;
                };
 
                i2c1: i2c@e0000 {
index 408e0ec..b032f38 100644 (file)
@@ -33,7 +33,7 @@
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster0_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@1 {
@@ -49,7 +49,7 @@
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster0_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@100 {
@@ -65,7 +65,7 @@
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster1_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@101 {
@@ -81,7 +81,7 @@
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster1_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@200 {
@@ -97,7 +97,7 @@
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster2_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@201 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster2_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@300 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster3_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@301 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster3_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@400 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster4_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@401 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster4_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@500 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster5_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@501 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster5_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@600 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster6_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@601 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster6_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@700 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster7_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@701 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster7_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cluster0_l2: l2-cache0 {
                        cache-level = <2>;
                };
 
-               cpu_pw20: cpu-pw20 {
+               cpu_pw15: cpu-pw15 {
                        compatible = "arm,idle-state";
-                       idle-state-name = "PW20";
+                       idle-state-name = "PW15";
                        arm,psci-suspend-param = <0x0>;
                        entry-latency-us = <2000>;
                        exit-latency-us = <2000>;
index 5f9d0da..58b8cd0 100644 (file)
                                compatible = "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc";
                                reg = <0x30b40000 0x10000>;
                                interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MM_CLK_DUMMY>,
+                               clocks = <&clk IMX8MM_CLK_IPG_ROOT>,
                                         <&clk IMX8MM_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MM_CLK_USDHC1_ROOT>;
                                clock-names = "ipg", "ahb", "per";
                                compatible = "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc";
                                reg = <0x30b50000 0x10000>;
                                interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MM_CLK_DUMMY>,
+                               clocks = <&clk IMX8MM_CLK_IPG_ROOT>,
                                         <&clk IMX8MM_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MM_CLK_USDHC2_ROOT>;
                                clock-names = "ipg", "ahb", "per";
                                compatible = "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc";
                                reg = <0x30b60000 0x10000>;
                                interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MM_CLK_DUMMY>,
+                               clocks = <&clk IMX8MM_CLK_IPG_ROOT>,
                                         <&clk IMX8MM_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MM_CLK_USDHC3_ROOT>;
                                clock-names = "ipg", "ahb", "per";
index 785f4c4..98496f5 100644 (file)
                                compatible = "fsl,imx8mn-usdhc", "fsl,imx7d-usdhc";
                                reg = <0x30b40000 0x10000>;
                                interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MN_CLK_DUMMY>,
+                               clocks = <&clk IMX8MN_CLK_IPG_ROOT>,
                                         <&clk IMX8MN_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MN_CLK_USDHC1_ROOT>;
                                clock-names = "ipg", "ahb", "per";
                                compatible = "fsl,imx8mn-usdhc", "fsl,imx7d-usdhc";
                                reg = <0x30b50000 0x10000>;
                                interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MN_CLK_DUMMY>,
+                               clocks = <&clk IMX8MN_CLK_IPG_ROOT>,
                                         <&clk IMX8MN_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MN_CLK_USDHC2_ROOT>;
                                clock-names = "ipg", "ahb", "per";
                                compatible = "fsl,imx8mn-usdhc", "fsl,imx7d-usdhc";
                                reg = <0x30b60000 0x10000>;
                                interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MN_CLK_DUMMY>,
+                               clocks = <&clk IMX8MN_CLK_IPG_ROOT>,
                                         <&clk IMX8MN_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MN_CLK_USDHC3_ROOT>;
                                clock-names = "ipg", "ahb", "per";
index af99473..087b5b6 100644 (file)
@@ -89,8 +89,8 @@
                regulator-min-microvolt = <900000>;
                regulator-max-microvolt = <1000000>;
                gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
-               states = <1000000 0x0
-                          900000 0x1>;
+               states = <1000000 0x1
+                          900000 0x0>;
                regulator-always-on;
        };
 };
index 04115ca..55a3d1c 100644 (file)
                                             "fsl,imx7d-usdhc";
                                reg = <0x30b40000 0x10000>;
                                interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MQ_CLK_DUMMY>,
+                               clocks = <&clk IMX8MQ_CLK_IPG_ROOT>,
                                         <&clk IMX8MQ_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MQ_CLK_USDHC1_ROOT>;
                                clock-names = "ipg", "ahb", "per";
                                             "fsl,imx7d-usdhc";
                                reg = <0x30b50000 0x10000>;
                                interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MQ_CLK_DUMMY>,
+                               clocks = <&clk IMX8MQ_CLK_IPG_ROOT>,
                                         <&clk IMX8MQ_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MQ_CLK_USDHC2_ROOT>;
                                clock-names = "ipg", "ahb", "per";
index d105986..5f350cc 100644 (file)
                gpio = <&gpiosb 0 GPIO_ACTIVE_HIGH>;
        };
 
-       usb3_phy: usb3-phy {
-               compatible = "usb-nop-xceiv";
-               vcc-supply = <&exp_usb3_vbus>;
-       };
-
        vsdc_reg: vsdc-reg {
                compatible = "regulator-gpio";
                regulator-name = "vsdc";
        status = "okay";
 };
 
+&comphy2 {
+       connector {
+               compatible = "usb-a-connector";
+               phy-supply = <&exp_usb3_vbus>;
+       };
+};
+
 &usb3 {
        status = "okay";
        phys = <&comphy2 0>;
-       usb-phy = <&usb3_phy>;
 };
 
 &mdio {
index e152b0c..b806686 100644 (file)
@@ -44,7 +44,7 @@
                power-supply = <&pp3300_disp>;
 
                panel-timing {
-                       clock-frequency = <266604720>;
+                       clock-frequency = <266666667>;
                        hactive = <2400>;
                        hfront-porch = <48>;
                        hback-porch = <84>;
index 0d1f5f9..c133e8d 100644 (file)
        status = "okay";
 
        u2phy0_host: host-port {
-               phy-supply = <&vcc5v0_host>;
+               phy-supply = <&vcc5v0_typec>;
                status = "okay";
        };
 
 
 &usbdrd_dwc3_0 {
        status = "okay";
-       dr_mode = "otg";
+       dr_mode = "host";
 };
 
 &usbdrd3_1 {
index 0401d4e..e544deb 100644 (file)
                regulator-always-on;
                regulator-boot-on;
                regulator-min-microvolt = <800000>;
-               regulator-max-microvolt = <1400000>;
+               regulator-max-microvolt = <1700000>;
                vin-supply = <&vcc5v0_sys>;
        };
 };
        rk808: pmic@1b {
                compatible = "rockchip,rk808";
                reg = <0x1b>;
-               interrupt-parent = <&gpio1>;
-               interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
+               interrupt-parent = <&gpio3>;
+               interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
                #clock-cells = <1>;
                clock-output-names = "xin32k", "rk808-clkout2";
                pinctrl-names = "default";
 
        pmic {
                pmic_int_l: pmic-int-l {
-                       rockchip,pins = <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
+                       rockchip,pins = <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
                };
 
                vsel1_gpio: vsel1-gpio {
 
 &sdmmc {
        bus-width = <4>;
-       cap-mmc-highspeed;
        cap-sd-highspeed;
        cd-gpios = <&gpio0 7 GPIO_ACTIVE_LOW>;
        disable-wp;
 
 &sdhci {
        bus-width = <8>;
-       mmc-hs400-1_8v;
-       mmc-hs400-enhanced-strobe;
+       mmc-hs200-1_8v;
        non-removable;
        status = "okay";
 };
index b1454d1..aca07c2 100644 (file)
@@ -79,6 +79,7 @@
 #define CAVIUM_CPU_PART_THUNDERX_83XX  0x0A3
 #define CAVIUM_CPU_PART_THUNDERX2      0x0AF
 
+#define BRCM_CPU_PART_BRAHMA_B53       0x100
 #define BRCM_CPU_PART_VULCAN           0x516
 
 #define QCOM_CPU_PART_FALKOR_V1                0x800
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
 #define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2)
+#define MIDR_BRAHMA_B53 MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_BRAHMA_B53)
 #define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN)
 #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
 #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
index 9a21b84..8dc6c5c 100644 (file)
 #define PROT_DEFAULT           (_PROT_DEFAULT | PTE_MAYBE_NG)
 #define PROT_SECT_DEFAULT      (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
 
-#define PROT_DEVICE_nGnRnE     (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
-#define PROT_DEVICE_nGnRE      (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
-#define PROT_NORMAL            (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_DEVICE_nGnRnE     (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+#define PROT_DEVICE_nGnRE      (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL_WT         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
+#define PROT_NORMAL            (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
 
 #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_SECT_NORMAL       (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
@@ -80,8 +80,9 @@
 #define PAGE_S2_DEVICE         __pgprot(_PROT_DEFAULT | PAGE_S2_MEMATTR(DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_S2_XN)
 
 #define PAGE_NONE              __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED            __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
-#define PAGE_SHARED_EXEC       __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+/* shared+writable pages are clean by default, hence PTE_RDONLY|PTE_WRITE */
+#define PAGE_SHARED            __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PAGE_SHARED_EXEC       __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_READONLY          __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_READONLY_EXEC     __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
 #define PAGE_EXECONLY          __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
index 6c3b10a..93f34b4 100644 (file)
@@ -489,6 +489,7 @@ static const struct midr_range arm64_ssb_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+       MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
        {},
 };
 
@@ -573,6 +574,7 @@ static const struct midr_range spectre_v2_safe_list[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+       MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
        { /* sentinel */ }
 };
 
@@ -659,17 +661,23 @@ static const struct midr_range arm64_harden_el2_vectors[] = {
 #endif
 
 #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
-
-static const struct midr_range arm64_repeat_tlbi_cpus[] = {
+static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
-       MIDR_RANGE(MIDR_QCOM_FALKOR_V1, 0, 0, 0, 0),
+       {
+               ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
+       },
+       {
+               .midr_range.model = MIDR_QCOM_KRYO,
+               .matches = is_kryo_midr,
+       },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_1286807
-       MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+       {
+               ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+       },
 #endif
        {},
 };
-
 #endif
 
 #ifdef CONFIG_CAVIUM_ERRATUM_27456
@@ -737,6 +745,33 @@ static const struct midr_range erratum_1418040_list[] = {
 };
 #endif
 
+#ifdef CONFIG_ARM64_ERRATUM_845719
+static const struct midr_range erratum_845719_list[] = {
+       /* Cortex-A53 r0p[01234] */
+       MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+       /* Brahma-B53 r0p[0] */
+       MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
+       {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_ERRATUM_843419
+static const struct arm64_cpu_capabilities erratum_843419_list[] = {
+       {
+               /* Cortex-A53 r0p[01234] */
+               .matches = is_affected_midr_range,
+               ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+               MIDR_FIXED(0x4, BIT(8)),
+       },
+       {
+               /* Brahma-B53 r0p[0] */
+               .matches = is_affected_midr_range,
+               ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
+       },
+       {},
+};
+#endif
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
        {
@@ -768,19 +803,18 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_843419
        {
-       /* Cortex-A53 r0p[01234] */
                .desc = "ARM erratum 843419",
                .capability = ARM64_WORKAROUND_843419,
-               ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
-               MIDR_FIXED(0x4, BIT(8)),
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .matches = cpucap_multi_entry_cap_matches,
+               .match_list = erratum_843419_list,
        },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_845719
        {
-       /* Cortex-A53 r0p[01234] */
                .desc = "ARM erratum 845719",
                .capability = ARM64_WORKAROUND_845719,
-               ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+               ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
        },
 #endif
 #ifdef CONFIG_CAVIUM_ERRATUM_23154
@@ -816,6 +850,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        {
                .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = qcom_erratum_1003_list,
        },
@@ -824,7 +859,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        {
                .desc = "Qualcomm erratum 1009, ARM erratum 1286807",
                .capability = ARM64_WORKAROUND_REPEAT_TLBI,
-               ERRATA_MIDR_RANGE_LIST(arm64_repeat_tlbi_cpus),
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .matches = cpucap_multi_entry_cap_matches,
+               .match_list = arm64_repeat_tlbi_list,
        },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_858921
index 2071260..46822af 100644 (file)
@@ -632,6 +632,8 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
         */
        val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
               | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
+       if (!system_supports_32bit_el0())
+               val |= ARMV8_PMU_PMCR_LC;
        __vcpu_sys_reg(vcpu, r->reg) = val;
 }
 
@@ -682,6 +684,8 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                val = __vcpu_sys_reg(vcpu, PMCR_EL0);
                val &= ~ARMV8_PMU_PMCR_MASK;
                val |= p->regval & ARMV8_PMU_PMCR_MASK;
+               if (!system_supports_32bit_el0())
+                       val |= ARMV8_PMU_PMCR_LC;
                __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
                kvm_pmu_handle_pmcr(vcpu, val);
                kvm_vcpu_pmu_restore_guest(vcpu);
index 77a836e..df69eaa 100644 (file)
@@ -84,7 +84,7 @@ void __init prom_init(void)
                 * Here we will start up CPU1 in the background and ask it to
                 * reconfigure itself then go back to sleep.
                 */
-               memcpy((void *)0xa0000200, &bmips_smp_movevec, 0x20);
+               memcpy((void *)0xa0000200, bmips_smp_movevec, 0x20);
                __sync();
                set_c0_cause(C_SW0);
                cpumask_set_cpu(1, &bmips_booted_mask);
index bf6a8af..581a6a3 100644 (file)
@@ -75,11 +75,11 @@ static inline int register_bmips_smp_ops(void)
 #endif
 }
 
-extern char bmips_reset_nmi_vec;
-extern char bmips_reset_nmi_vec_end;
-extern char bmips_smp_movevec;
-extern char bmips_smp_int_vec;
-extern char bmips_smp_int_vec_end;
+extern char bmips_reset_nmi_vec[];
+extern char bmips_reset_nmi_vec_end[];
+extern char bmips_smp_movevec[];
+extern char bmips_smp_int_vec[];
+extern char bmips_smp_int_vec_end[];
 
 extern int bmips_smp_enabled;
 extern int bmips_cpu_offset;
index e78462e..b088255 100644 (file)
@@ -24,6 +24,8 @@
 
 #define VDSO_HAS_CLOCK_GETRES          1
 
+#define __VDSO_USE_SYSCALL             ULLONG_MAX
+
 #ifdef CONFIG_MIPS_CLOCK_VSYSCALL
 
 static __always_inline long gettimeofday_fallback(
@@ -205,7 +207,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
                break;
 #endif
        default:
-               cycle_now = 0;
+               cycle_now = __VDSO_USE_SYSCALL;
                break;
        }
 
index 76fae9b..712c15d 100644 (file)
@@ -464,10 +464,10 @@ static void bmips_wr_vec(unsigned long dst, char *start, char *end)
 
 static inline void bmips_nmi_handler_setup(void)
 {
-       bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec,
-               &bmips_reset_nmi_vec_end);
-       bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec,
-               &bmips_smp_int_vec_end);
+       bmips_wr_vec(BMIPS_NMI_RESET_VEC, bmips_reset_nmi_vec,
+               bmips_reset_nmi_vec_end);
+       bmips_wr_vec(BMIPS_WARM_RESTART_VEC, bmips_smp_int_vec,
+               bmips_smp_int_vec_end);
 }
 
 struct reset_vec_info {
index e01cb33..41bb91f 100644 (file)
@@ -653,6 +653,13 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
                                   int restore_scratch)
 {
        if (restore_scratch) {
+               /*
+                * Ensure the MFC0 below observes the value written to the
+                * KScratch register by the prior MTC0.
+                */
+               if (scratch_reg >= 0)
+                       uasm_i_ehb(p);
+
                /* Reset default page size */
                if (PM_DEFAULT_MASK >> 16) {
                        uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
@@ -667,12 +674,10 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
                        uasm_i_mtc0(p, 0, C0_PAGEMASK);
                        uasm_il_b(p, r, lid);
                }
-               if (scratch_reg >= 0) {
-                       uasm_i_ehb(p);
+               if (scratch_reg >= 0)
                        UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-               } else {
+               else
                        UASM_i_LW(p, 1, scratchpad_offset(0), 0);
-               }
        } else {
                /* Reset default page size */
                if (PM_DEFAULT_MASK >> 16) {
@@ -921,6 +926,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
        }
        if (mode != not_refill && check_for_high_segbits) {
                uasm_l_large_segbits_fault(l, *p);
+
+               if (mode == refill_scratch && scratch_reg >= 0)
+                       uasm_i_ehb(p);
+
                /*
                 * We get here if we are an xsseg address, or if we are
                 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
@@ -939,12 +948,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                uasm_i_jr(p, ptr);
 
                if (mode == refill_scratch) {
-                       if (scratch_reg >= 0) {
-                               uasm_i_ehb(p);
+                       if (scratch_reg >= 0)
                                UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-                       } else {
+                       else
                                UASM_i_LW(p, 1, scratchpad_offset(0), 0);
-                       }
                } else {
                        uasm_i_nop(p);
                }
index 1d1d748..b96d744 100644 (file)
@@ -2125,7 +2125,7 @@ ftrace_regs_caller:
        copy    %rp, %r26
        LDREG   -FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
        ldo     -8(%r25), %r25
-       copy    %r3, %arg2
+       ldo     -FTRACE_FRAME_SIZE(%r1), %arg2
        b,l     ftrace_function_trampoline, %rp
        copy    %r1, %arg3 /* struct pt_regs */
 
index 591bfb4..a3f9c66 100644 (file)
@@ -1217,6 +1217,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
        struct kvmppc_xive *xive = dev->private;
        struct kvmppc_xive_vcpu *xc;
        int i, r = -EBUSY;
+       u32 vp_id;
 
        pr_devel("connect_vcpu(cpu=%d)\n", cpu);
 
@@ -1228,25 +1229,32 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
                return -EPERM;
        if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
                return -EBUSY;
-       if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
-               pr_devel("Duplicate !\n");
-               return -EEXIST;
-       }
        if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
                pr_devel("Out of bounds !\n");
                return -EINVAL;
        }
-       xc = kzalloc(sizeof(*xc), GFP_KERNEL);
-       if (!xc)
-               return -ENOMEM;
 
        /* We need to synchronize with queue provisioning */
        mutex_lock(&xive->lock);
+
+       vp_id = kvmppc_xive_vp(xive, cpu);
+       if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
+               pr_devel("Duplicate !\n");
+               r = -EEXIST;
+               goto bail;
+       }
+
+       xc = kzalloc(sizeof(*xc), GFP_KERNEL);
+       if (!xc) {
+               r = -ENOMEM;
+               goto bail;
+       }
+
        vcpu->arch.xive_vcpu = xc;
        xc->xive = xive;
        xc->vcpu = vcpu;
        xc->server_num = cpu;
-       xc->vp_id = kvmppc_xive_vp(xive, cpu);
+       xc->vp_id = vp_id;
        xc->mfrr = 0xff;
        xc->valid = true;
 
index 955b820..fe3ed50 100644 (file)
@@ -220,6 +220,18 @@ static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server)
        return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
 }
 
+static inline bool kvmppc_xive_vp_in_use(struct kvm *kvm, u32 vp_id)
+{
+       struct kvm_vcpu *vcpu = NULL;
+       int i;
+
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               if (vcpu->arch.xive_vcpu && vp_id == vcpu->arch.xive_vcpu->vp_id)
+                       return true;
+       }
+       return false;
+}
+
 /*
  * Mapping between guest priorities and host priorities
  * is as follow.
index 248c1ea..78b906f 100644 (file)
@@ -106,6 +106,7 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
        struct kvmppc_xive *xive = dev->private;
        struct kvmppc_xive_vcpu *xc = NULL;
        int rc;
+       u32 vp_id;
 
        pr_devel("native_connect_vcpu(server=%d)\n", server_num);
 
@@ -124,7 +125,8 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
 
        mutex_lock(&xive->lock);
 
-       if (kvmppc_xive_find_server(vcpu->kvm, server_num)) {
+       vp_id = kvmppc_xive_vp(xive, server_num);
+       if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
                pr_devel("Duplicate !\n");
                rc = -EEXIST;
                goto bail;
@@ -141,7 +143,7 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
        xc->vcpu = vcpu;
        xc->server_num = server_num;
 
-       xc->vp_id = kvmppc_xive_vp(xive, server_num);
+       xc->vp_id = vp_id;
        xc->valid = true;
        vcpu->arch.irq_type = KVMPPC_IRQ_XIVE;
 
index 07ceee8..75604fe 100644 (file)
@@ -12,7 +12,6 @@
 
 #include <asm/asm.h>
 
-#ifdef CONFIG_GENERIC_BUG
 #define __INSN_LENGTH_MASK  _UL(0x3)
 #define __INSN_LENGTH_32    _UL(0x3)
 #define __COMPRESSED_INSN_MASK _UL(0xffff)
@@ -20,7 +19,6 @@
 #define __BUG_INSN_32  _UL(0x00100073) /* ebreak */
 #define __BUG_INSN_16  _UL(0x9002) /* c.ebreak */
 
-#ifndef __ASSEMBLY__
 typedef u32 bug_insn_t;
 
 #ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
@@ -43,6 +41,7 @@ typedef u32 bug_insn_t;
        RISCV_SHORT " %2"
 #endif
 
+#ifdef CONFIG_GENERIC_BUG
 #define __BUG_FLAGS(flags)                                     \
 do {                                                           \
        __asm__ __volatile__ (                                  \
@@ -58,14 +57,10 @@ do {                                                                \
                  "i" (flags),                                  \
                  "i" (sizeof(struct bug_entry)));              \
 } while (0)
-
-#endif /* !__ASSEMBLY__ */
 #else /* CONFIG_GENERIC_BUG */
-#ifndef __ASSEMBLY__
 #define __BUG_FLAGS(flags) do {                                        \
        __asm__ __volatile__ ("ebreak\n");                      \
 } while (0)
-#endif /* !__ASSEMBLY__ */
 #endif /* CONFIG_GENERIC_BUG */
 
 #define BUG() do {                                             \
@@ -79,15 +74,10 @@ do {                                                                \
 
 #include <asm-generic/bug.h>
 
-#ifndef __ASSEMBLY__
-
 struct pt_regs;
 struct task_struct;
 
-extern void die(struct pt_regs *regs, const char *str);
-extern void do_trap(struct pt_regs *regs, int signo, int code,
-       unsigned long addr);
-
-#endif /* !__ASSEMBLY__ */
+void die(struct pt_regs *regs, const char *str);
+void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr);
 
 #endif /* _ASM_RISCV_BUG_H */
index fc1189a..3ba4d93 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/types.h>
 #include <asm/mmiowb.h>
+#include <asm/pgtable.h>
 
 extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 
@@ -161,6 +162,12 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 #define writeq(v,c)    ({ __io_bw(); writeq_cpu((v),(c)); __io_aw(); })
 #endif
 
+/*
+ *  I/O port access constants.
+ */
+#define IO_SPACE_LIMIT         (PCI_IO_SIZE - 1)
+#define PCI_IOBASE             ((void __iomem *)PCI_IO_START)
+
 /*
  * Emulation routines for the port-mapped IO space used by some PCI drivers.
  * These are defined as being "fully synchronous", but also "not guaranteed to
index 7557642..6e1b0e0 100644 (file)
@@ -7,6 +7,9 @@
 #ifndef _ASM_RISCV_IRQ_H
 #define _ASM_RISCV_IRQ_H
 
+#include <linux/interrupt.h>
+#include <linux/linkage.h>
+
 #define NR_IRQS         0
 
 void riscv_timer_interrupt(void);
index 42292d9..d322101 100644 (file)
@@ -7,6 +7,7 @@
 #define _ASM_RISCV_PGTABLE_H
 
 #include <linux/mmzone.h>
+#include <linux/sizes.h>
 
 #include <asm/pgtable-bits.h>
 
@@ -86,6 +87,7 @@ extern pgd_t swapper_pg_dir[];
 #define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
 #define VMALLOC_END      (PAGE_OFFSET - 1)
 #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
+#define PCI_IO_SIZE      SZ_16M
 
 /*
  * Roughly size the vmemmap space to be large enough to fit enough
@@ -100,7 +102,10 @@ extern pgd_t swapper_pg_dir[];
 
 #define vmemmap                ((struct page *)VMEMMAP_START)
 
-#define FIXADDR_TOP      (VMEMMAP_START)
+#define PCI_IO_END       VMEMMAP_START
+#define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)
+#define FIXADDR_TOP      PCI_IO_START
+
 #ifdef CONFIG_64BIT
 #define FIXADDR_SIZE     PMD_SIZE
 #else
@@ -184,10 +189,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
        return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
 }
 
-static inline pte_t mk_pte(struct page *page, pgprot_t prot)
-{
-       return pfn_pte(page_to_pfn(page), prot);
-}
+#define mk_pte(page, prot)       pfn_pte(page_to_pfn(page), prot)
 
 #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
@@ -428,9 +430,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
 
-#ifdef CONFIG_FLATMEM
 #define kern_addr_valid(addr)   (1) /* FIXME */
-#endif
 
 extern void *dtb_early_va;
 extern void setup_bootmem(void);
index f0227bd..ee4f0ac 100644 (file)
@@ -6,6 +6,7 @@
 #ifndef _ASM_RISCV_SWITCH_TO_H
 #define _ASM_RISCV_SWITCH_TO_H
 
+#include <linux/sched/task_stack.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/csr.h>
index b1ade9a..a5ad000 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/processor.h>
 #include <asm/hwcap.h>
 #include <asm/smp.h>
+#include <asm/switch_to.h>
 
 unsigned long elf_hwcap __read_mostly;
 #ifdef CONFIG_FPU
diff --git a/arch/riscv/kernel/head.h b/arch/riscv/kernel/head.h
new file mode 100644 (file)
index 0000000..105fb04
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 SiFive, Inc.
+ */
+#ifndef __ASM_HEAD_H
+#define __ASM_HEAD_H
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+extern atomic_t hart_lottery;
+
+asmlinkage void do_page_fault(struct pt_regs *regs);
+asmlinkage void __init setup_vm(uintptr_t dtb_pa);
+
+extern void *__cpu_up_stack_pointer[];
+extern void *__cpu_up_task_pointer[];
+
+void __init parse_dtb(void);
+
+#endif /* __ASM_HEAD_H */
index 6d86593..fffac6d 100644 (file)
@@ -24,7 +24,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
        return 0;
 }
 
-asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs)
+asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs(regs);
 
index c9ae483..e264e59 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/elf.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/moduleloader.h>
 
 unsigned long module_emit_got_entry(struct module *mod, unsigned long val)
 {
index fb3a082..85e3c39 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright (C) 2017 SiFive
  */
 
+#include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
@@ -19,6 +20,7 @@
 #include <asm/csr.h>
 #include <asm/string.h>
 #include <asm/switch_to.h>
+#include <asm/thread_info.h>
 
 extern asmlinkage void ret_from_fork(void);
 extern asmlinkage void ret_from_kernel_thread(void);
index 3687514..1252113 100644 (file)
@@ -148,7 +148,7 @@ long arch_ptrace(struct task_struct *child, long request,
  * Allows PTRACE_SYSCALL to work.  These are called from entry.S in
  * {handle,ret_from}_syscall.
  */
-void do_syscall_trace_enter(struct pt_regs *regs)
+__visible void do_syscall_trace_enter(struct pt_regs *regs)
 {
        if (test_thread_flag(TIF_SYSCALL_TRACE))
                if (tracehook_report_syscall_entry(regs))
@@ -162,7 +162,7 @@ void do_syscall_trace_enter(struct pt_regs *regs)
        audit_syscall_entry(regs->a7, regs->a0, regs->a1, regs->a2, regs->a3);
 }
 
-void do_syscall_trace_exit(struct pt_regs *regs)
+__visible void do_syscall_trace_exit(struct pt_regs *regs)
 {
        audit_syscall_exit(regs);
 
index d0fe623..aa56bb1 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <linux/reboot.h>
+#include <linux/pm.h>
 #include <asm/sbi.h>
 
 static void default_power_off(void)
index a990a6c..845ae0e 100644 (file)
@@ -24,6 +24,8 @@
 #include <asm/tlbflush.h>
 #include <asm/thread_info.h>
 
+#include "head.h"
+
 #ifdef CONFIG_DUMMY_CONSOLE
 struct screen_info screen_info = {
        .orig_video_lines       = 30,
index b14d764..d0f6f21 100644 (file)
@@ -26,7 +26,7 @@ struct rt_sigframe {
 
 #ifdef CONFIG_FPU
 static long restore_fp_state(struct pt_regs *regs,
-                            union __riscv_fp_state *sc_fpregs)
+                            union __riscv_fp_state __user *sc_fpregs)
 {
        long err;
        struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
@@ -53,7 +53,7 @@ static long restore_fp_state(struct pt_regs *regs,
 }
 
 static long save_fp_state(struct pt_regs *regs,
-                         union __riscv_fp_state *sc_fpregs)
+                         union __riscv_fp_state __user *sc_fpregs)
 {
        long err;
        struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
@@ -292,8 +292,8 @@ static void do_signal(struct pt_regs *regs)
  * notification of userspace execution resumption
  * - triggered by the _TIF_WORK_MASK flags
  */
-asmlinkage void do_notify_resume(struct pt_regs *regs,
-       unsigned long thread_info_flags)
+asmlinkage __visible void do_notify_resume(struct pt_regs *regs,
+                                          unsigned long thread_info_flags)
 {
        /* Handle pending signal delivery */
        if (thread_info_flags & _TIF_SIGPENDING)
index b18cd6c..5c9ec78 100644 (file)
@@ -8,7 +8,9 @@
  * Copyright (C) 2017 SiFive
  */
 
+#include <linux/cpu.h>
 #include <linux/interrupt.h>
+#include <linux/profile.h>
 #include <linux/smp.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
index 18ae6da..261f408 100644 (file)
@@ -29,6 +29,9 @@
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
 #include <asm/sbi.h>
+#include <asm/smp.h>
+
+#include "head.h"
 
 void *__cpu_up_stack_pointer[NR_CPUS];
 void *__cpu_up_task_pointer[NR_CPUS];
@@ -130,7 +133,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 /*
  * C entry point for a secondary processor.
  */
-asmlinkage void __init smp_callin(void)
+asmlinkage __visible void __init smp_callin(void)
 {
        struct mm_struct *mm = &init_mm;
 
index e5dd52d..f1ead9d 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/syscalls.h>
 #include <asm-generic/syscalls.h>
 #include <asm/vdso.h>
+#include <asm/syscall.h>
 
 #undef __SYSCALL
 #define __SYSCALL(nr, call)    [nr] = (call),
index 9dd1f2e..6a53c02 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/clocksource.h>
 #include <linux/delay.h>
 #include <asm/sbi.h>
+#include <asm/processor.h>
 
 unsigned long riscv_timebase;
 EXPORT_SYMBOL_GPL(riscv_timebase);
index 1ac75f7..473de3a 100644 (file)
@@ -3,6 +3,7 @@
  * Copyright (C) 2012 Regents of the University of California
  */
 
+#include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/sched.h>
@@ -83,7 +84,7 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code,
 }
 
 #define DO_ERROR_INFO(name, signo, code, str)                          \
-asmlinkage void name(struct pt_regs *regs)                             \
+asmlinkage __visible void name(struct pt_regs *regs)                   \
 {                                                                      \
        do_trap_error(regs, signo, code, regs->sepc, "Oops - " str);    \
 }
@@ -111,7 +112,6 @@ DO_ERROR_INFO(do_trap_ecall_s,
 DO_ERROR_INFO(do_trap_ecall_m,
        SIGILL, ILL_ILLTRP, "environment call from M-mode");
 
-#ifdef CONFIG_GENERIC_BUG
 static inline unsigned long get_break_insn_length(unsigned long pc)
 {
        bug_insn_t insn;
@@ -120,28 +120,15 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
                return 0;
        return (((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) ? 4UL : 2UL);
 }
-#endif /* CONFIG_GENERIC_BUG */
 
-asmlinkage void do_trap_break(struct pt_regs *regs)
+asmlinkage __visible void do_trap_break(struct pt_regs *regs)
 {
-       if (user_mode(regs)) {
-               force_sig_fault(SIGTRAP, TRAP_BRKPT,
-                               (void __user *)(regs->sepc));
-               return;
-       }
-#ifdef CONFIG_GENERIC_BUG
-       {
-               enum bug_trap_type type;
-
-               type = report_bug(regs->sepc, regs);
-               if (type == BUG_TRAP_TYPE_WARN) {
-                       regs->sepc += get_break_insn_length(regs->sepc);
-                       return;
-               }
-       }
-#endif /* CONFIG_GENERIC_BUG */
-
-       die(regs, "Kernel BUG");
+       if (user_mode(regs))
+               force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->sepc);
+       else if (report_bug(regs->sepc, regs) == BUG_TRAP_TYPE_WARN)
+               regs->sepc += get_break_insn_length(regs->sepc);
+       else
+               die(regs, "Kernel BUG");
 }
 
 #ifdef CONFIG_GENERIC_BUG
index c9c21e0..484d95a 100644 (file)
@@ -6,6 +6,7 @@
  * Copyright (C) 2015 Regents of the University of California
  */
 
+#include <linux/elf.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/binfmts.h>
@@ -25,7 +26,7 @@ static union {
        struct vdso_data        data;
        u8                      page[PAGE_SIZE];
 } vdso_data_store __page_aligned_data;
-struct vdso_data *vdso_data = &vdso_data_store.data;
+static struct vdso_data *vdso_data = &vdso_data_store.data;
 
 static int __init vdso_init(void)
 {
index beeb5d7..ca66d44 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/mm.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
 
 /*
  * When necessary, performs a deferred icache flush for the given MM context,
index 96add14..247b8c8 100644 (file)
@@ -18,6 +18,8 @@
 #include <asm/ptrace.h>
 #include <asm/tlbflush.h>
 
+#include "../kernel/head.h"
+
 /*
  * This routine handles page faults.  It determines the address and the
  * problem, and then passes it off to one of the appropriate routines.
index 83f7d12..573463d 100644 (file)
@@ -19,6 +19,8 @@
 #include <asm/pgtable.h>
 #include <asm/io.h>
 
+#include "../kernel/head.h"
+
 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
                                                        __page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);
@@ -337,8 +339,7 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
  */
 
 #ifndef __riscv_cmodel_medany
-#error "setup_vm() is called from head.S before relocate so it should "
-       "not use absolute addressing."
+#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
 #endif
 
 asmlinkage void __init setup_vm(uintptr_t dtb_pa)
@@ -458,7 +459,7 @@ void __init paging_init(void)
        zone_sizes_init();
 }
 
-#ifdef CONFIG_SPARSEMEM
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                               struct vmem_altmap *altmap)
 {
index 2e637ad..a9ffff3 100644 (file)
@@ -142,7 +142,7 @@ static irqreturn_t l2_int_handler(int irq, void *device)
        return IRQ_HANDLED;
 }
 
-int __init sifive_l2_init(void)
+static int __init sifive_l2_init(void)
 {
        struct device_node *np;
        struct resource res;
index 596ca7c..5367950 100644 (file)
@@ -101,10 +101,18 @@ static void handle_relocs(unsigned long offset)
        dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
        for (rela = rela_start; rela < rela_end; rela++) {
                loc = rela->r_offset + offset;
-               val = rela->r_addend + offset;
+               val = rela->r_addend;
                r_sym = ELF64_R_SYM(rela->r_info);
-               if (r_sym)
-                       val += dynsym[r_sym].st_value;
+               if (r_sym) {
+                       if (dynsym[r_sym].st_shndx != SHN_UNDEF)
+                               val += dynsym[r_sym].st_value + offset;
+               } else {
+                       /*
+                        * 0 == undefined symbol table index (STN_UNDEF),
+                        * used for R_390_RELATIVE, only add KASLR offset
+                        */
+                       val += offset;
+               }
                r_type = ELF64_R_TYPE(rela->r_info);
                rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
                if (rc)
index 3b664cb..d5035de 100644 (file)
@@ -27,6 +27,7 @@ int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
                *(u32 *)loc = val;
                break;
        case R_390_64:          /* Direct 64 bit.  */
+       case R_390_GLOB_DAT:
                *(u64 *)loc = val;
                break;
        case R_390_PC16:        /* PC relative 16 bit.  */
index 612535c..6627d7c 100644 (file)
@@ -1403,8 +1403,12 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        spin_unlock_irq(&ubd_dev->lock);
 
-       if (ret < 0)
-               blk_mq_requeue_request(req, true);
+       if (ret < 0) {
+               if (ret == -ENOMEM)
+                       res = BLK_STS_RESOURCE;
+               else
+                       res = BLK_STS_DEV_RESOURCE;
+       }
 
        return res;
 }
index 149795c..25019d4 100644 (file)
  */
 struct mem_vector immovable_mem[MAX_NUMNODES*2];
 
-/*
- * Max length of 64-bit hex address string is 19, prefix "0x" + 16 hex
- * digits, and '\0' for termination.
- */
-#define MAX_ADDR_LEN 19
-
-static acpi_physical_address get_cmdline_acpi_rsdp(void)
-{
-       acpi_physical_address addr = 0;
-
-#ifdef CONFIG_KEXEC
-       char val[MAX_ADDR_LEN] = { };
-       int ret;
-
-       ret = cmdline_find_option("acpi_rsdp", val, MAX_ADDR_LEN);
-       if (ret < 0)
-               return 0;
-
-       if (kstrtoull(val, 16, &addr))
-               return 0;
-#endif
-       return addr;
-}
-
 /*
  * Search EFI system tables for RSDP.  If both ACPI_20_TABLE_GUID and
  * ACPI_TABLE_GUID are found, take the former, which has more features.
@@ -298,6 +274,30 @@ acpi_physical_address get_rsdp_addr(void)
 }
 
 #if defined(CONFIG_RANDOMIZE_BASE) && defined(CONFIG_MEMORY_HOTREMOVE)
+/*
+ * Max length of 64-bit hex address string is 19, prefix "0x" + 16 hex
+ * digits, and '\0' for termination.
+ */
+#define MAX_ADDR_LEN 19
+
+static acpi_physical_address get_cmdline_acpi_rsdp(void)
+{
+       acpi_physical_address addr = 0;
+
+#ifdef CONFIG_KEXEC
+       char val[MAX_ADDR_LEN] = { };
+       int ret;
+
+       ret = cmdline_find_option("acpi_rsdp", val, MAX_ADDR_LEN);
+       if (ret < 0)
+               return 0;
+
+       if (kstrtoull(val, 16, &addr))
+               return 0;
+#endif
+       return addr;
+}
+
 /* Compute SRAT address from RSDP. */
 static unsigned long get_acpi_srat_table(void)
 {
index d6662fd..82bc60c 100644 (file)
@@ -13,6 +13,7 @@
 #include <asm/e820/types.h>
 #include <asm/setup.h>
 #include <asm/desc.h>
+#include <asm/boot.h>
 
 #include "../string.h"
 #include "eboot.h"
@@ -813,7 +814,8 @@ efi_main(struct efi_config *c, struct boot_params *boot_params)
                status = efi_relocate_kernel(sys_table, &bzimage_addr,
                                             hdr->init_size, hdr->init_size,
                                             hdr->pref_address,
-                                            hdr->kernel_alignment);
+                                            hdr->kernel_alignment,
+                                            LOAD_PHYSICAL_ADDR);
                if (status != EFI_SUCCESS) {
                        efi_printk(sys_table, "efi_relocate_kernel() failed!\n");
                        goto fail;
index 53ac0cb..9652d5c 100644 (file)
@@ -345,6 +345,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
 {
        const unsigned long kernel_total_size = VO__end - VO__text;
        unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
+       unsigned long needed_size;
 
        /* Retain x86 boot parameters pointer passed from startup_32/64. */
        boot_params = rmode;
@@ -379,26 +380,38 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
        free_mem_ptr     = heap;        /* Heap */
        free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
 
+       /*
+        * The memory hole needed for the kernel is the larger of either
+        * the entire decompressed kernel plus relocation table, or the
+        * entire decompressed kernel plus .bss and .brk sections.
+        *
+        * On X86_64, the memory is mapped with PMD pages. Round the
+        * size up so that the full extent of PMD pages mapped is
+        * included in the check against the valid memory table
+        * entries. This ensures the full mapped area is usable RAM
+        * and doesn't include any reserved areas.
+        */
+       needed_size = max(output_len, kernel_total_size);
+#ifdef CONFIG_X86_64
+       needed_size = ALIGN(needed_size, MIN_KERNEL_ALIGN);
+#endif
+
        /* Report initial kernel position details. */
        debug_putaddr(input_data);
        debug_putaddr(input_len);
        debug_putaddr(output);
        debug_putaddr(output_len);
        debug_putaddr(kernel_total_size);
+       debug_putaddr(needed_size);
 
 #ifdef CONFIG_X86_64
        /* Report address of 32-bit trampoline */
        debug_putaddr(trampoline_32bit);
 #endif
 
-       /*
-        * The memory hole needed for the kernel is the larger of either
-        * the entire decompressed kernel plus relocation table, or the
-        * entire decompressed kernel plus .bss and .brk sections.
-        */
        choose_random_location((unsigned long)input_data, input_len,
                                (unsigned long *)&output,
-                               max(output_len, kernel_total_size),
+                               needed_size,
                                &virt_addr);
 
        /* Validate memory location choices. */
index 5b35b7e..26c3635 100644 (file)
@@ -377,7 +377,8 @@ static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
                                          struct hw_perf_event *hwc, u64 config)
 {
        config &= ~perf_ibs->cnt_mask;
-       wrmsrl(hwc->config_base, config);
+       if (boot_cpu_data.x86 == 0x10)
+               wrmsrl(hwc->config_base, config);
        config &= ~perf_ibs->enable_mask;
        wrmsrl(hwc->config_base, config);
 }
@@ -553,7 +554,8 @@ static struct perf_ibs perf_ibs_op = {
        },
        .msr                    = MSR_AMD64_IBSOPCTL,
        .config_mask            = IBS_OP_CONFIG_MASK,
-       .cnt_mask               = IBS_OP_MAX_CNT,
+       .cnt_mask               = IBS_OP_MAX_CNT | IBS_OP_CUR_CNT |
+                                 IBS_OP_CUR_CNT_RAND,
        .enable_mask            = IBS_OP_ENABLE,
        .valid_mask             = IBS_OP_VAL,
        .max_period             = IBS_OP_MAX_CNT << 4,
@@ -614,7 +616,7 @@ fail:
        if (event->attr.sample_type & PERF_SAMPLE_RAW)
                offset_max = perf_ibs->offset_max;
        else if (check_rip)
-               offset_max = 2;
+               offset_max = 3;
        else
                offset_max = 1;
        do {
index 74e80ed..05e43d0 100644 (file)
@@ -627,7 +627,7 @@ static struct topa *topa_alloc(int cpu, gfp_t gfp)
         * link as the 2nd entry in the table
         */
        if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
-               TOPA_ENTRY(&tp->topa, 1)->base = page_to_phys(p);
+               TOPA_ENTRY(&tp->topa, 1)->base = page_to_phys(p) >> TOPA_SHIFT;
                TOPA_ENTRY(&tp->topa, 1)->end = 1;
        }
 
index 6fc2e06..86467f8 100644 (file)
@@ -502,10 +502,8 @@ void uncore_pmu_event_start(struct perf_event *event, int flags)
        local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
        uncore_enable_event(box, event);
 
-       if (box->n_active == 1) {
-               uncore_enable_box(box);
+       if (box->n_active == 1)
                uncore_pmu_start_hrtimer(box);
-       }
 }
 
 void uncore_pmu_event_stop(struct perf_event *event, int flags)
@@ -529,10 +527,8 @@ void uncore_pmu_event_stop(struct perf_event *event, int flags)
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
 
-               if (box->n_active == 0) {
-                       uncore_disable_box(box);
+               if (box->n_active == 0)
                        uncore_pmu_cancel_hrtimer(box);
-               }
        }
 
        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
@@ -778,6 +774,40 @@ static int uncore_pmu_event_init(struct perf_event *event)
        return ret;
 }
 
+static void uncore_pmu_enable(struct pmu *pmu)
+{
+       struct intel_uncore_pmu *uncore_pmu;
+       struct intel_uncore_box *box;
+
+       uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
+       if (!uncore_pmu)
+               return;
+
+       box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
+       if (!box)
+               return;
+
+       if (uncore_pmu->type->ops->enable_box)
+               uncore_pmu->type->ops->enable_box(box);
+}
+
+static void uncore_pmu_disable(struct pmu *pmu)
+{
+       struct intel_uncore_pmu *uncore_pmu;
+       struct intel_uncore_box *box;
+
+       uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
+       if (!uncore_pmu)
+               return;
+
+       box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
+       if (!box)
+               return;
+
+       if (uncore_pmu->type->ops->disable_box)
+               uncore_pmu->type->ops->disable_box(box);
+}
+
 static ssize_t uncore_get_attr_cpumask(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
@@ -803,6 +833,8 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
                pmu->pmu = (struct pmu) {
                        .attr_groups    = pmu->type->attr_groups,
                        .task_ctx_nr    = perf_invalid_context,
+                       .pmu_enable     = uncore_pmu_enable,
+                       .pmu_disable    = uncore_pmu_disable,
                        .event_init     = uncore_pmu_event_init,
                        .add            = uncore_pmu_event_add,
                        .del            = uncore_pmu_event_del,
index f36f7be..bbfdaa7 100644 (file)
@@ -441,18 +441,6 @@ static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
        return -EINVAL;
 }
 
-static inline void uncore_disable_box(struct intel_uncore_box *box)
-{
-       if (box->pmu->type->ops->disable_box)
-               box->pmu->type->ops->disable_box(box);
-}
-
-static inline void uncore_enable_box(struct intel_uncore_box *box)
-{
-       if (box->pmu->type->ops->enable_box)
-               box->pmu->type->ops->enable_box(box);
-}
-
 static inline void uncore_disable_event(struct intel_uncore_box *box,
                                struct perf_event *event)
 {
index 5c056b8..e01078e 100644 (file)
@@ -260,11 +260,21 @@ void __init hv_apic_init(void)
        }
 
        if (ms_hyperv.hints & HV_X64_APIC_ACCESS_RECOMMENDED) {
-               pr_info("Hyper-V: Using MSR based APIC access\n");
+               pr_info("Hyper-V: Using enlightened APIC (%s mode)",
+                       x2apic_enabled() ? "x2apic" : "xapic");
+               /*
+                * With x2apic, architectural x2apic MSRs are equivalent to the
+                * respective synthetic MSRs, so there's no need to override
+                * the apic accessors.  The only exception is
+                * hv_apic_eoi_write, because it benefits from lazy EOI when
+                * available, but it works for both xapic and x2apic modes.
+                */
                apic_set_eoi_write(hv_apic_eoi_write);
-               apic->read      = hv_apic_read;
-               apic->write     = hv_apic_write;
-               apic->icr_write = hv_apic_icr_write;
-               apic->icr_read  = hv_apic_icr_read;
+               if (!x2apic_enabled()) {
+                       apic->read      = hv_apic_read;
+                       apic->write     = hv_apic_write;
+                       apic->icr_write = hv_apic_icr_write;
+                       apic->icr_read  = hv_apic_icr_read;
+               }
        }
 }
index 50eb430..24d6598 100644 (file)
@@ -1189,7 +1189,7 @@ struct kvm_x86_ops {
        int (*set_nested_state)(struct kvm_vcpu *vcpu,
                                struct kvm_nested_state __user *user_kvm_nested_state,
                                struct kvm_nested_state *kvm_state);
-       void (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
+       bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
 
        int (*smi_allowed)(struct kvm_vcpu *vcpu);
        int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
index e00c9e8..ac9fc51 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <asm/cpufeatures.h>
 #include <asm/alternative.h>
+#include <linux/stringify.h>
 
 /*
  * The hypercall definitions differ in the low word of the %edx argument
@@ -20,8 +21,8 @@
  */
 
 /* Old port-based version */
-#define VMWARE_HYPERVISOR_PORT    "0x5658"
-#define VMWARE_HYPERVISOR_PORT_HB "0x5659"
+#define VMWARE_HYPERVISOR_PORT    0x5658
+#define VMWARE_HYPERVISOR_PORT_HB 0x5659
 
 /* Current vmcall / vmmcall version */
 #define VMWARE_HYPERVISOR_HB   BIT(0)
@@ -29,7 +30,8 @@
 
 /* The low bandwidth call. The low word of edx is presumed clear. */
 #define VMWARE_HYPERCALL                                               \
-       ALTERNATIVE_2("movw $" VMWARE_HYPERVISOR_PORT ", %%dx; inl (%%dx)", \
+       ALTERNATIVE_2("movw $" __stringify(VMWARE_HYPERVISOR_PORT) ", %%dx; " \
+                     "inl (%%dx), %%eax",                              \
                      "vmcall", X86_FEATURE_VMCALL,                     \
                      "vmmcall", X86_FEATURE_VMW_VMMCALL)
 
@@ -38,7 +40,8 @@
  * HB and OUT bits set.
  */
 #define VMWARE_HYPERCALL_HB_OUT                                                \
-       ALTERNATIVE_2("movw $" VMWARE_HYPERVISOR_PORT_HB ", %%dx; rep outsb", \
+       ALTERNATIVE_2("movw $" __stringify(VMWARE_HYPERVISOR_PORT_HB) ", %%dx; " \
+                     "rep outsb",                                      \
                      "vmcall", X86_FEATURE_VMCALL,                     \
                      "vmmcall", X86_FEATURE_VMW_VMMCALL)
 
@@ -47,7 +50,8 @@
  * HB bit set.
  */
 #define VMWARE_HYPERCALL_HB_IN                                         \
-       ALTERNATIVE_2("movw $" VMWARE_HYPERVISOR_PORT_HB ", %%dx; rep insb", \
+       ALTERNATIVE_2("movw $" __stringify(VMWARE_HYPERVISOR_PORT_HB) ", %%dx; " \
+                     "rep insb",                                       \
                      "vmcall", X86_FEATURE_VMCALL,                     \
                      "vmmcall", X86_FEATURE_VMW_VMMCALL)
 #endif
index 45e92cb..b0889c4 100644 (file)
@@ -156,7 +156,8 @@ static int x2apic_dead_cpu(unsigned int dead_cpu)
 {
        struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);
 
-       cpumask_clear_cpu(dead_cpu, &cmsk->mask);
+       if (cmsk)
+               cpumask_clear_cpu(dead_cpu, &cmsk->mask);
        free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
        return 0;
 }
index 267daad..c656d92 100644 (file)
@@ -216,6 +216,10 @@ static void __init ms_hyperv_init_platform(void)
        int hv_host_info_ecx;
        int hv_host_info_edx;
 
+#ifdef CONFIG_PARAVIRT
+       pv_info.name = "Hyper-V";
+#endif
+
        /*
         * Extract the features and hints
         */
index 29ffa49..206a4b6 100644 (file)
@@ -222,13 +222,31 @@ unsigned long __head __startup_64(unsigned long physaddr,
         * we might write invalid pmds, when the kernel is relocated
         * cleanup_highmap() fixes this up along with the mappings
         * beyond _end.
+        *
+        * Only the region occupied by the kernel image has so far
+        * been checked against the table of usable memory regions
+        * provided by the firmware, so invalidate pages outside that
+        * region. A page table entry that maps to a reserved area of
+        * memory would allow processor speculation into that area,
+        * and on some hardware (particularly the UV platform) even
+        * speculative access to some reserved areas is caught as an
+        * error, causing the BIOS to halt the system.
         */
 
        pmd = fixup_pointer(level2_kernel_pgt, physaddr);
-       for (i = 0; i < PTRS_PER_PMD; i++) {
+
+       /* invalidate pages before the kernel image */
+       for (i = 0; i < pmd_index((unsigned long)_text); i++)
+               pmd[i] &= ~_PAGE_PRESENT;
+
+       /* fixup pages that are part of the kernel image */
+       for (; i <= pmd_index((unsigned long)_end); i++)
                if (pmd[i] & _PAGE_PRESENT)
                        pmd[i] += load_delta;
-       }
+
+       /* invalidate pages after the kernel image */
+       for (; i < PTRS_PER_PMD; i++)
+               pmd[i] &= ~_PAGE_PRESENT;
 
        /*
         * Fixup phys_base - remove the memory encryption mask to obtain
index 9c5029c..f68c0c7 100644 (file)
@@ -363,7 +363,7 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
 
        /* cpuid 7.0.ecx*/
        const u32 kvm_cpuid_7_0_ecx_x86_features =
-               F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ |
+               F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
                F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
                F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
                F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/;
index 87b0fcc..b29d00b 100644 (file)
@@ -111,11 +111,6 @@ static inline int apic_enabled(struct kvm_lapic *apic)
        (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
         APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
 
-static inline u8 kvm_xapic_id(struct kvm_lapic *apic)
-{
-       return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
-}
-
 static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
 {
        return apic->vcpu->vcpu_id;
index 2aad7e2..1f50148 100644 (file)
@@ -242,4 +242,9 @@ static inline enum lapic_mode kvm_apic_mode(u64 apic_base)
        return apic_base & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
 }
 
+static inline u8 kvm_xapic_id(struct kvm_lapic *apic)
+{
+       return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
+}
+
 #endif
index f8ecb6d..c5673bd 100644 (file)
@@ -734,8 +734,14 @@ static int get_npt_level(struct kvm_vcpu *vcpu)
 static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
        vcpu->arch.efer = efer;
-       if (!npt_enabled && !(efer & EFER_LMA))
-               efer &= ~EFER_LME;
+
+       if (!npt_enabled) {
+               /* Shadow paging assumes NX to be available.  */
+               efer |= EFER_NX;
+
+               if (!(efer & EFER_LMA))
+                       efer &= ~EFER_LME;
+       }
 
        to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
        mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
@@ -4591,6 +4597,7 @@ static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
        int ret = 0;
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
+       u32 id = kvm_xapic_id(vcpu->arch.apic);
 
        if (ldr == svm->ldr_reg)
                return 0;
@@ -4598,7 +4605,7 @@ static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
        avic_invalidate_logical_id_entry(vcpu);
 
        if (ldr)
-               ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr);
+               ret = avic_ldr_write(vcpu, id, ldr);
 
        if (!ret)
                svm->ldr_reg = ldr;
@@ -4610,8 +4617,7 @@ static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
 {
        u64 *old, *new;
        struct vcpu_svm *svm = to_svm(vcpu);
-       u32 apic_id_reg = kvm_lapic_get_reg(vcpu->arch.apic, APIC_ID);
-       u32 id = (apic_id_reg >> 24) & 0xff;
+       u32 id = kvm_xapic_id(vcpu->arch.apic);
 
        if (vcpu->vcpu_id == id)
                return 0;
index e76eb4f..0e7c930 100644 (file)
@@ -2917,7 +2917,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
                                                 struct vmcs12 *vmcs12);
 
-static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 {
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -2937,19 +2937,18 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
                        vmx->nested.apic_access_page = NULL;
                }
                page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
-               /*
-                * If translation failed, no matter: This feature asks
-                * to exit when accessing the given address, and if it
-                * can never be accessed, this feature won't do
-                * anything anyway.
-                */
                if (!is_error_page(page)) {
                        vmx->nested.apic_access_page = page;
                        hpa = page_to_phys(vmx->nested.apic_access_page);
                        vmcs_write64(APIC_ACCESS_ADDR, hpa);
                } else {
-                       secondary_exec_controls_clearbit(vmx,
-                               SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+                       pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n",
+                                            __func__);
+                       vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       vcpu->run->internal.suberror =
+                               KVM_INTERNAL_ERROR_EMULATION;
+                       vcpu->run->internal.ndata = 0;
+                       return false;
                }
        }
 
@@ -2994,6 +2993,7 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
                exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
        else
                exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
+       return true;
 }
 
 /*
@@ -3032,13 +3032,15 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 /*
  * If from_vmentry is false, this is being called from state restore (either RSM
  * or KVM_SET_NESTED_STATE).  Otherwise it's called from vmlaunch/vmresume.
-+ *
-+ * Returns:
-+ *   0 - success, i.e. proceed with actual VMEnter
-+ *   1 - consistency check VMExit
-+ *  -1 - consistency check VMFail
+ *
+ * Returns:
+ *     NVMX_ENTRY_SUCCESS: Entered VMX non-root mode
+ *     NVMX_ENTRY_VMFAIL:  Consistency check VMFail
+ *     NVMX_ENTRY_VMEXIT:  Consistency check VMExit
+ *     NVMX_ENTRY_KVM_INTERNAL_ERROR: KVM internal error
  */
-int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
+enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+                                                       bool from_vmentry)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
@@ -3081,11 +3083,12 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
        prepare_vmcs02_early(vmx, vmcs12);
 
        if (from_vmentry) {
-               nested_get_vmcs12_pages(vcpu);
+               if (unlikely(!nested_get_vmcs12_pages(vcpu)))
+                       return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
 
                if (nested_vmx_check_vmentry_hw(vcpu)) {
                        vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-                       return -1;
+                       return NVMX_VMENTRY_VMFAIL;
                }
 
                if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
@@ -3149,7 +3152,7 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
         * returned as far as L1 is concerned. It will only return (and set
         * the success flag) when L2 exits (see nested_vmx_vmexit()).
         */
-       return 0;
+       return NVMX_VMENTRY_SUCCESS;
 
        /*
         * A failed consistency check that leads to a VMExit during L1's
@@ -3165,14 +3168,14 @@ vmentry_fail_vmexit:
        vmx_switch_vmcs(vcpu, &vmx->vmcs01);
 
        if (!from_vmentry)
-               return 1;
+               return NVMX_VMENTRY_VMEXIT;
 
        load_vmcs12_host_state(vcpu, vmcs12);
        vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
        vmcs12->exit_qualification = exit_qual;
        if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
                vmx->nested.need_vmcs12_to_shadow_sync = true;
-       return 1;
+       return NVMX_VMENTRY_VMEXIT;
 }
 
 /*
@@ -3182,9 +3185,9 @@ vmentry_fail_vmexit:
 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 {
        struct vmcs12 *vmcs12;
+       enum nvmx_vmentry_status status;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
-       int ret;
 
        if (!nested_vmx_check_permission(vcpu))
                return 1;
@@ -3244,13 +3247,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
         * the nested entry.
         */
        vmx->nested.nested_run_pending = 1;
-       ret = nested_vmx_enter_non_root_mode(vcpu, true);
-       vmx->nested.nested_run_pending = !ret;
-       if (ret > 0)
-               return 1;
-       else if (ret)
-               return nested_vmx_failValid(vcpu,
-                       VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+       status = nested_vmx_enter_non_root_mode(vcpu, true);
+       if (unlikely(status != NVMX_VMENTRY_SUCCESS))
+               goto vmentry_failed;
 
        /* Hide L1D cache contents from the nested guest.  */
        vmx->vcpu.arch.l1tf_flush_l1d = true;
@@ -3281,6 +3280,15 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
                return kvm_vcpu_halt(vcpu);
        }
        return 1;
+
+vmentry_failed:
+       vmx->nested.nested_run_pending = 0;
+       if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR)
+               return 0;
+       if (status == NVMX_VMENTRY_VMEXIT)
+               return 1;
+       WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
+       return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 }
 
 /*
index 187d39b..6280f33 100644 (file)
@@ -6,6 +6,16 @@
 #include "vmcs12.h"
 #include "vmx.h"
 
+/*
+ * Status returned by nested_vmx_enter_non_root_mode():
+ */
+enum nvmx_vmentry_status {
+       NVMX_VMENTRY_SUCCESS,           /* Entered VMX non-root mode */
+       NVMX_VMENTRY_VMFAIL,            /* Consistency check VMFail */
+       NVMX_VMENTRY_VMEXIT,            /* Consistency check VMExit */
+       NVMX_VMENTRY_KVM_INTERNAL_ERROR,/* KVM internal error */
+};
+
 void vmx_leave_nested(struct kvm_vcpu *vcpu);
 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
                                bool apicv);
@@ -13,7 +23,8 @@ void nested_vmx_hardware_unsetup(void);
 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
 void nested_vmx_vcpu_setup(void);
 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
-int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry);
+enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+                                                    bool from_vmentry);
 bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason);
 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
                       u32 exit_intr_info, unsigned long exit_qualification);
index e7970a2..5d21a4a 100644 (file)
@@ -969,17 +969,9 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
        u64 guest_efer = vmx->vcpu.arch.efer;
        u64 ignore_bits = 0;
 
-       if (!enable_ept) {
-               /*
-                * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
-                * host CPUID is more efficient than testing guest CPUID
-                * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
-                */
-               if (boot_cpu_has(X86_FEATURE_SMEP))
-                       guest_efer |= EFER_NX;
-               else if (!(guest_efer & EFER_NX))
-                       ignore_bits |= EFER_NX;
-       }
+       /* Shadow paging assumes NX to be available.  */
+       if (!enable_ept)
+               guest_efer |= EFER_NX;
 
        /*
         * LMA and LME handled by hardware; SCE meaningless outside long mode.
@@ -5543,14 +5535,6 @@ static int handle_encls(struct kvm_vcpu *vcpu)
        return 1;
 }
 
-static int handle_unexpected_vmexit(struct kvm_vcpu *vcpu)
-{
-       kvm_skip_emulated_instruction(vcpu);
-       WARN_ONCE(1, "Unexpected VM-Exit Reason = 0x%x",
-               vmcs_read32(VM_EXIT_REASON));
-       return 1;
-}
-
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
@@ -5602,15 +5586,11 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
        [EXIT_REASON_INVVPID]                 = handle_vmx_instruction,
        [EXIT_REASON_RDRAND]                  = handle_invalid_op,
        [EXIT_REASON_RDSEED]                  = handle_invalid_op,
-       [EXIT_REASON_XSAVES]                  = handle_unexpected_vmexit,
-       [EXIT_REASON_XRSTORS]                 = handle_unexpected_vmexit,
        [EXIT_REASON_PML_FULL]                = handle_pml_full,
        [EXIT_REASON_INVPCID]                 = handle_invpcid,
        [EXIT_REASON_VMFUNC]                  = handle_vmx_instruction,
        [EXIT_REASON_PREEMPTION_TIMER]        = handle_preemption_timer,
        [EXIT_REASON_ENCLS]                   = handle_encls,
-       [EXIT_REASON_UMWAIT]                  = handle_unexpected_vmexit,
-       [EXIT_REASON_TPAUSE]                  = handle_unexpected_vmexit,
 };
 
 static const int kvm_vmx_max_exit_handlers =
index 661e2bf..ff395f8 100644 (file)
@@ -360,8 +360,7 @@ EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 asmlinkage __visible void kvm_spurious_fault(void)
 {
        /* Fault while not rebooting.  We want the trace. */
-       if (!kvm_rebooting)
-               BUG();
+       BUG_ON(!kvm_rebooting);
 }
 EXPORT_SYMBOL_GPL(kvm_spurious_fault);
 
@@ -2537,6 +2536,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 static void kvmclock_reset(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.pv_time_enabled = false;
+       vcpu->arch.time = 0;
 }
 
 static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
@@ -2702,8 +2702,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_KVM_SYSTEM_TIME: {
                struct kvm_arch *ka = &vcpu->kvm->arch;
 
-               kvmclock_reset(vcpu);
-
                if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
                        bool tmp = (msr == MSR_KVM_SYSTEM_TIME);
 
@@ -2717,14 +2715,13 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
 
                /* we verify if the enable bit is set... */
+               vcpu->arch.pv_time_enabled = false;
                if (!(data & 1))
                        break;
 
-               if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+               if (!kvm_gfn_to_hva_cache_init(vcpu->kvm,
                     &vcpu->arch.pv_time, data & ~1ULL,
                     sizeof(struct pvclock_vcpu_time_info)))
-                       vcpu->arch.pv_time_enabled = false;
-               else
                        vcpu->arch.pv_time_enabled = true;
 
                break;
@@ -7941,8 +7938,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        bool req_immediate_exit = false;
 
        if (kvm_request_pending(vcpu)) {
-               if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu))
-                       kvm_x86_ops->get_vmcs12_pages(vcpu);
+               if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu)) {
+                       if (unlikely(!kvm_x86_ops->get_vmcs12_pages(vcpu))) {
+                               r = 0;
+                               goto out;
+                       }
+               }
                if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
                        kvm_mmu_unload(vcpu);
                if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
index 58f79ab..5bfea37 100644 (file)
@@ -117,6 +117,14 @@ static void __init xen_banner(void)
        printk(KERN_INFO "Xen version: %d.%d%s%s\n",
               version >> 16, version & 0xffff, extra.extraversion,
               xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
+
+#ifdef CONFIG_X86_32
+       pr_warn("WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!\n"
+               "Support for running as 32-bit PV-guest under Xen will soon be removed\n"
+               "from the Linux kernel!\n"
+               "Please use either a 64-bit kernel or switch to HVM or PVH mode!\n"
+               "WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!\n");
+#endif
 }
 
 static void __init xen_pv_init_platform(void)
index 2a3db80..a7ed434 100644 (file)
@@ -2110,10 +2110,10 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
                        goto einval;
        }
 
-       spin_lock_irq(&iocg->ioc->lock);
+       spin_lock(&iocg->ioc->lock);
        iocg->cfg_weight = v;
        weight_updated(iocg);
-       spin_unlock_irq(&iocg->ioc->lock);
+       spin_unlock(&iocg->ioc->lock);
 
        blkg_conf_finish(&ctx);
        return nbytes;
index 1413324..14e68f2 100644 (file)
@@ -1322,7 +1322,7 @@ static ssize_t scrub_show(struct device *dev,
        nfit_device_lock(dev);
        nd_desc = dev_get_drvdata(dev);
        if (!nd_desc) {
-               device_unlock(dev);
+               nfit_device_unlock(dev);
                return rc;
        }
        acpi_desc = to_acpi_desc(nd_desc);
index 08da9c2..62114a0 100644 (file)
@@ -290,14 +290,13 @@ static int acpi_processor_notifier(struct notifier_block *nb,
                                   unsigned long event, void *data)
 {
        struct cpufreq_policy *policy = data;
-       int cpu = policy->cpu;
 
        if (event == CPUFREQ_CREATE_POLICY) {
-               acpi_thermal_cpufreq_init(cpu);
-               acpi_processor_ppc_init(cpu);
+               acpi_thermal_cpufreq_init(policy);
+               acpi_processor_ppc_init(policy);
        } else if (event == CPUFREQ_REMOVE_POLICY) {
-               acpi_processor_ppc_exit(cpu);
-               acpi_thermal_cpufreq_exit(cpu);
+               acpi_processor_ppc_exit(policy);
+               acpi_thermal_cpufreq_exit(policy);
        }
 
        return 0;
index 930a49f..5909e8f 100644 (file)
@@ -81,10 +81,10 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
        pr->performance_platform_limit = (int)ppc;
 
        if (ppc >= pr->performance->state_count ||
-           unlikely(!dev_pm_qos_request_active(&pr->perflib_req)))
+           unlikely(!freq_qos_request_active(&pr->perflib_req)))
                return 0;
 
-       ret = dev_pm_qos_update_request(&pr->perflib_req,
+       ret = freq_qos_update_request(&pr->perflib_req,
                        pr->performance->states[ppc].core_frequency * 1000);
        if (ret < 0) {
                pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
@@ -157,28 +157,36 @@ void acpi_processor_ignore_ppc_init(void)
                ignore_ppc = 0;
 }
 
-void acpi_processor_ppc_init(int cpu)
+void acpi_processor_ppc_init(struct cpufreq_policy *policy)
 {
-       struct acpi_processor *pr = per_cpu(processors, cpu);
-       int ret;
+       unsigned int cpu;
 
-       if (!pr)
-               return;
+       for_each_cpu(cpu, policy->related_cpus) {
+               struct acpi_processor *pr = per_cpu(processors, cpu);
+               int ret;
+
+               if (!pr)
+                       continue;
 
-       ret = dev_pm_qos_add_request(get_cpu_device(cpu),
-                                    &pr->perflib_req, DEV_PM_QOS_MAX_FREQUENCY,
-                                    INT_MAX);
-       if (ret < 0)
-               pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
-                      ret);
+               ret = freq_qos_add_request(&policy->constraints,
+                                          &pr->perflib_req,
+                                          FREQ_QOS_MAX, INT_MAX);
+               if (ret < 0)
+                       pr_err("Failed to add freq constraint for CPU%d (%d)\n",
+                              cpu, ret);
+       }
 }
 
-void acpi_processor_ppc_exit(int cpu)
+void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
 {
-       struct acpi_processor *pr = per_cpu(processors, cpu);
+       unsigned int cpu;
 
-       if (pr)
-               dev_pm_qos_remove_request(&pr->perflib_req);
+       for_each_cpu(cpu, policy->related_cpus) {
+               struct acpi_processor *pr = per_cpu(processors, cpu);
+
+               if (pr)
+                       freq_qos_remove_request(&pr->perflib_req);
+       }
 }
 
 static int acpi_processor_get_performance_control(struct acpi_processor *pr)
index 8227c7d..41feb88 100644 (file)
@@ -105,7 +105,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
 
                pr = per_cpu(processors, i);
 
-               if (unlikely(!dev_pm_qos_request_active(&pr->thermal_req)))
+               if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
                        continue;
 
                policy = cpufreq_cpu_get(i);
@@ -116,7 +116,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
 
                cpufreq_cpu_put(policy);
 
-               ret = dev_pm_qos_update_request(&pr->thermal_req, max_freq);
+               ret = freq_qos_update_request(&pr->thermal_req, max_freq);
                if (ret < 0) {
                        pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
                                pr->id, ret);
@@ -125,28 +125,36 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
        return 0;
 }
 
-void acpi_thermal_cpufreq_init(int cpu)
+void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
 {
-       struct acpi_processor *pr = per_cpu(processors, cpu);
-       int ret;
+       unsigned int cpu;
 
-       if (!pr)
-               return;
-
-       ret = dev_pm_qos_add_request(get_cpu_device(cpu),
-                                    &pr->thermal_req, DEV_PM_QOS_MAX_FREQUENCY,
-                                    INT_MAX);
-       if (ret < 0)
-               pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
-                      ret);
+       for_each_cpu(cpu, policy->related_cpus) {
+               struct acpi_processor *pr = per_cpu(processors, cpu);
+               int ret;
+
+               if (!pr)
+                       continue;
+
+               ret = freq_qos_add_request(&policy->constraints,
+                                          &pr->thermal_req,
+                                          FREQ_QOS_MAX, INT_MAX);
+               if (ret < 0)
+                       pr_err("Failed to add freq constraint for CPU%d (%d)\n",
+                              cpu, ret);
+       }
 }
 
-void acpi_thermal_cpufreq_exit(int cpu)
+void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
 {
-       struct acpi_processor *pr = per_cpu(processors, cpu);
+       unsigned int cpu;
+
+       for_each_cpu(cpu, policy->related_cpus) {
+               struct acpi_processor *pr = per_cpu(processors, policy->cpu);
 
-       if (pr)
-               dev_pm_qos_remove_request(&pr->thermal_req);
+               if (pr)
+                       freq_qos_remove_request(&pr->thermal_req);
+       }
 }
 #else                          /* ! CONFIG_CPU_FREQ */
 static int cpufreq_get_max_state(unsigned int cpu)
index f39f075..fe15236 100644 (file)
@@ -409,9 +409,11 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
                 */
                rstc = of_reset_control_array_get_optional_shared(dev->dev.of_node);
                if (IS_ERR(rstc)) {
-                       if (PTR_ERR(rstc) != -EPROBE_DEFER)
-                               dev_err(&dev->dev, "Can't get amba reset!\n");
-                       return PTR_ERR(rstc);
+                       ret = PTR_ERR(rstc);
+                       if (ret != -EPROBE_DEFER)
+                               dev_err(&dev->dev, "can't get reset: %d\n",
+                                       ret);
+                       goto err_reset;
                }
                reset_control_deassert(rstc);
                reset_control_put(rstc);
@@ -472,6 +474,12 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
        release_resource(&dev->res);
  err_out:
        return ret;
+
+ err_reset:
+       amba_put_disable_pclk(dev);
+       iounmap(tmp);
+       dev_pm_domain_detach(&dev->dev, true);
+       goto err_release;
 }
 
 /*
index 5b9ac21..265d9dd 100644 (file)
@@ -97,10 +97,6 @@ DEFINE_SHOW_ATTRIBUTE(proc);
 #define SZ_1K                               0x400
 #endif
 
-#ifndef SZ_4M
-#define SZ_4M                               0x400000
-#endif
-
 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
 
 enum {
@@ -5177,9 +5173,6 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
        if (proc->tsk != current->group_leader)
                return -EINVAL;
 
-       if ((vma->vm_end - vma->vm_start) > SZ_4M)
-               vma->vm_end = vma->vm_start + SZ_4M;
-
        binder_debug(BINDER_DEBUG_OPEN_CLOSE,
                     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
                     __func__, proc->pid, vma->vm_start, vma->vm_end,
index d42a8b2..eb76a82 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/cacheflush.h>
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
+#include <linux/sizes.h>
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
@@ -689,7 +690,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
        alloc->buffer = (void __user *)vma->vm_start;
        mutex_unlock(&binder_alloc_mmap_lock);
 
-       alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
+       alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
+                                  SZ_4M);
+       alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
                               sizeof(alloc->pages[0]),
                               GFP_KERNEL);
        if (alloc->pages == NULL) {
@@ -697,7 +700,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                failure_string = "alloc page array";
                goto err_alloc_pages_failed;
        }
-       alloc->buffer_size = vma->vm_end - vma->vm_start;
 
        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
index e742780..8befce0 100644 (file)
@@ -153,17 +153,13 @@ int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv)
 {
        int rc, i;
 
-       if (hpriv->ahci_regulator) {
-               rc = regulator_enable(hpriv->ahci_regulator);
-               if (rc)
-                       return rc;
-       }
+       rc = regulator_enable(hpriv->ahci_regulator);
+       if (rc)
+               return rc;
 
-       if (hpriv->phy_regulator) {
-               rc = regulator_enable(hpriv->phy_regulator);
-               if (rc)
-                       goto disable_ahci_pwrs;
-       }
+       rc = regulator_enable(hpriv->phy_regulator);
+       if (rc)
+               goto disable_ahci_pwrs;
 
        for (i = 0; i < hpriv->nports; i++) {
                if (!hpriv->target_pwrs[i])
@@ -181,11 +177,9 @@ disable_target_pwrs:
                if (hpriv->target_pwrs[i])
                        regulator_disable(hpriv->target_pwrs[i]);
 
-       if (hpriv->phy_regulator)
-               regulator_disable(hpriv->phy_regulator);
+       regulator_disable(hpriv->phy_regulator);
 disable_ahci_pwrs:
-       if (hpriv->ahci_regulator)
-               regulator_disable(hpriv->ahci_regulator);
+       regulator_disable(hpriv->ahci_regulator);
        return rc;
 }
 EXPORT_SYMBOL_GPL(ahci_platform_enable_regulators);
@@ -207,10 +201,8 @@ void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv)
                regulator_disable(hpriv->target_pwrs[i]);
        }
 
-       if (hpriv->ahci_regulator)
-               regulator_disable(hpriv->ahci_regulator);
-       if (hpriv->phy_regulator)
-               regulator_disable(hpriv->phy_regulator);
+       regulator_disable(hpriv->ahci_regulator);
+       regulator_disable(hpriv->phy_regulator);
 }
 EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators);
 /**
@@ -359,7 +351,7 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
        struct regulator *target_pwr;
        int rc = 0;
 
-       target_pwr = regulator_get_optional(dev, "target");
+       target_pwr = regulator_get(dev, "target");
 
        if (!IS_ERR(target_pwr))
                hpriv->target_pwrs[port] = target_pwr;
@@ -436,16 +428,14 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
                hpriv->clks[i] = clk;
        }
 
-       hpriv->ahci_regulator = devm_regulator_get_optional(dev, "ahci");
+       hpriv->ahci_regulator = devm_regulator_get(dev, "ahci");
        if (IS_ERR(hpriv->ahci_regulator)) {
                rc = PTR_ERR(hpriv->ahci_regulator);
-               if (rc == -EPROBE_DEFER)
+               if (rc != 0)
                        goto err_out;
-               rc = 0;
-               hpriv->ahci_regulator = NULL;
        }
 
-       hpriv->phy_regulator = devm_regulator_get_optional(dev, "phy");
+       hpriv->phy_regulator = devm_regulator_get(dev, "phy");
        if (IS_ERR(hpriv->phy_regulator)) {
                rc = PTR_ERR(hpriv->phy_regulator);
                if (rc == -EPROBE_DEFER)
index 2bbab02..aad00d2 100644 (file)
@@ -1070,7 +1070,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
                                        RC_FLAGS_BFPS_BFP * bfp |
                                        RC_FLAGS_RXBM_PSB, 0, 0);
                        break;
-               };
+               }
                if (IS_FS50 (dev)) {
                        submit_command (dev, &dev->hp_txq, 
                                        QE_CMD_REG_WR | QE_CMD_IMM_INQ,
index 6c90fd7..350dcaf 100644 (file)
@@ -115,20 +115,10 @@ s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
 
        spin_lock_irqsave(&dev->power.lock, flags);
 
-       switch (type) {
-       case DEV_PM_QOS_RESUME_LATENCY:
+       if (type == DEV_PM_QOS_RESUME_LATENCY) {
                ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
                        : pm_qos_read_value(&qos->resume_latency);
-               break;
-       case DEV_PM_QOS_MIN_FREQUENCY:
-               ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
-                       : pm_qos_read_value(&qos->min_frequency);
-               break;
-       case DEV_PM_QOS_MAX_FREQUENCY:
-               ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
-                       : pm_qos_read_value(&qos->max_frequency);
-               break;
-       default:
+       } else {
                WARN_ON(1);
                ret = 0;
        }
@@ -169,14 +159,6 @@ static int apply_constraint(struct dev_pm_qos_request *req,
                        req->dev->power.set_latency_tolerance(req->dev, value);
                }
                break;
-       case DEV_PM_QOS_MIN_FREQUENCY:
-               ret = pm_qos_update_target(&qos->min_frequency,
-                                          &req->data.pnode, action, value);
-               break;
-       case DEV_PM_QOS_MAX_FREQUENCY:
-               ret = pm_qos_update_target(&qos->max_frequency,
-                                          &req->data.pnode, action, value);
-               break;
        case DEV_PM_QOS_FLAGS:
                ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
                                          action, value);
@@ -227,24 +209,6 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
        c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
        c->type = PM_QOS_MIN;
 
-       c = &qos->min_frequency;
-       plist_head_init(&c->list);
-       c->target_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
-       c->default_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
-       c->no_constraint_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
-       c->type = PM_QOS_MAX;
-       c->notifiers = ++n;
-       BLOCKING_INIT_NOTIFIER_HEAD(n);
-
-       c = &qos->max_frequency;
-       plist_head_init(&c->list);
-       c->target_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
-       c->default_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
-       c->no_constraint_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
-       c->type = PM_QOS_MIN;
-       c->notifiers = ++n;
-       BLOCKING_INIT_NOTIFIER_HEAD(n);
-
        INIT_LIST_HEAD(&qos->flags.list);
 
        spin_lock_irq(&dev->power.lock);
@@ -305,18 +269,6 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
                memset(req, 0, sizeof(*req));
        }
 
-       c = &qos->min_frequency;
-       plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
-               apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
-               memset(req, 0, sizeof(*req));
-       }
-
-       c = &qos->max_frequency;
-       plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
-               apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
-               memset(req, 0, sizeof(*req));
-       }
-
        f = &qos->flags;
        list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
                apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -428,8 +380,6 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
        switch(req->type) {
        case DEV_PM_QOS_RESUME_LATENCY:
        case DEV_PM_QOS_LATENCY_TOLERANCE:
-       case DEV_PM_QOS_MIN_FREQUENCY:
-       case DEV_PM_QOS_MAX_FREQUENCY:
                curr_value = req->data.pnode.prio;
                break;
        case DEV_PM_QOS_FLAGS:
@@ -557,14 +507,6 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
                ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
                                                       notifier);
                break;
-       case DEV_PM_QOS_MIN_FREQUENCY:
-               ret = blocking_notifier_chain_register(dev->power.qos->min_frequency.notifiers,
-                                                      notifier);
-               break;
-       case DEV_PM_QOS_MAX_FREQUENCY:
-               ret = blocking_notifier_chain_register(dev->power.qos->max_frequency.notifiers,
-                                                      notifier);
-               break;
        default:
                WARN_ON(1);
                ret = -EINVAL;
@@ -604,14 +546,6 @@ int dev_pm_qos_remove_notifier(struct device *dev,
                ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
                                                         notifier);
                break;
-       case DEV_PM_QOS_MIN_FREQUENCY:
-               ret = blocking_notifier_chain_unregister(dev->power.qos->min_frequency.notifiers,
-                                                        notifier);
-               break;
-       case DEV_PM_QOS_MAX_FREQUENCY:
-               ret = blocking_notifier_chain_unregister(dev->power.qos->max_frequency.notifiers,
-                                                        notifier);
-               break;
        default:
                WARN_ON(1);
                ret = -EINVAL;
index 478aa86..a94ee45 100644 (file)
@@ -385,17 +385,16 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
        struct nbd_device *nbd = cmd->nbd;
        struct nbd_config *config;
 
+       if (!mutex_trylock(&cmd->lock))
+               return BLK_EH_RESET_TIMER;
+
        if (!refcount_inc_not_zero(&nbd->config_refs)) {
                cmd->status = BLK_STS_TIMEOUT;
+               mutex_unlock(&cmd->lock);
                goto done;
        }
        config = nbd->config;
 
-       if (!mutex_trylock(&cmd->lock)) {
-               nbd_config_put(nbd);
-               return BLK_EH_RESET_TIMER;
-       }
-
        if (config->num_connections > 1) {
                dev_err_ratelimited(nbd_to_dev(nbd),
                                    "Connection timed out, retrying (%d/%d alive)\n",
@@ -711,6 +710,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
                ret = -ENOENT;
                goto out;
        }
+       if (cmd->status != BLK_STS_OK) {
+               dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
+                       req);
+               ret = -ENOENT;
+               goto out;
+       }
        if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
                dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
                        req);
@@ -792,7 +797,10 @@ static bool nbd_clear_req(struct request *req, void *data, bool reserved)
 {
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
 
+       mutex_lock(&cmd->lock);
        cmd->status = BLK_STS_IOERR;
+       mutex_unlock(&cmd->lock);
+
        blk_mq_complete_request(req);
        return true;
 }
@@ -972,6 +980,25 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
        return ret;
 }
 
+static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
+                                    int *err)
+{
+       struct socket *sock;
+
+       *err = 0;
+       sock = sockfd_lookup(fd, err);
+       if (!sock)
+               return NULL;
+
+       if (sock->ops->shutdown == sock_no_shutdown) {
+               dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
+               *err = -EINVAL;
+               return NULL;
+       }
+
+       return sock;
+}
+
 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
                          bool netlink)
 {
@@ -981,7 +1008,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
        struct nbd_sock *nsock;
        int err;
 
-       sock = sockfd_lookup(arg, &err);
+       sock = nbd_get_socket(nbd, arg, &err);
        if (!sock)
                return err;
 
@@ -1033,7 +1060,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
        int i;
        int err;
 
-       sock = sockfd_lookup(arg, &err);
+       sock = nbd_get_socket(nbd, arg, &err);
        if (!sock)
                return err;
 
index 52c7e15..c8b1c38 100644 (file)
@@ -104,10 +104,8 @@ static int __fsl_mc_device_match(struct device *dev, void *data)
        return fsl_mc_device_match(mc_dev, obj_desc);
 }
 
-static struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc
-                                                               *obj_desc,
-                                                 struct fsl_mc_device
-                                                               *mc_bus_dev)
+struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
+                                          struct fsl_mc_device *mc_bus_dev)
 {
        struct device *dev;
 
index 0fe3f52..602f030 100644 (file)
@@ -554,3 +554,56 @@ int dprc_get_container_id(struct fsl_mc_io *mc_io,
 
        return 0;
 }
+
+/**
+ * dprc_get_connection() - Get connected endpoint and link status if connection
+ *                     exists.
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPRC object
+ * @endpoint1: Endpoint 1 configuration parameters
+ * @endpoint2: Returned endpoint 2 configuration parameters
+ * @state:     Returned link state:
+ *             1 - link is up;
+ *             0 - link is down;
+ *             -1 - no connection (endpoint2 information is irrelevant)
+ *
+ * Return:     '0' on Success; -ENOTCONN if connection does not exist.
+ */
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+                       u32 cmd_flags,
+                       u16 token,
+                       const struct dprc_endpoint *endpoint1,
+                       struct dprc_endpoint *endpoint2,
+                       int *state)
+{
+       struct dprc_cmd_get_connection *cmd_params;
+       struct dprc_rsp_get_connection *rsp_params;
+       struct fsl_mc_command cmd = { 0 };
+       int err, i;
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dprc_cmd_get_connection *)cmd.params;
+       cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+       cmd_params->ep1_interface_id = cpu_to_le16(endpoint1->if_id);
+       for (i = 0; i < 16; i++)
+               cmd_params->ep1_type[i] = endpoint1->type[i];
+
+       /* send command to mc */
+       err = mc_send_command(mc_io, &cmd);
+       if (err)
+               return -ENOTCONN;
+
+       /* retrieve response parameters */
+       rsp_params = (struct dprc_rsp_get_connection *)cmd.params;
+       endpoint2->id = le32_to_cpu(rsp_params->ep2_id);
+       endpoint2->if_id = le16_to_cpu(rsp_params->ep2_interface_id);
+       *state = le32_to_cpu(rsp_params->state);
+       for (i = 0; i < 16; i++)
+               endpoint2->type[i] = rsp_params->ep2_type[i];
+
+       return 0;
+}
index 5c9bf2e..a07cc19 100644 (file)
@@ -166,42 +166,52 @@ EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
 struct device_type fsl_mc_bus_dprc_type = {
        .name = "fsl_mc_bus_dprc"
 };
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dprc_type);
 
 struct device_type fsl_mc_bus_dpni_type = {
        .name = "fsl_mc_bus_dpni"
 };
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpni_type);
 
 struct device_type fsl_mc_bus_dpio_type = {
        .name = "fsl_mc_bus_dpio"
 };
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpio_type);
 
 struct device_type fsl_mc_bus_dpsw_type = {
        .name = "fsl_mc_bus_dpsw"
 };
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpsw_type);
 
 struct device_type fsl_mc_bus_dpbp_type = {
        .name = "fsl_mc_bus_dpbp"
 };
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpbp_type);
 
 struct device_type fsl_mc_bus_dpcon_type = {
        .name = "fsl_mc_bus_dpcon"
 };
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpcon_type);
 
 struct device_type fsl_mc_bus_dpmcp_type = {
        .name = "fsl_mc_bus_dpmcp"
 };
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmcp_type);
 
 struct device_type fsl_mc_bus_dpmac_type = {
        .name = "fsl_mc_bus_dpmac"
 };
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmac_type);
 
 struct device_type fsl_mc_bus_dprtc_type = {
        .name = "fsl_mc_bus_dprtc"
 };
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dprtc_type);
 
 struct device_type fsl_mc_bus_dpseci_type = {
        .name = "fsl_mc_bus_dpseci"
 };
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type);
 
 static struct device_type *fsl_mc_get_device_type(const char *type)
 {
@@ -702,6 +712,39 @@ void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
 }
 EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
 
+struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev)
+{
+       struct fsl_mc_device *mc_bus_dev, *endpoint;
+       struct fsl_mc_obj_desc endpoint_desc = { 0 };
+       struct dprc_endpoint endpoint1 = { 0 };
+       struct dprc_endpoint endpoint2 = { 0 };
+       int state, err;
+
+       mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+       strcpy(endpoint1.type, mc_dev->obj_desc.type);
+       endpoint1.id = mc_dev->obj_desc.id;
+
+       err = dprc_get_connection(mc_bus_dev->mc_io, 0,
+                                 mc_bus_dev->mc_handle,
+                                 &endpoint1, &endpoint2,
+                                 &state);
+
+       if (err == -ENOTCONN || state == -1)
+               return ERR_PTR(-ENOTCONN);
+
+       if (err < 0) {
+               dev_err(&mc_bus_dev->dev, "dprc_get_connection() = %d\n", err);
+               return ERR_PTR(err);
+       }
+
+       strcpy(endpoint_desc.type, endpoint2.type);
+       endpoint_desc.id = endpoint2.id;
+       endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+
+       return endpoint;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_get_endpoint);
+
 static int parse_mc_ranges(struct device *dev,
                           int *paddr_cells,
                           int *mc_addr_cells,
index 020fcc0..21ca8c7 100644 (file)
@@ -105,6 +105,8 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
 #define DPRC_CMDID_GET_OBJ_REG_V2               DPRC_CMD_V2(0x15E)
 #define DPRC_CMDID_SET_OBJ_IRQ                  DPRC_CMD(0x15F)
 
+#define DPRC_CMDID_GET_CONNECTION               DPRC_CMD(0x16C)
+
 struct dprc_cmd_open {
        __le32 container_id;
 };
@@ -228,6 +230,22 @@ struct dprc_cmd_set_obj_irq {
        u8 obj_type[16];
 };
 
+struct dprc_cmd_get_connection {
+       __le32 ep1_id;
+       __le16 ep1_interface_id;
+       u8 pad[2];
+       u8 ep1_type[16];
+};
+
+struct dprc_rsp_get_connection {
+       __le64 pad[3];
+       __le32 ep2_id;
+       __le16 ep2_interface_id;
+       __le16 pad1;
+       u8 ep2_type[16];
+       __le32 state;
+};
+
 /*
  * DPRC API for managing and querying DPAA resources
  */
@@ -392,6 +410,27 @@ int dprc_get_container_id(struct fsl_mc_io *mc_io,
                          u32 cmd_flags,
                          int *container_id);
 
+/**
+ * struct dprc_endpoint - Endpoint description for link connect/disconnect
+ *                     operations
+ * @type:      Endpoint object type: NULL terminated string
+ * @id:                Endpoint object ID
+ * @if_id:     Interface ID; should be set for endpoints with multiple
+ *             interfaces ("dpsw", "dpdmux"); for others, always set to 0
+ */
+struct dprc_endpoint {
+       char type[16];
+       int id;
+       u16 if_id;
+};
+
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+                       u32 cmd_flags,
+                       u16 token,
+                       const struct dprc_endpoint *endpoint1,
+                       struct dprc_endpoint *endpoint2,
+                       int *state);
+
 /*
  * Data Path Buffer Pool (DPBP) API
  */
@@ -574,4 +613,7 @@ void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
 
 bool fsl_mc_is_root_dprc(struct device *dev);
 
+struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
+                                          struct fsl_mc_device *mc_bus_dev);
+
 #endif /* _FSL_MC_PRIVATE_H_ */
index ad50efb..2b6670d 100644 (file)
@@ -74,6 +74,7 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
  * @clk_disable_quirk: module specific clock disable quirk
  * @reset_done_quirk: module specific reset done quirk
  * @module_enable_quirk: module specific enable quirk
+ * @module_disable_quirk: module specific disable quirk
  */
 struct sysc {
        struct device *dev;
@@ -100,6 +101,7 @@ struct sysc {
        void (*clk_disable_quirk)(struct sysc *sysc);
        void (*reset_done_quirk)(struct sysc *sysc);
        void (*module_enable_quirk)(struct sysc *sysc);
+       void (*module_disable_quirk)(struct sysc *sysc);
 };
 
 static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
@@ -959,6 +961,9 @@ static int sysc_disable_module(struct device *dev)
        if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
                return 0;
 
+       if (ddata->module_disable_quirk)
+               ddata->module_disable_quirk(ddata);
+
        regbits = ddata->cap->regbits;
        reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
 
@@ -1248,6 +1253,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
                   SYSC_MODULE_QUIRK_SGX),
        SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
                   SYSC_MODULE_QUIRK_WDT),
+       /* Watchdog on am3 and am4 */
+       SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
+                  SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE),
 
 #ifdef DEBUG
        SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0),
@@ -1440,14 +1448,14 @@ static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
                                   !(val & 0x10), 100,
                                   MAX_MODULE_SOFTRESET_WAIT);
        if (error)
-               dev_warn(ddata->dev, "wdt disable spr failed\n");
+               dev_warn(ddata->dev, "wdt disable step1 failed\n");
 
-       sysc_write(ddata, wps, 0x5555);
+       sysc_write(ddata, spr, 0x5555);
        error = readl_poll_timeout(ddata->module_va + wps, val,
                                   !(val & 0x10), 100,
                                   MAX_MODULE_SOFTRESET_WAIT);
        if (error)
-               dev_warn(ddata->dev, "wdt disable wps failed\n");
+               dev_warn(ddata->dev, "wdt disable step2 failed\n");
 }
 
 static void sysc_init_module_quirks(struct sysc *ddata)
@@ -1471,8 +1479,10 @@ static void sysc_init_module_quirks(struct sysc *ddata)
        if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
                ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;
 
-       if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT)
+       if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT) {
                ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
+               ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
+       }
 }
 
 static int sysc_clockdomain_init(struct sysc *ddata)
index bffc11b..48a224a 100644 (file)
@@ -720,7 +720,7 @@ static ssize_t store_##file_name                                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
-       ret = dev_pm_qos_update_request(policy->object##_freq_req, val);\
+       ret = freq_qos_update_request(policy->object##_freq_req, val);\
        return ret >= 0 ? count : ret;                                  \
 }
 
@@ -1202,19 +1202,21 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
                goto err_free_real_cpus;
        }
 
+       freq_constraints_init(&policy->constraints);
+
        policy->nb_min.notifier_call = cpufreq_notifier_min;
        policy->nb_max.notifier_call = cpufreq_notifier_max;
 
-       ret = dev_pm_qos_add_notifier(dev, &policy->nb_min,
-                                     DEV_PM_QOS_MIN_FREQUENCY);
+       ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
+                                   &policy->nb_min);
        if (ret) {
                dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
                        ret, cpumask_pr_args(policy->cpus));
                goto err_kobj_remove;
        }
 
-       ret = dev_pm_qos_add_notifier(dev, &policy->nb_max,
-                                     DEV_PM_QOS_MAX_FREQUENCY);
+       ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
+                                   &policy->nb_max);
        if (ret) {
                dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
                        ret, cpumask_pr_args(policy->cpus));
@@ -1232,8 +1234,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
        return policy;
 
 err_min_qos_notifier:
-       dev_pm_qos_remove_notifier(dev, &policy->nb_min,
-                                  DEV_PM_QOS_MIN_FREQUENCY);
+       freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
+                                &policy->nb_min);
 err_kobj_remove:
        cpufreq_policy_put_kobj(policy);
 err_free_real_cpus:
@@ -1250,7 +1252,6 @@ err_free_policy:
 
 static void cpufreq_policy_free(struct cpufreq_policy *policy)
 {
-       struct device *dev = get_cpu_device(policy->cpu);
        unsigned long flags;
        int cpu;
 
@@ -1262,10 +1263,13 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
                per_cpu(cpufreq_cpu_data, cpu) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-       dev_pm_qos_remove_notifier(dev, &policy->nb_max,
-                                  DEV_PM_QOS_MAX_FREQUENCY);
-       dev_pm_qos_remove_notifier(dev, &policy->nb_min,
-                                  DEV_PM_QOS_MIN_FREQUENCY);
+       freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
+                                &policy->nb_max);
+       freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
+                                &policy->nb_min);
+
+       /* Cancel any pending policy->update work before freeing the policy. */
+       cancel_work_sync(&policy->update);
 
        if (policy->max_freq_req) {
                /*
@@ -1274,10 +1278,10 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
                 */
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                             CPUFREQ_REMOVE_POLICY, policy);
-               dev_pm_qos_remove_request(policy->max_freq_req);
+               freq_qos_remove_request(policy->max_freq_req);
        }
 
-       dev_pm_qos_remove_request(policy->min_freq_req);
+       freq_qos_remove_request(policy->min_freq_req);
        kfree(policy->min_freq_req);
 
        cpufreq_policy_put_kobj(policy);
@@ -1357,8 +1361,6 @@ static int cpufreq_online(unsigned int cpu)
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
 
        if (new_policy) {
-               struct device *dev = get_cpu_device(cpu);
-
                for_each_cpu(j, policy->related_cpus) {
                        per_cpu(cpufreq_cpu_data, j) = policy;
                        add_cpu_dev_symlink(policy, j);
@@ -1369,36 +1371,31 @@ static int cpufreq_online(unsigned int cpu)
                if (!policy->min_freq_req)
                        goto out_destroy_policy;
 
-               ret = dev_pm_qos_add_request(dev, policy->min_freq_req,
-                                            DEV_PM_QOS_MIN_FREQUENCY,
-                                            policy->min);
+               ret = freq_qos_add_request(&policy->constraints,
+                                          policy->min_freq_req, FREQ_QOS_MIN,
+                                          policy->min);
                if (ret < 0) {
                        /*
-                        * So we don't call dev_pm_qos_remove_request() for an
+                        * So we don't call freq_qos_remove_request() for an
                         * uninitialized request.
                         */
                        kfree(policy->min_freq_req);
                        policy->min_freq_req = NULL;
-
-                       dev_err(dev, "Failed to add min-freq constraint (%d)\n",
-                               ret);
                        goto out_destroy_policy;
                }
 
                /*
                 * This must be initialized right here to avoid calling
-                * dev_pm_qos_remove_request() on uninitialized request in case
+                * freq_qos_remove_request() on uninitialized request in case
                 * of errors.
                 */
                policy->max_freq_req = policy->min_freq_req + 1;
 
-               ret = dev_pm_qos_add_request(dev, policy->max_freq_req,
-                                            DEV_PM_QOS_MAX_FREQUENCY,
-                                            policy->max);
+               ret = freq_qos_add_request(&policy->constraints,
+                                          policy->max_freq_req, FREQ_QOS_MAX,
+                                          policy->max);
                if (ret < 0) {
                        policy->max_freq_req = NULL;
-                       dev_err(dev, "Failed to add max-freq constraint (%d)\n",
-                               ret);
                        goto out_destroy_policy;
                }
 
@@ -2374,7 +2371,6 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
                       struct cpufreq_policy *new_policy)
 {
        struct cpufreq_governor *old_gov;
-       struct device *cpu_dev = get_cpu_device(policy->cpu);
        int ret;
 
        pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
@@ -2386,8 +2382,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
         * PM QoS framework collects all the requests from users and provide us
         * the final aggregated value here.
         */
-       new_policy->min = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MIN_FREQUENCY);
-       new_policy->max = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MAX_FREQUENCY);
+       new_policy->min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
+       new_policy->max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
 
        /* verify the cpu speed can be set within this limit */
        ret = cpufreq_driver->verify(new_policy);
@@ -2518,7 +2514,7 @@ static int cpufreq_boost_set_sw(int state)
                        break;
                }
 
-               ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
+               ret = freq_qos_update_request(policy->max_freq_req, policy->max);
                if (ret < 0)
                        break;
        }
index 9f02de9..53a51c1 100644 (file)
@@ -1088,10 +1088,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 
 static struct cpufreq_driver intel_pstate;
 
-static void update_qos_request(enum dev_pm_qos_req_type type)
+static void update_qos_request(enum freq_qos_req_type type)
 {
        int max_state, turbo_max, freq, i, perf_pct;
-       struct dev_pm_qos_request *req;
+       struct freq_qos_request *req;
        struct cpufreq_policy *policy;
 
        for_each_possible_cpu(i) {
@@ -1112,7 +1112,7 @@ static void update_qos_request(enum dev_pm_qos_req_type type)
                else
                        turbo_max = cpu->pstate.turbo_pstate;
 
-               if (type == DEV_PM_QOS_MIN_FREQUENCY) {
+               if (type == FREQ_QOS_MIN) {
                        perf_pct = global.min_perf_pct;
                } else {
                        req++;
@@ -1122,7 +1122,7 @@ static void update_qos_request(enum dev_pm_qos_req_type type)
                freq = DIV_ROUND_UP(turbo_max * perf_pct, 100);
                freq *= cpu->pstate.scaling;
 
-               if (dev_pm_qos_update_request(req, freq) < 0)
+               if (freq_qos_update_request(req, freq) < 0)
                        pr_warn("Failed to update freq constraint: CPU%d\n", i);
        }
 }
@@ -1153,7 +1153,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
        if (intel_pstate_driver == &intel_pstate)
                intel_pstate_update_policies();
        else
-               update_qos_request(DEV_PM_QOS_MAX_FREQUENCY);
+               update_qos_request(FREQ_QOS_MAX);
 
        mutex_unlock(&intel_pstate_driver_lock);
 
@@ -1187,7 +1187,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
        if (intel_pstate_driver == &intel_pstate)
                intel_pstate_update_policies();
        else
-               update_qos_request(DEV_PM_QOS_MIN_FREQUENCY);
+               update_qos_request(FREQ_QOS_MIN);
 
        mutex_unlock(&intel_pstate_driver_lock);
 
@@ -2381,7 +2381,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
        int max_state, turbo_max, min_freq, max_freq, ret;
-       struct dev_pm_qos_request *req;
+       struct freq_qos_request *req;
        struct cpudata *cpu;
        struct device *dev;
 
@@ -2416,15 +2416,15 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
        max_freq = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
        max_freq *= cpu->pstate.scaling;
 
-       ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_MIN_FREQUENCY,
-                                    min_freq);
+       ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
+                                  min_freq);
        if (ret < 0) {
                dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
                goto free_req;
        }
 
-       ret = dev_pm_qos_add_request(dev, req + 1, DEV_PM_QOS_MAX_FREQUENCY,
-                                    max_freq);
+       ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
+                                  max_freq);
        if (ret < 0) {
                dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
                goto remove_min_req;
@@ -2435,7 +2435,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
        return 0;
 
 remove_min_req:
-       dev_pm_qos_remove_request(req);
+       freq_qos_remove_request(req);
 free_req:
        kfree(req);
 pstate_exit:
@@ -2446,12 +2446,12 @@ pstate_exit:
 
 static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-       struct dev_pm_qos_request *req;
+       struct freq_qos_request *req;
 
        req = policy->driver_data;
 
-       dev_pm_qos_remove_request(req + 1);
-       dev_pm_qos_remove_request(req);
+       freq_qos_remove_request(req + 1);
+       freq_qos_remove_request(req);
        kfree(req);
 
        return intel_pstate_cpu_exit(policy);
index bc9dd30..037fe23 100644 (file)
@@ -65,7 +65,7 @@ EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
 static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
 {
        struct cpufreq_policy *policy;
-       struct dev_pm_qos_request *req;
+       struct freq_qos_request *req;
        u8 node, slow_mode;
        int cpu, ret;
 
@@ -86,7 +86,7 @@ static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
 
        req = policy->driver_data;
 
-       ret = dev_pm_qos_update_request(req,
+       ret = freq_qos_update_request(req,
                        policy->freq_table[slow_mode].frequency);
        if (ret < 0)
                pr_warn("Failed to update freq constraint: %d\n", ret);
@@ -103,7 +103,7 @@ static struct pmi_handler cbe_pmi_handler = {
 
 void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
 {
-       struct dev_pm_qos_request *req;
+       struct freq_qos_request *req;
        int ret;
 
        if (!cbe_cpufreq_has_pmi)
@@ -113,9 +113,8 @@ void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
        if (!req)
                return;
 
-       ret = dev_pm_qos_add_request(get_cpu_device(policy->cpu), req,
-                                    DEV_PM_QOS_MAX_FREQUENCY,
-                                    policy->freq_table[0].frequency);
+       ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX,
+                                  policy->freq_table[0].frequency);
        if (ret < 0) {
                pr_err("Failed to add freq constraint (%d)\n", ret);
                kfree(req);
@@ -128,10 +127,10 @@ EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_init);
 
 void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy)
 {
-       struct dev_pm_qos_request *req = policy->driver_data;
+       struct freq_qos_request *req = policy->driver_data;
 
        if (cbe_cpufreq_has_pmi) {
-               dev_pm_qos_remove_request(req);
+               freq_qos_remove_request(req);
                kfree(req);
        }
 }
index 932390b..b0ce9bc 100644 (file)
@@ -95,6 +95,10 @@ static int __init haltpoll_init(void)
        int ret;
        struct cpuidle_driver *drv = &haltpoll_driver;
 
+       /* Do not load haltpoll if idle= is passed */
+       if (boot_option_idle_override != IDLE_NO_OVERRIDE)
+               return -ENODEV;
+
        cpuidle_poll_state_init(drv);
 
        if (!kvm_para_available() ||
index 774d991..aca7523 100644 (file)
@@ -1297,7 +1297,7 @@ static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
        tp->write_seq = snd_isn;
        tp->snd_nxt = snd_isn;
        tp->snd_una = snd_isn;
-       inet_sk(sk)->inet_id = tp->write_seq ^ jiffies;
+       inet_sk(sk)->inet_id = prandom_u32();
        assign_rxopt(sk, opt);
 
        if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
index 0891ab8..98bc5a4 100644 (file)
@@ -1702,7 +1702,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                return peekmsg(sk, msg, len, nonblock, flags);
 
        if (sk_can_busy_loop(sk) &&
-           skb_queue_empty(&sk->sk_receive_queue) &&
+           skb_queue_empty_lockless(&sk->sk_receive_queue) &&
            sk->sk_state == TCP_ESTABLISHED)
                sk_busy_loop(sk, nonblock);
 
index 9ba74ab..c27e206 100644 (file)
@@ -1707,6 +1707,14 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
        if (!sdma->script_number)
                sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
 
+       if (sdma->script_number > sizeof(struct sdma_script_start_addrs)
+                                 / sizeof(s32)) {
+               dev_err(sdma->dev,
+                       "SDMA script number %d not match with firmware.\n",
+                       sdma->script_number);
+               return;
+       }
+
        for (i = 0; i < sdma->script_number; i++)
                if (addr_arr[i] > 0)
                        saddr_arr[i] = addr_arr[i];
index 8e90a40..ef73f65 100644 (file)
@@ -694,6 +694,25 @@ static int bam_dma_terminate_all(struct dma_chan *chan)
 
        /* remove all transactions, including active transaction */
        spin_lock_irqsave(&bchan->vc.lock, flag);
+       /*
+        * If we have transactions queued, then some might be committed to the
+        * hardware in the desc fifo.  The only way to reset the desc fifo is
+        * to do a hardware reset (either by pipe or the entire block).
+        * bam_chan_init_hw() will trigger a pipe reset, and also reinit the
+        * pipe.  If the pipe is left disabled (default state after pipe reset)
+        * and is accessed by a connected hardware engine, a fatal error in
+        * the BAM will occur.  There is a small window where this could happen
+        * with bam_chan_init_hw(), but it is assumed that the caller has
+        * stopped activity on any attached hardware engine.  Make sure to do
+        * this first so that the BAM hardware doesn't cause memory corruption
+        * by accessing freed resources.
+        */
+       if (!list_empty(&bchan->desc_list)) {
+               async_desc = list_first_entry(&bchan->desc_list,
+                                             struct bam_async_desc, desc_node);
+               bam_chan_init_hw(bchan, async_desc->dir);
+       }
+
        list_for_each_entry_safe(async_desc, tmp,
                                 &bchan->desc_list, desc_node) {
                list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
index 525dc73..8546ad0 100644 (file)
 #define SPRD_DMA_SRC_TRSF_STEP_OFFSET  0
 #define SPRD_DMA_TRSF_STEP_MASK                GENMASK(15, 0)
 
+/* SPRD DMA_SRC_BLK_STEP register definition */
+#define SPRD_DMA_LLIST_HIGH_MASK       GENMASK(31, 28)
+#define SPRD_DMA_LLIST_HIGH_SHIFT      28
+
 /* define DMA channel mode & trigger mode mask */
 #define SPRD_DMA_CHN_MODE_MASK         GENMASK(7, 0)
 #define SPRD_DMA_TRG_MODE_MASK         GENMASK(7, 0)
@@ -208,6 +212,7 @@ struct sprd_dma_dev {
        struct sprd_dma_chn     channels[0];
 };
 
+static void sprd_dma_free_desc(struct virt_dma_desc *vd);
 static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
 static struct of_dma_filter_info sprd_dma_info = {
        .filter_fn = sprd_dma_filter_fn,
@@ -609,12 +614,19 @@ static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
 static void sprd_dma_free_chan_resources(struct dma_chan *chan)
 {
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+       struct virt_dma_desc *cur_vd = NULL;
        unsigned long flags;
 
        spin_lock_irqsave(&schan->vc.lock, flags);
+       if (schan->cur_desc)
+               cur_vd = &schan->cur_desc->vd;
+
        sprd_dma_stop(schan);
        spin_unlock_irqrestore(&schan->vc.lock, flags);
 
+       if (cur_vd)
+               sprd_dma_free_desc(cur_vd);
+
        vchan_free_chan_resources(&schan->vc);
        pm_runtime_put(chan->device->dev);
 }
@@ -717,6 +729,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
        u32 int_mode = flags & SPRD_DMA_INT_MASK;
        int src_datawidth, dst_datawidth, src_step, dst_step;
        u32 temp, fix_mode = 0, fix_en = 0;
+       phys_addr_t llist_ptr;
 
        if (dir == DMA_MEM_TO_DEV) {
                src_step = sprd_dma_get_step(slave_cfg->src_addr_width);
@@ -814,13 +827,16 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
                 * Set the link-list pointer point to next link-list
                 * configuration's physical address.
                 */
-               hw->llist_ptr = schan->linklist.phy_addr + temp;
+               llist_ptr = schan->linklist.phy_addr + temp;
+               hw->llist_ptr = lower_32_bits(llist_ptr);
+               hw->src_blk_step = (upper_32_bits(llist_ptr) << SPRD_DMA_LLIST_HIGH_SHIFT) &
+                       SPRD_DMA_LLIST_HIGH_MASK;
        } else {
                hw->llist_ptr = 0;
+               hw->src_blk_step = 0;
        }
 
        hw->frg_step = 0;
-       hw->src_blk_step = 0;
        hw->des_blk_step = 0;
        return 0;
 }
@@ -1023,15 +1039,22 @@ static int sprd_dma_resume(struct dma_chan *chan)
 static int sprd_dma_terminate_all(struct dma_chan *chan)
 {
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+       struct virt_dma_desc *cur_vd = NULL;
        unsigned long flags;
        LIST_HEAD(head);
 
        spin_lock_irqsave(&schan->vc.lock, flags);
+       if (schan->cur_desc)
+               cur_vd = &schan->cur_desc->vd;
+
        sprd_dma_stop(schan);
 
        vchan_get_all_descriptors(&schan->vc, &head);
        spin_unlock_irqrestore(&schan->vc.lock, flags);
 
+       if (cur_vd)
+               sprd_dma_free_desc(cur_vd);
+
        vchan_dma_desc_free_list(&schan->vc, &head);
        return 0;
 }
index 5f8adf5..6e12685 100644 (file)
@@ -40,6 +40,7 @@
 #define ADMA_CH_CONFIG_MAX_BURST_SIZE                   16
 #define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val)             ((val) & 0xf)
 #define ADMA_CH_CONFIG_MAX_BUFS                                8
+#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4)
 
 #define ADMA_CH_FIFO_CTRL                              0x2c
 #define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val)         (((val) & 0xf) << 8)
@@ -77,6 +78,7 @@ struct tegra_adma;
  * @ch_req_tx_shift: Register offset for AHUB transmit channel select.
  * @ch_req_rx_shift: Register offset for AHUB receive channel select.
  * @ch_base_offset: Register offset of DMA channel registers.
+ * @has_outstanding_reqs: If DMA channel can have outstanding requests.
  * @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
  * @ch_req_mask: Mask for Tx or Rx channel select.
  * @ch_req_max: Maximum number of Tx or Rx channels available.
@@ -95,6 +97,7 @@ struct tegra_adma_chip_data {
        unsigned int ch_req_max;
        unsigned int ch_reg_size;
        unsigned int nr_channels;
+       bool has_outstanding_reqs;
 };
 
 /*
@@ -594,6 +597,8 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
                         ADMA_CH_CTRL_FLOWCTRL_EN;
        ch_regs->config |= cdata->adma_get_burst_config(burst_size);
        ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
+       if (cdata->has_outstanding_reqs)
+               ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8);
        ch_regs->fifo_ctrl = cdata->ch_fifo_ctrl;
        ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
 
@@ -778,6 +783,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
        .ch_req_tx_shift        = 28,
        .ch_req_rx_shift        = 24,
        .ch_base_offset         = 0,
+       .has_outstanding_reqs   = false,
        .ch_fifo_ctrl           = TEGRA210_FIFO_CTRL_DEFAULT,
        .ch_req_mask            = 0xf,
        .ch_req_max             = 10,
@@ -792,6 +798,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
        .ch_req_tx_shift        = 27,
        .ch_req_rx_shift        = 22,
        .ch_base_offset         = 0x10000,
+       .has_outstanding_reqs   = true,
        .ch_fifo_ctrl           = TEGRA186_FIFO_CTRL_DEFAULT,
        .ch_req_mask            = 0x1f,
        .ch_req_max             = 20,
index 2f946f5..8c2f7eb 100644 (file)
@@ -586,9 +586,22 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
        enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
 {
        struct cppi41_channel *c = to_cpp41_chan(chan);
+       struct dma_async_tx_descriptor *txd = NULL;
+       struct cppi41_dd *cdd = c->cdd;
        struct cppi41_desc *d;
        struct scatterlist *sg;
        unsigned int i;
+       int error;
+
+       error = pm_runtime_get(cdd->ddev.dev);
+       if (error < 0) {
+               pm_runtime_put_noidle(cdd->ddev.dev);
+
+               return NULL;
+       }
+
+       if (cdd->is_suspended)
+               goto err_out_not_ready;
 
        d = c->desc;
        for_each_sg(sgl, sg, sg_len, i) {
@@ -611,7 +624,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
                d++;
        }
 
-       return &c->txd;
+       txd = &c->txd;
+
+err_out_not_ready:
+       pm_runtime_mark_last_busy(cdd->ddev.dev);
+       pm_runtime_put_autosuspend(cdd->ddev.dev);
+
+       return txd;
 }
 
 static void cppi41_compute_td_desc(struct cppi41_desc *d)
index e7dc3c4..5d56f1e 100644 (file)
@@ -68,6 +68,9 @@
 #define XILINX_DMA_DMACR_CIRC_EN               BIT(1)
 #define XILINX_DMA_DMACR_RUNSTOP               BIT(0)
 #define XILINX_DMA_DMACR_FSYNCSRC_MASK         GENMASK(6, 5)
+#define XILINX_DMA_DMACR_DELAY_MASK            GENMASK(31, 24)
+#define XILINX_DMA_DMACR_FRAME_COUNT_MASK      GENMASK(23, 16)
+#define XILINX_DMA_DMACR_MASTER_MASK           GENMASK(11, 8)
 
 #define XILINX_DMA_REG_DMASR                   0x0004
 #define XILINX_DMA_DMASR_EOL_LATE_ERR          BIT(15)
@@ -1354,7 +1357,8 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
                                           node);
                hw = &segment->hw;
 
-               xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
+               xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
+                            xilinx_prep_dma_addr_t(hw->buf_addr));
 
                /* Start the transfer */
                dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
@@ -2117,8 +2121,10 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
        chan->config.gen_lock = cfg->gen_lock;
        chan->config.master = cfg->master;
 
+       dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
        if (cfg->gen_lock && chan->genlock) {
                dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
+               dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
                dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
        }
 
@@ -2134,11 +2140,13 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
        chan->config.delay = cfg->delay;
 
        if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
+               dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
                dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
                chan->config.coalesc = cfg->coalesc;
        }
 
        if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
+               dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
                dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
                chan->config.delay = cfg->delay;
        }
index d413a0b..0bb6285 100644 (file)
@@ -553,7 +553,11 @@ void ghes_edac_unregister(struct ghes *ghes)
        if (!ghes_pvt)
                return;
 
+       if (atomic_dec_return(&ghes_init))
+               return;
+
        mci = ghes_pvt->mci;
+       ghes_pvt = NULL;
        edac_mc_del_mc(mci->pdev);
        edac_mc_free(mci);
 }
index d03ed8e..8e3d355 100644 (file)
@@ -22,3 +22,11 @@ config BCM47XX_SPROM
          In case of SoC devices SPROM content is stored on a flash used by
          bootloader firmware CFE. This driver provides method to ssb and bcma
          drivers to read SPROM on SoC.
+
+config TEE_BNXT_FW
+       tristate "Broadcom BNXT firmware manager"
+       depends on (ARCH_BCM_IPROC && OPTEE) || (COMPILE_TEST && TEE)
+       default ARCH_BCM_IPROC
+       help
+         This module helps manage firmware on the Broadcom BNXT device. The
+         module registers on the tee bus and invokes calls to manage firmware.
index 72c7fdc..17c5061 100644 (file)
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_BCM47XX_NVRAM)            += bcm47xx_nvram.o
 obj-$(CONFIG_BCM47XX_SPROM)            += bcm47xx_sprom.o
+obj-$(CONFIG_TEE_BNXT_FW)              += tee_bnxt_fw.o
diff --git a/drivers/firmware/broadcom/tee_bnxt_fw.c b/drivers/firmware/broadcom/tee_bnxt_fw.c
new file mode 100644 (file)
index 0000000..5b7ef89
--- /dev/null
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Broadcom.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+
+#include <linux/firmware/broadcom/tee_bnxt_fw.h>
+
+#define MAX_SHM_MEM_SZ SZ_4M
+
+#define MAX_TEE_PARAM_ARRY_MEMB                4
+
+enum ta_cmd {
+       /*
+        * TA_CMD_BNXT_FASTBOOT - boot bnxt device by copying f/w into sram
+        *
+        *      param[0] unused
+        *      param[1] unused
+        *      param[2] unused
+        *      param[3] unused
+        *
+        * Result:
+        *      TEE_SUCCESS - Invoke command success
+        *      TEE_ERROR_ITEM_NOT_FOUND - Corrupt f/w image found on memory
+        */
+       TA_CMD_BNXT_FASTBOOT = 0,
+
+       /*
+        * TA_CMD_BNXT_COPY_COREDUMP - copy the core dump into shm
+        *
+        *      param[0] (inout memref) - Coredump buffer memory reference
+        *      param[1] (in value) - value.a: offset, data to be copied from
+        *                            value.b: size of data to be copied
+        *      param[2] unused
+        *      param[3] unused
+        *
+        * Result:
+        *      TEE_SUCCESS - Invoke command success
+        *      TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+        *      TEE_ERROR_ITEM_NOT_FOUND - Corrupt core dump
+        */
+       TA_CMD_BNXT_COPY_COREDUMP = 3,
+};
+
+/**
+ * struct tee_bnxt_fw_private - OP-TEE bnxt private data
+ * @dev:               OP-TEE based bnxt device.
+ * @ctx:               OP-TEE context handler.
+ * @session_id:                TA session identifier.
+ */
+struct tee_bnxt_fw_private {
+       struct device *dev;
+       struct tee_context *ctx;
+       u32 session_id;
+       struct tee_shm *fw_shm_pool;
+};
+
+static struct tee_bnxt_fw_private pvt_data;
+
+static void prepare_args(int cmd,
+                        struct tee_ioctl_invoke_arg *arg,
+                        struct tee_param *param)
+{
+       memset(arg, 0, sizeof(*arg));
+       memset(param, 0, MAX_TEE_PARAM_ARRY_MEMB * sizeof(*param));
+
+       arg->func = cmd;
+       arg->session = pvt_data.session_id;
+       arg->num_params = MAX_TEE_PARAM_ARRY_MEMB;
+
+       /* Fill invoke cmd params */
+       switch (cmd) {
+       case TA_CMD_BNXT_COPY_COREDUMP:
+               param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
+               param[0].u.memref.shm = pvt_data.fw_shm_pool;
+               param[0].u.memref.size = MAX_SHM_MEM_SZ;
+               param[0].u.memref.shm_offs = 0;
+               param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+               break;
+       case TA_CMD_BNXT_FASTBOOT:
+       default:
+               /* Nothing to do */
+               break;
+       }
+}
+
+/**
+ * tee_bnxt_fw_load() - Load the bnxt firmware
+ *                 Uses an OP-TEE call to start a secure
+ *                 boot process.
+ * Returns 0 on success, negative errno otherwise.
+ */
+int tee_bnxt_fw_load(void)
+{
+       int ret = 0;
+       struct tee_ioctl_invoke_arg arg;
+       struct tee_param param[MAX_TEE_PARAM_ARRY_MEMB];
+
+       if (!pvt_data.ctx)
+               return -ENODEV;
+
+       prepare_args(TA_CMD_BNXT_FASTBOOT, &arg, param);
+
+       ret = tee_client_invoke_func(pvt_data.ctx, &arg, param);
+       if (ret < 0 || arg.ret != 0) {
+               dev_err(pvt_data.dev,
+                       "TA_CMD_BNXT_FASTBOOT invoke failed TEE err: %x, ret:%x\n",
+                       arg.ret, ret);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(tee_bnxt_fw_load);
+
+/**
+ * tee_bnxt_copy_coredump() - Copy the core dump from shared memory
+ *                         Uses OP-TEE calls to copy the core dump in chunks
+ * @buf:       destination buffer where core dump is copied into
+ * @offset:    offset from the base address of core dump area
+ * @size:      size of the dump
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int tee_bnxt_copy_coredump(void *buf, u32 offset, u32 size)
+{
+       struct tee_ioctl_invoke_arg arg;
+       struct tee_param param[MAX_TEE_PARAM_ARRY_MEMB];
+       void *core_data;
+       u32 rbytes = size;
+       u32 nbytes = 0;
+       int ret = 0;
+
+       if (!pvt_data.ctx)
+               return -ENODEV;
+
+       prepare_args(TA_CMD_BNXT_COPY_COREDUMP, &arg, param);
+
+       while (rbytes)  {
+               nbytes = rbytes;
+
+               nbytes = min_t(u32, rbytes, param[0].u.memref.size);
+
+               /* Fill additional invoke cmd params */
+               param[1].u.value.a = offset;
+               param[1].u.value.b = nbytes;
+
+               ret = tee_client_invoke_func(pvt_data.ctx, &arg, param);
+               if (ret < 0 || arg.ret != 0) {
+                       dev_err(pvt_data.dev,
+                               "TA_CMD_BNXT_COPY_COREDUMP invoke failed TEE err: %x, ret:%x\n",
+                               arg.ret, ret);
+                       return -EINVAL;
+               }
+
+               core_data = tee_shm_get_va(pvt_data.fw_shm_pool, 0);
+               if (IS_ERR(core_data)) {
+                       dev_err(pvt_data.dev, "tee_shm_get_va failed\n");
+                       return PTR_ERR(core_data);
+               }
+
+               memcpy(buf, core_data, nbytes);
+
+               rbytes -= nbytes;
+               buf += nbytes;
+               offset += nbytes;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(tee_bnxt_copy_coredump);
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+       return (ver->impl_id == TEE_IMPL_ID_OPTEE);
+}
+
+static int tee_bnxt_fw_probe(struct device *dev)
+{
+       struct tee_client_device *bnxt_device = to_tee_client_device(dev);
+       int ret, err = -ENODEV;
+       struct tee_ioctl_open_session_arg sess_arg;
+       struct tee_shm *fw_shm_pool;
+
+       memset(&sess_arg, 0, sizeof(sess_arg));
+
+       /* Open context with TEE driver */
+       pvt_data.ctx = tee_client_open_context(NULL, optee_ctx_match, NULL,
+                                              NULL);
+       if (IS_ERR(pvt_data.ctx))
+               return -ENODEV;
+
+       /* Open session with Bnxt load Trusted App */
+       memcpy(sess_arg.uuid, bnxt_device->id.uuid.b, TEE_IOCTL_UUID_LEN);
+       sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+       sess_arg.num_params = 0;
+
+       ret = tee_client_open_session(pvt_data.ctx, &sess_arg, NULL);
+       if (ret < 0 || sess_arg.ret != 0) {
+               dev_err(dev, "tee_client_open_session failed, err: %x\n",
+                       sess_arg.ret);
+               err = -EINVAL;
+               goto out_ctx;
+       }
+       pvt_data.session_id = sess_arg.session;
+
+       pvt_data.dev = dev;
+
+       fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ,
+                                   TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+       if (IS_ERR(fw_shm_pool)) {
+               tee_client_close_context(pvt_data.ctx);
+               dev_err(pvt_data.dev, "tee_shm_alloc failed\n");
+               err = PTR_ERR(fw_shm_pool);
+               goto out_sess;
+       }
+
+       pvt_data.fw_shm_pool = fw_shm_pool;
+
+       return 0;
+
+out_sess:
+       tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+out_ctx:
+       tee_client_close_context(pvt_data.ctx);
+
+       return err;
+}
+
+static int tee_bnxt_fw_remove(struct device *dev)
+{
+       tee_shm_free(pvt_data.fw_shm_pool);
+       tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+       tee_client_close_context(pvt_data.ctx);
+       pvt_data.ctx = NULL;
+
+       return 0;
+}
+
+static const struct tee_client_device_id tee_bnxt_fw_id_table[] = {
+       {UUID_INIT(0x6272636D, 0x2019, 0x0716,
+                   0x42, 0x43, 0x4D, 0x5F, 0x53, 0x43, 0x48, 0x49)},
+       {}
+};
+
+MODULE_DEVICE_TABLE(tee, tee_bnxt_fw_id_table);
+
+static struct tee_client_driver tee_bnxt_fw_driver = {
+       .id_table       = tee_bnxt_fw_id_table,
+       .driver         = {
+               .name           = KBUILD_MODNAME,
+               .bus            = &tee_bus_type,
+               .probe          = tee_bnxt_fw_probe,
+               .remove         = tee_bnxt_fw_remove,
+       },
+};
+
+static int __init tee_bnxt_fw_mod_init(void)
+{
+       return driver_register(&tee_bnxt_fw_driver.driver);
+}
+
+static void __exit tee_bnxt_fw_mod_exit(void)
+{
+       driver_unregister(&tee_bnxt_fw_driver.driver);
+}
+
+module_init(tee_bnxt_fw_mod_init);
+module_exit(tee_bnxt_fw_mod_exit);
+
+MODULE_AUTHOR("Vikas Gupta <vikas.gupta@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom bnxt firmware manager");
+MODULE_LICENSE("GPL v2");
index 178ee81..b248870 100644 (file)
@@ -182,6 +182,7 @@ config RESET_ATTACK_MITIGATION
 
 config EFI_RCI2_TABLE
        bool "EFI Runtime Configuration Interface Table Version 2 Support"
+       depends on X86 || COMPILE_TEST
        help
          Displays the content of the Runtime Configuration Interface
          Table version 2 on Dell EMC PowerEdge systems as a binary
index 69f00f7..e98bbf8 100644 (file)
@@ -554,7 +554,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
                                              sizeof(*seed) + size);
                        if (seed != NULL) {
                                pr_notice("seeding entropy pool\n");
-                               add_device_randomness(seed->bits, seed->size);
+                               add_bootloader_randomness(seed->bits, seed->size);
                                early_memunmap(seed, sizeof(*seed) + size);
                        } else {
                                pr_err("Could not map UEFI random seed!\n");
index 0460c75..ee0661d 100644 (file)
@@ -52,6 +52,7 @@ lib-$(CONFIG_EFI_ARMSTUB)     += arm-stub.o fdt.o string.o random.o \
 
 lib-$(CONFIG_ARM)              += arm32-stub.o
 lib-$(CONFIG_ARM64)            += arm64-stub.o
+CFLAGS_arm32-stub.o            := -DTEXT_OFFSET=$(TEXT_OFFSET)
 CFLAGS_arm64-stub.o            := -DTEXT_OFFSET=$(TEXT_OFFSET)
 
 #
index e8f7aef..41213bf 100644 (file)
@@ -195,6 +195,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
                                 unsigned long dram_base,
                                 efi_loaded_image_t *image)
 {
+       unsigned long kernel_base;
        efi_status_t status;
 
        /*
@@ -204,9 +205,18 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
         * loaded. These assumptions are made by the decompressor,
         * before any memory map is available.
         */
-       dram_base = round_up(dram_base, SZ_128M);
+       kernel_base = round_up(dram_base, SZ_128M);
 
-       status = reserve_kernel_base(sys_table, dram_base, reserve_addr,
+       /*
+        * Note that some platforms (notably, the Raspberry Pi 2) put
+        * spin-tables and other pieces of firmware at the base of RAM,
+        * abusing the fact that the window of TEXT_OFFSET bytes at the
+        * base of the kernel image is only partially used at the moment.
+        * (Up to 5 pages are used for the swapper page tables)
+        */
+       kernel_base += TEXT_OFFSET - 5 * PAGE_SIZE;
+
+       status = reserve_kernel_base(sys_table, kernel_base, reserve_addr,
                                     reserve_size);
        if (status != EFI_SUCCESS) {
                pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n");
@@ -220,7 +230,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
        *image_size = image->image_size;
        status = efi_relocate_kernel(sys_table, image_addr, *image_size,
                                     *image_size,
-                                    dram_base + MAX_UNCOMP_KERNEL_SIZE, 0);
+                                    kernel_base + MAX_UNCOMP_KERNEL_SIZE, 0, 0);
        if (status != EFI_SUCCESS) {
                pr_efi_err(sys_table, "Failed to relocate kernel.\n");
                efi_free(sys_table, *reserve_size, *reserve_addr);
index 3caae7f..35dbc27 100644 (file)
@@ -260,11 +260,11 @@ fail:
 }
 
 /*
- * Allocate at the lowest possible address.
+ * Allocate at the lowest possible address that is not below 'min'.
  */
-efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
-                          unsigned long size, unsigned long align,
-                          unsigned long *addr)
+efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
+                                unsigned long size, unsigned long align,
+                                unsigned long *addr, unsigned long min)
 {
        unsigned long map_size, desc_size, buff_size;
        efi_memory_desc_t *map;
@@ -311,13 +311,8 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
                start = desc->phys_addr;
                end = start + desc->num_pages * EFI_PAGE_SIZE;
 
-               /*
-                * Don't allocate at 0x0. It will confuse code that
-                * checks pointers against NULL. Skip the first 8
-                * bytes so we start at a nice even number.
-                */
-               if (start == 0x0)
-                       start += 8;
+               if (start < min)
+                       start = min;
 
                start = round_up(start, align);
                if ((start + size) > end)
@@ -698,7 +693,8 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
                                 unsigned long image_size,
                                 unsigned long alloc_size,
                                 unsigned long preferred_addr,
-                                unsigned long alignment)
+                                unsigned long alignment,
+                                unsigned long min_addr)
 {
        unsigned long cur_image_addr;
        unsigned long new_addr = 0;
@@ -731,8 +727,8 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
         * possible.
         */
        if (status != EFI_SUCCESS) {
-               status = efi_low_alloc(sys_table_arg, alloc_size, alignment,
-                                      &new_addr);
+               status = efi_low_alloc_above(sys_table_arg, alloc_size,
+                                            alignment, &new_addr, min_addr);
        }
        if (status != EFI_SUCCESS) {
                pr_efi_err(sys_table_arg, "Failed to allocate usable memory for kernel.\n");
index 877745c..7baf48c 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/proc_fs.h>
 #include <linux/efi.h>
+#include <linux/security.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
@@ -717,6 +718,13 @@ static long efi_test_ioctl(struct file *file, unsigned int cmd,
 
 static int efi_test_open(struct inode *inode, struct file *file)
 {
+       int ret = security_locked_down(LOCKDOWN_EFI_TEST);
+
+       if (ret)
+               return ret;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
        /*
         * nothing special to do here
         * We do accept multiple open files at the same time as we
index ebd7977..31f9f0e 100644 (file)
@@ -88,6 +88,7 @@ int __init efi_tpm_eventlog_init(void)
 
        if (tbl_size < 0) {
                pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n");
+               ret = -EINVAL;
                goto out_calc;
        }
 
index 61e38e4..85b0515 100644 (file)
@@ -140,7 +140,12 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
        return 0;
 
 error_free:
-       while (i--) {
+       for (i = 0; i < last_entry; ++i) {
+               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
+
+               amdgpu_bo_unref(&bo);
+       }
+       for (i = first_userptr; i < num_entries; ++i) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
 
                amdgpu_bo_unref(&bo);
index 2e53fee..82823d9 100644 (file)
@@ -536,7 +536,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 
        list_for_each_entry(lobj, validated, tv.head) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
-               bool binding_userptr = false;
                struct mm_struct *usermm;
 
                usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
@@ -553,7 +552,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 
                        amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
                                                     lobj->user_pages);
-                       binding_userptr = true;
                }
 
                if (p->evictable == lobj)
@@ -563,10 +561,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                if (r)
                        return r;
 
-               if (binding_userptr) {
-                       kvfree(lobj->user_pages);
-                       lobj->user_pages = NULL;
-               }
+               kvfree(lobj->user_pages);
+               lobj->user_pages = NULL;
        }
        return 0;
 }
index 9d76e09..96b2a31 100644 (file)
@@ -218,7 +218,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
        struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
        struct dma_fence *fence = NULL, *finished;
        struct amdgpu_job *job;
-       int r;
+       int r = 0;
 
        job = to_amdgpu_job(sched_job);
        finished = &job->base.s_fence->finished;
@@ -243,6 +243,8 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
        job->fence = dma_fence_get(fence);
 
        amdgpu_job_free_resources(job);
+
+       fence = r ? ERR_PTR(r) : fence;
        return fence;
 }
 
index 1fead0e..7289e1b 100644 (file)
@@ -453,7 +453,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
                .interruptible = (bp->type != ttm_bo_type_kernel),
                .no_wait_gpu = false,
                .resv = bp->resv,
-               .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
+               .flags = bp->type != ttm_bo_type_kernel ?
+                       TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
        };
        struct amdgpu_bo *bo;
        unsigned long page_align, size = bp->size;
index b70b3c4..65044b1 100644 (file)
@@ -429,13 +429,14 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
  * Open up a stream for HW test
  */
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+                             struct amdgpu_bo *bo,
                              struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 1024;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -444,7 +445,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 
        ib = &job->ibs[0];
 
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        /* stitch together an VCE create msg */
        ib->length_dw = 0;
@@ -476,8 +477,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 
        ib->ptr[ib->length_dw++] = 0x00000014; /* len */
        ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
        ib->ptr[ib->length_dw++] = 0x00000001;
 
        for (i = ib->length_dw; i < ib_size_dw; ++i)
@@ -1110,13 +1111,20 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct dma_fence *fence = NULL;
+       struct amdgpu_bo *bo = NULL;
        long r;
 
        /* skip vce ring1/2 ib test for now, since it's not reliable */
        if (ring != &ring->adev->vce.ring[0])
                return 0;
 
-       r = amdgpu_vce_get_create_msg(ring, 1, NULL);
+       r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
+                                     &bo, NULL, NULL);
+       if (r)
+               return r;
+
+       r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
        if (r)
                goto error;
 
@@ -1132,5 +1140,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
        dma_fence_put(fence);
+       amdgpu_bo_unreserve(bo);
+       amdgpu_bo_unref(&bo);
        return r;
 }
index 30ea54d..e802f7d 100644 (file)
@@ -59,6 +59,7 @@ int amdgpu_vce_entity_init(struct amdgpu_device *adev);
 int amdgpu_vce_suspend(struct amdgpu_device *adev);
 int amdgpu_vce_resume(struct amdgpu_device *adev);
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+                             struct amdgpu_bo *bo,
                              struct dma_fence **fence);
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                               bool direct, struct dma_fence **fence);
index 7a6beb2..3199e4a 100644 (file)
@@ -569,13 +569,14 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 }
 
 static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-                             struct dma_fence **fence)
+                                        struct amdgpu_bo *bo,
+                                        struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -583,14 +584,14 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
                return r;
 
        ib = &job->ibs[0];
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
        ib->ptr[ib->length_dw++] = 0x0000000b;
 
        ib->ptr[ib->length_dw++] = 0x00000014;
@@ -621,13 +622,14 @@ err:
 }
 
 static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-                               struct dma_fence **fence)
+                                         struct amdgpu_bo *bo,
+                                         struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -635,14 +637,14 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
                return r;
 
        ib = &job->ibs[0];
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001;
        ib->ptr[ib->length_dw++] = handle;
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
        ib->ptr[ib->length_dw++] = 0x0000000b;
 
        ib->ptr[ib->length_dw++] = 0x00000014;
@@ -675,13 +677,20 @@ err:
 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct dma_fence *fence = NULL;
+       struct amdgpu_bo *bo = NULL;
        long r;
 
-       r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
+       r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
+                                     &bo, NULL, NULL);
+       if (r)
+               return r;
+
+       r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
        if (r)
                goto error;
 
-       r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
+       r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
        if (r)
                goto error;
 
@@ -693,6 +702,8 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
        dma_fence_put(fence);
+       amdgpu_bo_unreserve(bo);
+       amdgpu_bo_unref(&bo);
        return r;
 }
 
index 957811b..8dfc775 100644 (file)
@@ -93,7 +93,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
 {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0x60000ff0, 0x60000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000000, 0x40000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
@@ -140,7 +140,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xf8ff0fff, 0x60000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000ff0, 0x40000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
@@ -179,7 +179,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0xc0000100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0x0d000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xffffcfff, 0x60000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0xffff0fff, 0x40000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
index 8b789f7..db10640 100644 (file)
@@ -151,6 +151,15 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
        WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, tmp);
 
        tmp = mmGCVM_L2_CNTL3_DEFAULT;
+       if (adev->gmc.translate_further) {
+               tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12);
+               tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+                                   L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+       } else {
+               tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9);
+               tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+                                   L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+       }
        WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, tmp);
 
        tmp = mmGCVM_L2_CNTL4_DEFAULT;
index 241a4e5..354e620 100644 (file)
@@ -309,6 +309,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 
        job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
        job->vm_needs_flush = true;
+       job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        r = amdgpu_job_submit(job, &adev->mman.entity,
                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
index 3542c20..b39bea6 100644 (file)
@@ -137,6 +137,15 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp);
 
        tmp = mmMMVM_L2_CNTL3_DEFAULT;
+       if (adev->gmc.translate_further) {
+               tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
+               tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
+                                   L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+       } else {
+               tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
+               tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
+                                   L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+       }
        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp);
 
        tmp = mmMMVM_L2_CNTL4_DEFAULT;
index 78452cf..4554e72 100644 (file)
@@ -254,6 +254,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
 };
 
 static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
index 670784a..217084d 100644 (file)
@@ -206,13 +206,14 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
  * Open up a stream for HW test
  */
 static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+                                      struct amdgpu_bo *bo,
                                       struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -220,15 +221,15 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
                return r;
 
        ib = &job->ibs[0];
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = 0x00010000;
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
 
        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -268,13 +269,14 @@ err:
  */
 static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
                                        uint32_t handle,
+                                       struct amdgpu_bo *bo,
                                        struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -282,15 +284,15 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
                return r;
 
        ib = &job->ibs[0];
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = 0x00010000;
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
 
        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -327,13 +329,20 @@ err:
 static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct dma_fence *fence = NULL;
+       struct amdgpu_bo *bo = NULL;
        long r;
 
-       r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
+       r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
+                                     &bo, NULL, NULL);
+       if (r)
+               return r;
+
+       r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
        if (r)
                goto error;
 
-       r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
+       r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
        if (r)
                goto error;
 
@@ -345,6 +354,8 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
        dma_fence_put(fence);
+       amdgpu_bo_unreserve(bo);
+       amdgpu_bo_unref(&bo);
        return r;
 }
 
index 01f658f..0995378 100644 (file)
@@ -214,13 +214,14 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
  * Open up a stream for HW test
  */
 static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+                                      struct amdgpu_bo *bo,
                                       struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -228,15 +229,15 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
                return r;
 
        ib = &job->ibs[0];
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = 0x00000000;
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
 
        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -275,13 +276,14 @@ err:
  * Close up a stream for HW test or if userspace failed to do so
  */
 static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-                               struct dma_fence **fence)
+                                       struct amdgpu_bo *bo,
+                                       struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -289,15 +291,15 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
                return r;
 
        ib = &job->ibs[0];
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001;
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = 0x00000000;
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
 
        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002;
@@ -334,13 +336,20 @@ err:
 static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct dma_fence *fence = NULL;
+       struct amdgpu_bo *bo = NULL;
        long r;
 
-       r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
+       r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
+                                     &bo, NULL, NULL);
+       if (r)
+               return r;
+
+       r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
        if (r)
                goto error;
 
-       r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
+       r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
        if (r)
                goto error;
 
@@ -352,6 +361,8 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
        dma_fence_put(fence);
+       amdgpu_bo_unreserve(bo);
+       amdgpu_bo_unref(&bo);
        return r;
 }
 
index 985633c..26c6d73 100644 (file)
 # It calculates Bandwidth and Watermarks values for HW programming
 #
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-       cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-       cc_stack_align := -mstack-alignment=16
-endif
+calcs_ccflags := -mhard-float -msse
 
-calcs_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
 
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+calcs_ccflags += -mpreferred-stack-boundary=4
+else
 calcs_ccflags += -msse2
 endif
 
index 5d1aded..4b8819c 100644 (file)
@@ -580,6 +580,10 @@ static bool construct(struct dc *dc,
 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
        // Allocate memory for the vm_helper
        dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
+       if (!dc->vm_helper) {
+               dm_error("%s: failed to create dc->vm_helper\n", __func__);
+               goto fail;
+       }
 
 #endif
        memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
index ca20b15..9c58670 100644 (file)
@@ -2767,6 +2767,15 @@ void core_link_enable_stream(
                                        CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
                                        COLOR_DEPTH_UNDEFINED);
 
+               /* This second call is needed to reconfigure the DIG
+                * as a workaround for the incorrect value being applied
+                * from transmitter control.
+                */
+               if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
+                       stream->link->link_enc->funcs->setup(
+                               stream->link->link_enc,
+                               pipe_ctx->stream->signal);
+
 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
                if (pipe_ctx->stream->timing.flags.DSC) {
                        if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
index 505967b..51991bf 100644 (file)
@@ -374,6 +374,7 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
        enum display_dongle_type *dongle = &sink_cap->dongle_type;
        uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
        bool is_type2_dongle = false;
+       int retry_count = 2;
        struct dp_hdmi_dongle_signature_data *dongle_signature;
 
        /* Assume we have no valid DP passive dongle connected */
@@ -386,13 +387,24 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
                DP_HDMI_DONGLE_ADDRESS,
                type2_dongle_buf,
                sizeof(type2_dongle_buf))) {
-               *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
-               sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
+               /* Passive HDMI dongles can sometimes fail here without retrying*/
+               while (retry_count > 0) {
+                       if (i2c_read(ddc,
+                               DP_HDMI_DONGLE_ADDRESS,
+                               type2_dongle_buf,
+                               sizeof(type2_dongle_buf)))
+                               break;
+                       retry_count--;
+               }
+               if (retry_count == 0) {
+                       *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+                       sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
 
-               CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
-                               "DP-DVI passive dongle %dMhz: ",
-                               DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
-               return;
+                       CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
+                                       "DP-DVI passive dongle %dMhz: ",
+                                       DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+                       return;
+               }
        }
 
        /* Check if Type 2 dongle.*/
index 8f70295..f25ac17 100644 (file)
@@ -404,6 +404,9 @@ bool resource_are_streams_timing_synchronizable(
        if (stream1->view_format != stream2->view_format)
                return false;
 
+       if (stream1->ignore_msa_timing_param || stream2->ignore_msa_timing_param)
+               return false;
+
        return true;
 }
 static bool is_dp_and_hdmi_sharable(
@@ -1540,6 +1543,9 @@ bool dc_is_stream_unchanged(
        if (!are_stream_backends_same(old_stream, stream))
                return false;
 
+       if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
+               return false;
+
        return true;
 }
 
index 01c7e30..bbd6e01 100644 (file)
@@ -393,6 +393,10 @@ bool cm_helper_translate_curve_to_hw_format(
        rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
        rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
+       rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
+       rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
+       rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
+
        // All 3 color channels have same x
        corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
                                             dc_fixpt_from_int(region_start));
@@ -464,13 +468,6 @@ bool cm_helper_translate_curve_to_hw_format(
 
        i = 1;
        while (i != hw_points + 1) {
-               if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
-                       rgb_plus_1->red = rgb->red;
-               if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
-                       rgb_plus_1->green = rgb->green;
-               if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
-                       rgb_plus_1->blue = rgb->blue;
-
                rgb->delta_red   = dc_fixpt_sub(rgb_plus_1->red,   rgb->red);
                rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
                rgb->delta_blue  = dc_fixpt_sub(rgb_plus_1->blue,  rgb->blue);
@@ -562,6 +559,10 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
        rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
        rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
+       rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
+       rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
+       rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
+
        corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
                                             dc_fixpt_from_int(region_start));
        corner_points[0].green.x = corner_points[0].red.x;
@@ -624,13 +625,6 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 
        i = 1;
        while (i != hw_points + 1) {
-               if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
-                       rgb_plus_1->red = rgb->red;
-               if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
-                       rgb_plus_1->green = rgb->green;
-               if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
-                       rgb_plus_1->blue = rgb->blue;
-
                rgb->delta_red   = dc_fixpt_sub(rgb_plus_1->red,   rgb->red);
                rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
                rgb->delta_blue  = dc_fixpt_sub(rgb_plus_1->blue,  rgb->blue);
index ddb8d56..63f3bdd 100644 (file)
@@ -10,15 +10,20 @@ ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 DCN20 += dcn20_dsc.o
 endif
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-       cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-       cc_stack_align := -mstack-alignment=16
-endif
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse
 
-CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
 
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -mpreferred-stack-boundary=4
+else
 CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2
 endif
 
index 5a2763d..dfb2082 100644 (file)
@@ -814,7 +814,7 @@ static const struct resource_caps res_cap_nv14 = {
                .num_audio = 6,
                .num_stream_encoder = 5,
                .num_pll = 5,
-               .num_dwb = 0,
+               .num_dwb = 1,
                .num_ddc = 5,
 };
 
index ef673bf..ff50ae7 100644 (file)
@@ -3,15 +3,20 @@
 
 DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-       cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-       cc_stack_align := -mstack-alignment=16
-endif
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse
 
-CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
 
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -mpreferred-stack-boundary=4
+else
 CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2
 endif
 
index 5b2a65b..8df2516 100644 (file)
 # It provides the general basic services required by other DAL
 # subcomponents.
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-       cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-       cc_stack_align := -mstack-alignment=16
-endif
+dml_ccflags := -mhard-float -msse
 
-dml_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
 
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+dml_ccflags += -mpreferred-stack-boundary=4
+else
 dml_ccflags += -msse2
 endif
 
index 6498837..6c6c486 100644 (file)
@@ -2577,7 +2577,8 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer
                        mode_lib->vba.MinActiveDRAMClockChangeMargin
                                        + mode_lib->vba.DRAMClockChangeLatency;
 
-       if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+       if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) {
+               mode_lib->vba.DRAMClockChangeWatermark += 25;
                mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
        } else {
                if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
index b456cd2..9707372 100644 (file)
@@ -1,15 +1,20 @@
 #
 # Makefile for the 'dsc' sub-component of DAL.
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-       cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-       cc_stack_align := -mstack-alignment=16
-endif
+dsc_ccflags := -mhard-float -msse
 
-dsc_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
 
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+dsc_ccflags += -mpreferred-stack-boundary=4
+else
 dsc_ccflags += -msse2
 endif
 
index d08493b..beacfff 100644 (file)
@@ -5098,9 +5098,7 @@ static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
 
        if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
                podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
-               for (i = 0; i < podn_vdd_dep->count - 1; i++)
-                       od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
-               if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc)
+               for (i = 0; i < podn_vdd_dep->count; i++)
                        od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
        } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
                podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
index 8820ce1..ae27490 100644 (file)
@@ -82,7 +82,8 @@ static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
 
        drm_atomic_helper_commit_modeset_disables(dev, old_state);
 
-       drm_atomic_helper_commit_planes(dev, old_state, 0);
+       drm_atomic_helper_commit_planes(dev, old_state,
+                                       DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
        drm_atomic_helper_commit_modeset_enables(dev, old_state);
 
index ea26bc9..b848270 100644 (file)
@@ -564,8 +564,8 @@ komeda_splitter_validate(struct komeda_splitter *splitter,
        }
 
        if (!in_range(&splitter->vsize, dflow->in_h)) {
-               DRM_DEBUG_ATOMIC("split in_in: %d exceed the acceptable range.\n",
-                                dflow->in_w);
+               DRM_DEBUG_ATOMIC("split in_h: %d exceeds the acceptable range.\n",
+                                dflow->in_h);
                return -EINVAL;
        }
 
index 698db54..648cf02 100644 (file)
@@ -180,6 +180,8 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
                              etnaviv_cmdbuf_get_va(&submit->cmdbuf,
                                        &gpu->mmu_context->cmdbuf_mapping));
 
+       mutex_unlock(&gpu->mmu_context->lock);
+
        /* Reserve space for the bomap */
        if (n_bomap_pages) {
                bomap_start = bomap = iter.data;
@@ -221,8 +223,6 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
                                         obj->base.size);
        }
 
-       mutex_unlock(&gpu->mmu_context->lock);
-
        etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
 
        dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
index 043111a..f8bf488 100644 (file)
@@ -155,9 +155,11 @@ static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, void *bu
 
        memcpy(buf, v2_context->mtlb_cpu, SZ_4K);
        buf += SZ_4K;
-       for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
-               if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+       for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
+               if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
                        memcpy(buf, v2_context->stlb_cpu[i], SZ_4K);
+                       buf += SZ_4K;
+               }
 }
 
 static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
index 35ebae6..3607d34 100644 (file)
@@ -328,12 +328,23 @@ etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
 
        ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
                                          global->memory_base);
-       if (ret) {
-               global->ops->free(ctx);
-               return NULL;
+       if (ret)
+               goto out_free;
+
+       if (global->version == ETNAVIV_IOMMU_V1 &&
+           ctx->cmdbuf_mapping.iova > 0x80000000) {
+               dev_err(global->dev,
+                       "command buffer outside valid memory window\n");
+               goto out_unmap;
        }
 
        return ctx;
+
+out_unmap:
+       etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
+out_free:
+       global->ops->free(ctx);
+       return NULL;
 }
 
 void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
index aa54bb2..dfff6f4 100644 (file)
@@ -9315,7 +9315,6 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
 {
        struct intel_encoder *encoder;
-       bool pch_ssc_in_use = false;
        bool has_fdi = false;
 
        for_each_intel_encoder(&dev_priv->drm, encoder) {
@@ -9343,22 +9342,24 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
         * clock hierarchy. That would also allow us to do
         * clock bending finally.
         */
+       dev_priv->pch_ssc_use = 0;
+
        if (spll_uses_pch_ssc(dev_priv)) {
                DRM_DEBUG_KMS("SPLL using PCH SSC\n");
-               pch_ssc_in_use = true;
+               dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
        }
 
        if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
                DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
-               pch_ssc_in_use = true;
+               dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
        }
 
        if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
                DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
-               pch_ssc_in_use = true;
+               dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
        }
 
-       if (pch_ssc_in_use)
+       if (dev_priv->pch_ssc_use)
                return;
 
        if (has_fdi) {
index b8148f8..d5a298c 100644 (file)
@@ -525,16 +525,31 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
        val = I915_READ(WRPLL_CTL(id));
        I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
        POSTING_READ(WRPLL_CTL(id));
+
+       /*
+        * Try to set up the PCH reference clock once all DPLLs
+        * that depend on it have been shut down.
+        */
+       if (dev_priv->pch_ssc_use & BIT(id))
+               intel_init_pch_refclk(dev_priv);
 }
 
 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
                                 struct intel_shared_dpll *pll)
 {
+       enum intel_dpll_id id = pll->info->id;
        u32 val;
 
        val = I915_READ(SPLL_CTL);
        I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
        POSTING_READ(SPLL_CTL);
+
+       /*
+        * Try to set up the PCH reference clock once all DPLLs
+        * that depend on it have been shut down.
+        */
+       if (dev_priv->pch_ssc_use & BIT(id))
+               intel_init_pch_refclk(dev_priv);
 }
 
 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
index e758879..104cf6d 100644 (file)
@@ -147,11 +147,11 @@ enum intel_dpll_id {
         */
        DPLL_ID_ICL_MGPLL4 = 6,
        /**
-        * @DPLL_ID_TGL_TCPLL5: TGL TC PLL port 5 (TC5)
+        * @DPLL_ID_TGL_MGPLL5: TGL TC PLL port 5 (TC5)
         */
        DPLL_ID_TGL_MGPLL5 = 7,
        /**
-        * @DPLL_ID_TGL_TCPLL6: TGL TC PLL port 6 (TC6)
+        * @DPLL_ID_TGL_MGPLL6: TGL TC PLL port 6 (TC6)
         */
        DPLL_ID_TGL_MGPLL6 = 8,
 };
index 772154e..953e1d1 100644 (file)
@@ -1723,6 +1723,8 @@ struct drm_i915_private {
                struct work_struct idle_work;
        } gem;
 
+       u8 pch_ssc_use;
+
        /* For i945gm vblank irq vs. C3 workaround */
        struct {
                struct work_struct work;
index bc2ddeb..f21bc8a 100644 (file)
@@ -556,11 +556,11 @@ static int panfrost_probe(struct platform_device *pdev)
        return 0;
 
 err_out2:
+       pm_runtime_disable(pfdev->dev);
        panfrost_devfreq_fini(pfdev);
 err_out1:
        panfrost_device_fini(pfdev);
 err_out0:
-       pm_runtime_disable(pfdev->dev);
        drm_dev_put(ddev);
        return err;
 }
index bdd9905..a3ed64a 100644 (file)
@@ -224,9 +224,9 @@ static size_t get_pgsize(u64 addr, size_t size)
        return SZ_2M;
 }
 
-void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
-                             struct panfrost_mmu *mmu,
-                             u64 iova, size_t size)
+static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
+                                    struct panfrost_mmu *mmu,
+                                    u64 iova, size_t size)
 {
        if (mmu->as < 0)
                return;
@@ -406,11 +406,11 @@ addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
        spin_lock(&pfdev->as_lock);
        list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
                if (as == mmu->as)
-                       break;
+                       goto found_mmu;
        }
-       if (as != mmu->as)
-               goto out;
+       goto out;
 
+found_mmu:
        priv = container_of(mmu, struct panfrost_file_priv, mmu);
 
        spin_lock(&priv->mm_lock);
@@ -432,7 +432,8 @@ out:
 
 #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
 
-int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
+static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+                                      u64 addr)
 {
        int ret, i;
        struct panfrost_gem_object *bo;
index 83c57d3..2dba192 100644 (file)
@@ -16,6 +16,7 @@
 #include "panfrost_issues.h"
 #include "panfrost_job.h"
 #include "panfrost_mmu.h"
+#include "panfrost_perfcnt.h"
 #include "panfrost_regs.h"
 
 #define COUNTERS_PER_BLOCK             64
index 9e55076..4528f4d 100644 (file)
@@ -379,11 +379,25 @@ radeon_pci_remove(struct pci_dev *pdev)
 static void
 radeon_pci_shutdown(struct pci_dev *pdev)
 {
+#ifdef CONFIG_PPC64
+       struct drm_device *ddev = pci_get_drvdata(pdev);
+#endif
+
        /* if we are running in a VM, make sure the device
         * torn down properly on reboot/shutdown
         */
        if (radeon_device_is_virtual())
                radeon_pci_remove(pdev);
+
+#ifdef CONFIG_PPC64
+       /* Some adapters need to be suspended before a
+        * shutdown occurs in order to prevent an error
+        * during kexec.
+        * Make this power specific because it breaks
+        * some non-power boards.
+        */
+       radeon_suspend_kms(ddev, true, true, false);
+#endif
 }
 
 static int radeon_pmops_suspend(struct device *dev)
index 9a0ee74..f39b97e 100644 (file)
@@ -479,6 +479,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
        struct drm_sched_job *s_job, *tmp;
        uint64_t guilty_context;
        bool found_guilty = false;
+       struct dma_fence *fence;
 
        list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
                struct drm_sched_fence *s_fence = s_job->s_fence;
@@ -492,7 +493,16 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
                        dma_fence_set_error(&s_fence->finished, -ECANCELED);
 
                dma_fence_put(s_job->s_fence->parent);
-               s_job->s_fence->parent = sched->ops->run_job(s_job);
+               fence = sched->ops->run_job(s_job);
+
+               if (IS_ERR_OR_NULL(fence)) {
+                       s_job->s_fence->parent = NULL;
+                       dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
+               } else {
+                       s_job->s_fence->parent = fence;
+               }
+
+
        }
 }
 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
@@ -720,7 +730,7 @@ static int drm_sched_main(void *param)
                fence = sched->ops->run_job(sched_job);
                drm_sched_fence_scheduled(s_fence);
 
-               if (fence) {
+               if (!IS_ERR_OR_NULL(fence)) {
                        s_fence->parent = dma_fence_get(fence);
                        r = dma_fence_add_callback(fence, &sched_job->cb,
                                                   drm_sched_process_job);
@@ -730,8 +740,11 @@ static int drm_sched_main(void *param)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        dma_fence_put(fence);
-               } else
+               } else {
+
+                       dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
                        drm_sched_process_job(NULL, &sched_job->cb);
+               }
 
                wake_up(&sched->job_scheduled);
        }
index 5d80507..19c092d 100644 (file)
@@ -557,13 +557,16 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 
        if (args->bcl_start != args->bcl_end) {
                bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
-               if (!bin)
+               if (!bin) {
+                       v3d_job_put(&render->base);
                        return -ENOMEM;
+               }
 
                ret = v3d_job_init(v3d, file_priv, &bin->base,
                                   v3d_job_free, args->in_sync_bcl);
                if (ret) {
                        v3d_job_put(&render->base);
+                       kfree(bin);
                        return ret;
                }
 
index 6654c15..fbe4e16 100644 (file)
@@ -63,13 +63,20 @@ static int axff_init(struct hid_device *hid)
 {
        struct axff_device *axff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+       struct hid_input *hidinput;
        struct list_head *report_list =&hid->report_enum[HID_OUTPUT_REPORT].report_list;
-       struct input_dev *dev = hidinput->input;
+       struct input_dev *dev;
        int field_count = 0;
        int i, j;
        int error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+       dev = hidinput->input;
+
        if (list_empty(report_list)) {
                hid_err(hid, "no output reports found\n");
                return -ENODEV;
index 3eaee2c..63fdbf0 100644 (file)
@@ -1139,6 +1139,7 @@ int hid_open_report(struct hid_device *device)
        __u8 *start;
        __u8 *buf;
        __u8 *end;
+       __u8 *next;
        int ret;
        static int (*dispatch_type[])(struct hid_parser *parser,
                                      struct hid_item *item) = {
@@ -1192,7 +1193,8 @@ int hid_open_report(struct hid_device *device)
        device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
 
        ret = -EINVAL;
-       while ((start = fetch_item(start, end, &item)) != NULL) {
+       while ((next = fetch_item(start, end, &item)) != NULL) {
+               start = next;
 
                if (item.format != HID_ITEM_FORMAT_SHORT) {
                        hid_err(device, "unexpected long global item\n");
@@ -1230,7 +1232,8 @@ int hid_open_report(struct hid_device *device)
                }
        }
 
-       hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
+       hid_err(device, "item fetching failed at offset %u/%u\n",
+               size - (unsigned int)(end - start), size);
 err:
        kfree(parser->collection_stack);
 alloc_err:
index 17e17f9..947f19f 100644 (file)
@@ -75,13 +75,19 @@ static int drff_init(struct hid_device *hid)
 {
        struct drff_device *drff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_first_entry(&hid->inputs,
-                                               struct hid_input, list);
+       struct hid_input *hidinput;
        struct list_head *report_list =
                        &hid->report_enum[HID_OUTPUT_REPORT].report_list;
-       struct input_dev *dev = hidinput->input;
+       struct input_dev *dev;
        int error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+       dev = hidinput->input;
+
        if (list_empty(report_list)) {
                hid_err(hid, "no output reports found\n");
                return -ENODEV;
index 7cd5651..c34f2e5 100644 (file)
@@ -47,13 +47,19 @@ static int emsff_init(struct hid_device *hid)
 {
        struct emsff_device *emsff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_first_entry(&hid->inputs,
-                                               struct hid_input, list);
+       struct hid_input *hidinput;
        struct list_head *report_list =
                        &hid->report_enum[HID_OUTPUT_REPORT].report_list;
-       struct input_dev *dev = hidinput->input;
+       struct input_dev *dev;
        int error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+       dev = hidinput->input;
+
        if (list_empty(report_list)) {
                hid_err(hid, "no output reports found\n");
                return -ENODEV;
index 0f95c96..ecbd399 100644 (file)
@@ -64,14 +64,20 @@ static int gaff_init(struct hid_device *hid)
 {
        struct gaff_device *gaff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_entry(hid->inputs.next,
-                                               struct hid_input, list);
+       struct hid_input *hidinput;
        struct list_head *report_list =
                        &hid->report_enum[HID_OUTPUT_REPORT].report_list;
        struct list_head *report_ptr = report_list;
-       struct input_dev *dev = hidinput->input;
+       struct input_dev *dev;
        int error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        if (list_empty(report_list)) {
                hid_err(hid, "no output reports found\n");
                return -ENODEV;
index 84f8c12..d86a918 100644 (file)
@@ -469,6 +469,10 @@ static int hammer_probe(struct hid_device *hdev,
 static const struct hid_device_id hammer_devices[] = {
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
index 10a7205..8619b80 100644 (file)
@@ -124,13 +124,19 @@ static int holtekff_init(struct hid_device *hid)
 {
        struct holtekff_device *holtekff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_entry(hid->inputs.next,
-                                               struct hid_input, list);
+       struct hid_input *hidinput;
        struct list_head *report_list =
                        &hid->report_enum[HID_OUTPUT_REPORT].report_list;
-       struct input_dev *dev = hidinput->input;
+       struct input_dev *dev;
        int error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        if (list_empty(report_list)) {
                hid_err(hid, "no output report found\n");
                return -ENODEV;
index 76969a2..447e8db 100644 (file)
 #define USB_DEVICE_ID_GOOGLE_STAFF     0x502b
 #define USB_DEVICE_ID_GOOGLE_WAND      0x502d
 #define USB_DEVICE_ID_GOOGLE_WHISKERS  0x5030
+#define USB_DEVICE_ID_GOOGLE_MASTERBALL        0x503c
+#define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
 
 #define USB_VENDOR_ID_GOTOP            0x08f2
 #define USB_DEVICE_ID_SUPER_Q2         0x007f
index dd1a6c3..73d07e3 100644 (file)
@@ -50,11 +50,17 @@ int lg2ff_init(struct hid_device *hid)
 {
        struct lg2ff_device *lg2ff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_entry(hid->inputs.next,
-                                               struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *dev;
        int error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        /* Check that the report looks ok */
        report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
        if (!report)
index 9ecb6fd..b7e1949 100644 (file)
@@ -117,12 +117,19 @@ static const signed short ff3_joystick_ac[] = {
 
 int lg3ff_init(struct hid_device *hid)
 {
-       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *dev;
        const signed short *ff_bits = ff3_joystick_ac;
        int error;
        int i;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        /* Check that the report looks ok */
        if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35))
                return -ENODEV;
index 03f0220..5e6a0ce 100644 (file)
@@ -1253,8 +1253,8 @@ static int lg4ff_handle_multimode_wheel(struct hid_device *hid, u16 *real_produc
 
 int lg4ff_init(struct hid_device *hid)
 {
-       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *dev;
        struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
        struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
        const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
@@ -1266,6 +1266,13 @@ int lg4ff_init(struct hid_device *hid)
        int mmode_ret, mmode_idx = -1;
        u16 real_product_id;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        /* Check that the report looks ok */
        if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
                return -1;
index c79a6ec..aed4ddc 100644 (file)
@@ -115,12 +115,19 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
 
 int lgff_init(struct hid_device* hid)
 {
-       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *dev;
        const signed short *ff_bits = ff_joystick;
        int error;
        int i;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        /* Check that the report looks ok */
        if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
                return -ENODEV;
index 0179f7e..8e91e2f 100644 (file)
@@ -1669,6 +1669,7 @@ static void hidpp_touchpad_raw_xy_event(struct hidpp_device *hidpp_dev,
 
 #define HIDPP_FF_EFFECTID_NONE         -1
 #define HIDPP_FF_EFFECTID_AUTOCENTER   -2
+#define HIDPP_AUTOCENTER_PARAMS_LENGTH 18
 
 #define HIDPP_FF_MAX_PARAMS    20
 #define HIDPP_FF_RESERVED_SLOTS        1
@@ -2009,7 +2010,7 @@ static int hidpp_ff_erase_effect(struct input_dev *dev, int effect_id)
 static void hidpp_ff_set_autocenter(struct input_dev *dev, u16 magnitude)
 {
        struct hidpp_ff_private_data *data = dev->ff->private;
-       u8 params[18];
+       u8 params[HIDPP_AUTOCENTER_PARAMS_LENGTH];
 
        dbg_hid("Setting autocenter to %d.\n", magnitude);
 
@@ -2077,23 +2078,34 @@ static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, hidpp
 static void hidpp_ff_destroy(struct ff_device *ff)
 {
        struct hidpp_ff_private_data *data = ff->private;
+       struct hid_device *hid = data->hidpp->hid_dev;
 
+       hid_info(hid, "Unloading HID++ force feedback.\n");
+
+       device_remove_file(&hid->dev, &dev_attr_range);
+       destroy_workqueue(data->wq);
        kfree(data->effect_ids);
 }
 
-static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
+static int hidpp_ff_init(struct hidpp_device *hidpp,
+                        struct hidpp_ff_private_data *data)
 {
        struct hid_device *hid = hidpp->hid_dev;
-       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *dev;
        const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
        const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice);
        struct ff_device *ff;
-       struct hidpp_report response;
-       struct hidpp_ff_private_data *data;
-       int error, j, num_slots;
+       int error, j, num_slots = data->num_effects;
        u8 version;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        if (!dev) {
                hid_err(hid, "Struct input_dev not set!\n");
                return -EINVAL;
@@ -2109,27 +2121,17 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
                for (j = 0; hidpp_ff_effects_v2[j] >= 0; j++)
                        set_bit(hidpp_ff_effects_v2[j], dev->ffbit);
 
-       /* Read number of slots available in device */
-       error = hidpp_send_fap_command_sync(hidpp, feature_index,
-               HIDPP_FF_GET_INFO, NULL, 0, &response);
-       if (error) {
-               if (error < 0)
-                       return error;
-               hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
-                       __func__, error);
-               return -EPROTO;
-       }
-
-       num_slots = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS;
-
        error = input_ff_create(dev, num_slots);
 
        if (error) {
                hid_err(dev, "Failed to create FF device!\n");
                return error;
        }
-
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       /*
+        * Create a copy of passed data, so we can transfer memory
+        * ownership to FF core
+        */
+       data = kmemdup(data, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        data->effect_ids = kcalloc(num_slots, sizeof(int), GFP_KERNEL);
@@ -2145,10 +2147,7 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
        }
 
        data->hidpp = hidpp;
-       data->feature_index = feature_index;
        data->version = version;
-       data->slot_autocenter = 0;
-       data->num_effects = num_slots;
        for (j = 0; j < num_slots; j++)
                data->effect_ids[j] = -1;
 
@@ -2162,68 +2161,20 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
        ff->set_autocenter = hidpp_ff_set_autocenter;
        ff->destroy = hidpp_ff_destroy;
 
-
-       /* reset all forces */
-       error = hidpp_send_fap_command_sync(hidpp, feature_index,
-               HIDPP_FF_RESET_ALL, NULL, 0, &response);
-
-       /* Read current Range */
-       error = hidpp_send_fap_command_sync(hidpp, feature_index,
-               HIDPP_FF_GET_APERTURE, NULL, 0, &response);
-       if (error)
-               hid_warn(hidpp->hid_dev, "Failed to read range from device!\n");
-       data->range = error ? 900 : get_unaligned_be16(&response.fap.params[0]);
-
        /* Create sysfs interface */
        error = device_create_file(&(hidpp->hid_dev->dev), &dev_attr_range);
        if (error)
                hid_warn(hidpp->hid_dev, "Unable to create sysfs interface for \"range\", errno %d!\n", error);
 
-       /* Read the current gain values */
-       error = hidpp_send_fap_command_sync(hidpp, feature_index,
-               HIDPP_FF_GET_GLOBAL_GAINS, NULL, 0, &response);
-       if (error)
-               hid_warn(hidpp->hid_dev, "Failed to read gain values from device!\n");
-       data->gain = error ? 0xffff : get_unaligned_be16(&response.fap.params[0]);
-       /* ignore boost value at response.fap.params[2] */
-
        /* init the hardware command queue */
        atomic_set(&data->workqueue_size, 0);
 
-       /* initialize with zero autocenter to get wheel in usable state */
-       hidpp_ff_set_autocenter(dev, 0);
-
        hid_info(hid, "Force feedback support loaded (firmware release %d).\n",
                 version);
 
        return 0;
 }
 
-static int hidpp_ff_deinit(struct hid_device *hid)
-{
-       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
-       struct hidpp_ff_private_data *data;
-
-       if (!dev) {
-               hid_err(hid, "Struct input_dev not found!\n");
-               return -EINVAL;
-       }
-
-       hid_info(hid, "Unloading HID++ force feedback.\n");
-       data = dev->ff->private;
-       if (!data) {
-               hid_err(hid, "Private data not found!\n");
-               return -EINVAL;
-       }
-
-       destroy_workqueue(data->wq);
-       device_remove_file(&hid->dev, &dev_attr_range);
-
-       return 0;
-}
-
-
 /* ************************************************************************** */
 /*                                                                            */
 /* Device Support                                                             */
@@ -2725,24 +2676,93 @@ static int k400_connect(struct hid_device *hdev, bool connected)
 
 #define HIDPP_PAGE_G920_FORCE_FEEDBACK                 0x8123
 
-static int g920_get_config(struct hidpp_device *hidpp)
+static int g920_ff_set_autocenter(struct hidpp_device *hidpp,
+                                 struct hidpp_ff_private_data *data)
 {
+       struct hidpp_report response;
+       u8 params[HIDPP_AUTOCENTER_PARAMS_LENGTH] = {
+               [1] = HIDPP_FF_EFFECT_SPRING | HIDPP_FF_EFFECT_AUTOSTART,
+       };
+       int ret;
+
+       /* initialize with zero autocenter to get wheel in usable state */
+
+       dbg_hid("Setting autocenter to 0.\n");
+       ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+                                         HIDPP_FF_DOWNLOAD_EFFECT,
+                                         params, ARRAY_SIZE(params),
+                                         &response);
+       if (ret)
+               hid_warn(hidpp->hid_dev, "Failed to autocenter device!\n");
+       else
+               data->slot_autocenter = response.fap.params[0];
+
+       return ret;
+}
+
+static int g920_get_config(struct hidpp_device *hidpp,
+                          struct hidpp_ff_private_data *data)
+{
+       struct hidpp_report response;
        u8 feature_type;
-       u8 feature_index;
        int ret;
 
+       memset(data, 0, sizeof(*data));
+
        /* Find feature and store for later use */
        ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_G920_FORCE_FEEDBACK,
-               &feature_index, &feature_type);
+                                    &data->feature_index, &feature_type);
        if (ret)
                return ret;
 
-       ret = hidpp_ff_init(hidpp, feature_index);
+       /* Read number of slots available in device */
+       ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+                                         HIDPP_FF_GET_INFO,
+                                         NULL, 0,
+                                         &response);
+       if (ret) {
+               if (ret < 0)
+                       return ret;
+               hid_err(hidpp->hid_dev,
+                       "%s: received protocol error 0x%02x\n", __func__, ret);
+               return -EPROTO;
+       }
+
+       data->num_effects = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS;
+
+       /* reset all forces */
+       ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+                                         HIDPP_FF_RESET_ALL,
+                                         NULL, 0,
+                                         &response);
        if (ret)
-               hid_warn(hidpp->hid_dev, "Unable to initialize force feedback support, errno %d\n",
-                               ret);
+               hid_warn(hidpp->hid_dev, "Failed to reset all forces!\n");
 
-       return 0;
+       ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+                                         HIDPP_FF_GET_APERTURE,
+                                         NULL, 0,
+                                         &response);
+       if (ret) {
+               hid_warn(hidpp->hid_dev,
+                        "Failed to read range from device!\n");
+       }
+       data->range = ret ?
+               900 : get_unaligned_be16(&response.fap.params[0]);
+
+       /* Read the current gain values */
+       ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+                                         HIDPP_FF_GET_GLOBAL_GAINS,
+                                         NULL, 0,
+                                         &response);
+       if (ret)
+               hid_warn(hidpp->hid_dev,
+                        "Failed to read gain values from device!\n");
+       data->gain = ret ?
+               0xffff : get_unaligned_be16(&response.fap.params[0]);
+
+       /* ignore boost value at response.fap.params[2] */
+
+       return g920_ff_set_autocenter(hidpp, data);
 }
 
 /* -------------------------------------------------------------------------- */
@@ -3458,34 +3478,45 @@ static int hidpp_get_report_length(struct hid_device *hdev, int id)
        return report->field[0]->report_count + 1;
 }
 
-static bool hidpp_validate_report(struct hid_device *hdev, int id,
-                                 int expected_length, bool optional)
+static bool hidpp_validate_device(struct hid_device *hdev)
 {
-       int report_length;
+       struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+       int id, report_length, supported_reports = 0;
 
-       if (id >= HID_MAX_IDS || id < 0) {
-               hid_err(hdev, "invalid HID report id %u\n", id);
-               return false;
+       id = REPORT_ID_HIDPP_SHORT;
+       report_length = hidpp_get_report_length(hdev, id);
+       if (report_length) {
+               if (report_length < HIDPP_REPORT_SHORT_LENGTH)
+                       goto bad_device;
+
+               supported_reports++;
        }
 
+       id = REPORT_ID_HIDPP_LONG;
        report_length = hidpp_get_report_length(hdev, id);
-       if (!report_length)
-               return optional;
+       if (report_length) {
+               if (report_length < HIDPP_REPORT_LONG_LENGTH)
+                       goto bad_device;
 
-       if (report_length < expected_length) {
-               hid_warn(hdev, "not enough values in hidpp report %d\n", id);
-               return false;
+               supported_reports++;
        }
 
-       return true;
-}
+       id = REPORT_ID_HIDPP_VERY_LONG;
+       report_length = hidpp_get_report_length(hdev, id);
+       if (report_length) {
+               if (report_length < HIDPP_REPORT_LONG_LENGTH ||
+                   report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH)
+                       goto bad_device;
 
-static bool hidpp_validate_device(struct hid_device *hdev)
-{
-       return hidpp_validate_report(hdev, REPORT_ID_HIDPP_SHORT,
-                                    HIDPP_REPORT_SHORT_LENGTH, false) &&
-              hidpp_validate_report(hdev, REPORT_ID_HIDPP_LONG,
-                                    HIDPP_REPORT_LONG_LENGTH, true);
+               supported_reports++;
+               hidpp->very_long_report_length = report_length;
+       }
+
+       return supported_reports;
+
+bad_device:
+       hid_warn(hdev, "not enough values in hidpp report %d\n", id);
+       return false;
 }
 
 static bool hidpp_application_equals(struct hid_device *hdev,
@@ -3505,6 +3536,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
        int ret;
        bool connected;
        unsigned int connect_mask = HID_CONNECT_DEFAULT;
+       struct hidpp_ff_private_data data;
 
        /* report_fixup needs drvdata to be set before we call hid_parse */
        hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL);
@@ -3531,11 +3563,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
                return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
        }
 
-       hidpp->very_long_report_length =
-               hidpp_get_report_length(hdev, REPORT_ID_HIDPP_VERY_LONG);
-       if (hidpp->very_long_report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH)
-               hidpp->very_long_report_length = HIDPP_REPORT_VERY_LONG_MAX_LENGTH;
-
        if (id->group == HID_GROUP_LOGITECH_DJ_DEVICE)
                hidpp->quirks |= HIDPP_QUIRK_UNIFYING;
 
@@ -3614,7 +3641,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
                if (ret)
                        goto hid_hw_init_fail;
        } else if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_G920)) {
-               ret = g920_get_config(hidpp);
+               ret = g920_get_config(hidpp, &data);
                if (ret)
                        goto hid_hw_init_fail;
        }
@@ -3636,6 +3663,14 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
                goto hid_hw_start_fail;
        }
 
+       if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+               ret = hidpp_ff_init(hidpp, &data);
+               if (ret)
+                       hid_warn(hidpp->hid_dev,
+                    "Unable to initialize force feedback support, errno %d\n",
+                                ret);
+       }
+
        return ret;
 
 hid_hw_init_fail:
@@ -3658,9 +3693,6 @@ static void hidpp_remove(struct hid_device *hdev)
 
        sysfs_remove_group(&hdev->dev.kobj, &ps_attribute_group);
 
-       if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920)
-               hidpp_ff_deinit(hdev);
-
        hid_hw_stop(hdev);
        cancel_work_sync(&hidpp->work);
        mutex_destroy(&hidpp->send_mutex);
index 2cf8385..2d8b589 100644 (file)
@@ -328,11 +328,17 @@ static int ms_play_effect(struct input_dev *dev, void *data,
 
 static int ms_init_ff(struct hid_device *hdev)
 {
-       struct hid_input *hidinput = list_entry(hdev->inputs.next,
-                                               struct hid_input, list);
-       struct input_dev *input_dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *input_dev;
        struct ms_data *ms = hid_get_drvdata(hdev);
 
+       if (list_empty(&hdev->inputs)) {
+               hid_err(hdev, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
+       input_dev = hidinput->input;
+
        if (!(ms->quirks & MS_QUIRK_FF))
                return 0;
 
index 5a3b3d9..2666af0 100644 (file)
@@ -516,7 +516,7 @@ static void pcmidi_setup_extra_keys(
                MY PICTURES =>  KEY_WORDPROCESSOR
                MY MUSIC=>      KEY_SPREADSHEET
        */
-       unsigned int keys[] = {
+       static const unsigned int keys[] = {
                KEY_FN,
                KEY_MESSENGER, KEY_CALENDAR,
                KEY_ADDRESSBOOK, KEY_DOCUMENTS,
@@ -532,7 +532,7 @@ static void pcmidi_setup_extra_keys(
                0
        };
 
-       unsigned int *pkeys = &keys[0];
+       const unsigned int *pkeys = &keys[0];
        unsigned short i;
 
        if (pm->ifnum != 1)  /* only set up ONCE for interace 1 */
index 73c0f7a..4c6ed6e 100644 (file)
@@ -2254,9 +2254,15 @@ static int sony_play_effect(struct input_dev *dev, void *data,
 
 static int sony_init_ff(struct sony_sc *sc)
 {
-       struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
-                                               struct hid_input, list);
-       struct input_dev *input_dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *input_dev;
+
+       if (list_empty(&sc->hdev->inputs)) {
+               hid_err(sc->hdev, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(sc->hdev->inputs.next, struct hid_input, list);
+       input_dev = hidinput->input;
 
        input_set_capability(input_dev, EV_FF, FF_RUMBLE);
        return input_ff_create_memless(input_dev, NULL, sony_play_effect);
index bdfc5ff..90acef3 100644 (file)
@@ -124,12 +124,18 @@ static int tmff_init(struct hid_device *hid, const signed short *ff_bits)
        struct tmff_device *tmff;
        struct hid_report *report;
        struct list_head *report_list;
-       struct hid_input *hidinput = list_entry(hid->inputs.next,
-                                                       struct hid_input, list);
-       struct input_dev *input_dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *input_dev;
        int error;
        int i;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       input_dev = hidinput->input;
+
        tmff = kzalloc(sizeof(struct tmff_device), GFP_KERNEL);
        if (!tmff)
                return -ENOMEM;
index f90959e..3abaca0 100644 (file)
@@ -54,11 +54,17 @@ static int zpff_init(struct hid_device *hid)
 {
        struct zpff_device *zpff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_entry(hid->inputs.next,
-                                               struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *dev;
        int i, error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        for (i = 0; i < 4; i++) {
                report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1);
                if (!report)
index 2a7c6e3..d9c55e3 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/pm.h>
-#include <linux/pm_runtime.h>
 #include <linux/device.h>
 #include <linux/wait.h>
 #include <linux/err.h>
@@ -48,8 +47,6 @@
 /* quirks to control the device */
 #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV       BIT(0)
 #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET       BIT(1)
-#define I2C_HID_QUIRK_NO_RUNTIME_PM            BIT(2)
-#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP                BIT(3)
 #define I2C_HID_QUIRK_BOGUS_IRQ                        BIT(4)
 
 /* flags */
@@ -172,14 +169,7 @@ static const struct i2c_hid_quirks {
        { USB_VENDOR_ID_WEIDA, HID_ANY_ID,
                I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
        { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
-               I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
-               I2C_HID_QUIRK_NO_RUNTIME_PM },
-       { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33,
-               I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
-       { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001,
-               I2C_HID_QUIRK_NO_RUNTIME_PM },
-       { I2C_VENDOR_ID_GOODIX, I2C_DEVICE_ID_GOODIX_01F0,
-               I2C_HID_QUIRK_NO_RUNTIME_PM },
+               I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
        { USB_VENDOR_ID_ELAN, HID_ANY_ID,
                 I2C_HID_QUIRK_BOGUS_IRQ },
        { 0, 0 }
@@ -397,7 +387,6 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
 {
        struct i2c_hid *ihid = i2c_get_clientdata(client);
        int ret;
-       unsigned long now, delay;
 
        i2c_hid_dbg(ihid, "%s\n", __func__);
 
@@ -415,22 +404,9 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
                        goto set_pwr_exit;
        }
 
-       if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
-           power_state == I2C_HID_PWR_ON) {
-               now = jiffies;
-               if (time_after(ihid->sleep_delay, now)) {
-                       delay = jiffies_to_usecs(ihid->sleep_delay - now);
-                       usleep_range(delay, delay + 1);
-               }
-       }
-
        ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
                0, NULL, 0, NULL, 0);
 
-       if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
-           power_state == I2C_HID_PWR_SLEEP)
-               ihid->sleep_delay = jiffies + msecs_to_jiffies(20);
-
        if (ret)
                dev_err(&client->dev, "failed to change power setting.\n");
 
@@ -791,11 +767,6 @@ static int i2c_hid_open(struct hid_device *hid)
 {
        struct i2c_client *client = hid->driver_data;
        struct i2c_hid *ihid = i2c_get_clientdata(client);
-       int ret = 0;
-
-       ret = pm_runtime_get_sync(&client->dev);
-       if (ret < 0)
-               return ret;
 
        set_bit(I2C_HID_STARTED, &ihid->flags);
        return 0;
@@ -807,27 +778,6 @@ static void i2c_hid_close(struct hid_device *hid)
        struct i2c_hid *ihid = i2c_get_clientdata(client);
 
        clear_bit(I2C_HID_STARTED, &ihid->flags);
-
-       /* Save some power */
-       pm_runtime_put(&client->dev);
-}
-
-static int i2c_hid_power(struct hid_device *hid, int lvl)
-{
-       struct i2c_client *client = hid->driver_data;
-       struct i2c_hid *ihid = i2c_get_clientdata(client);
-
-       i2c_hid_dbg(ihid, "%s lvl:%d\n", __func__, lvl);
-
-       switch (lvl) {
-       case PM_HINT_FULLON:
-               pm_runtime_get_sync(&client->dev);
-               break;
-       case PM_HINT_NORMAL:
-               pm_runtime_put(&client->dev);
-               break;
-       }
-       return 0;
 }
 
 struct hid_ll_driver i2c_hid_ll_driver = {
@@ -836,7 +786,6 @@ struct hid_ll_driver i2c_hid_ll_driver = {
        .stop = i2c_hid_stop,
        .open = i2c_hid_open,
        .close = i2c_hid_close,
-       .power = i2c_hid_power,
        .output_report = i2c_hid_output_report,
        .raw_request = i2c_hid_raw_request,
 };
@@ -1104,9 +1053,6 @@ static int i2c_hid_probe(struct i2c_client *client,
 
        i2c_hid_acpi_fix_up_power(&client->dev);
 
-       pm_runtime_get_noresume(&client->dev);
-       pm_runtime_set_active(&client->dev);
-       pm_runtime_enable(&client->dev);
        device_enable_async_suspend(&client->dev);
 
        /* Make sure there is something at this address */
@@ -1114,16 +1060,16 @@ static int i2c_hid_probe(struct i2c_client *client,
        if (ret < 0) {
                dev_dbg(&client->dev, "nothing at this address: %d\n", ret);
                ret = -ENXIO;
-               goto err_pm;
+               goto err_regulator;
        }
 
        ret = i2c_hid_fetch_hid_descriptor(ihid);
        if (ret < 0)
-               goto err_pm;
+               goto err_regulator;
 
        ret = i2c_hid_init_irq(client);
        if (ret < 0)
-               goto err_pm;
+               goto err_regulator;
 
        hid = hid_allocate_device();
        if (IS_ERR(hid)) {
@@ -1154,9 +1100,6 @@ static int i2c_hid_probe(struct i2c_client *client,
                goto err_mem_free;
        }
 
-       if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
-               pm_runtime_put(&client->dev);
-
        return 0;
 
 err_mem_free:
@@ -1165,10 +1108,6 @@ err_mem_free:
 err_irq:
        free_irq(client->irq, ihid);
 
-err_pm:
-       pm_runtime_put_noidle(&client->dev);
-       pm_runtime_disable(&client->dev);
-
 err_regulator:
        regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
                               ihid->pdata.supplies);
@@ -1181,12 +1120,6 @@ static int i2c_hid_remove(struct i2c_client *client)
        struct i2c_hid *ihid = i2c_get_clientdata(client);
        struct hid_device *hid;
 
-       if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
-               pm_runtime_get_sync(&client->dev);
-       pm_runtime_disable(&client->dev);
-       pm_runtime_set_suspended(&client->dev);
-       pm_runtime_put_noidle(&client->dev);
-
        hid = ihid->hid;
        hid_destroy_device(hid);
 
@@ -1219,25 +1152,15 @@ static int i2c_hid_suspend(struct device *dev)
        int wake_status;
 
        if (hid->driver && hid->driver->suspend) {
-               /*
-                * Wake up the device so that IO issues in
-                * HID driver's suspend code can succeed.
-                */
-               ret = pm_runtime_resume(dev);
-               if (ret < 0)
-                       return ret;
-
                ret = hid->driver->suspend(hid, PMSG_SUSPEND);
                if (ret < 0)
                        return ret;
        }
 
-       if (!pm_runtime_suspended(dev)) {
-               /* Save some power */
-               i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+       /* Save some power */
+       i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
 
-               disable_irq(client->irq);
-       }
+       disable_irq(client->irq);
 
        if (device_may_wakeup(&client->dev)) {
                wake_status = enable_irq_wake(client->irq);
@@ -1279,11 +1202,6 @@ static int i2c_hid_resume(struct device *dev)
                                wake_status);
        }
 
-       /* We'll resume to full power */
-       pm_runtime_disable(dev);
-       pm_runtime_set_active(dev);
-       pm_runtime_enable(dev);
-
        enable_irq(client->irq);
 
        /* Instead of resetting device, simply powers the device on. This
@@ -1304,30 +1222,8 @@ static int i2c_hid_resume(struct device *dev)
 }
 #endif
 
-#ifdef CONFIG_PM
-static int i2c_hid_runtime_suspend(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-
-       i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
-       disable_irq(client->irq);
-       return 0;
-}
-
-static int i2c_hid_runtime_resume(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-
-       enable_irq(client->irq);
-       i2c_hid_set_power(client, I2C_HID_PWR_ON);
-       return 0;
-}
-#endif
-
 static const struct dev_pm_ops i2c_hid_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(i2c_hid_suspend, i2c_hid_resume)
-       SET_RUNTIME_PM_OPS(i2c_hid_runtime_suspend, i2c_hid_runtime_resume,
-                          NULL)
 };
 
 static const struct i2c_device_id i2c_hid_id_table[] = {
index 75078c8..d31ea82 100644 (file)
@@ -322,6 +322,25 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
                },
                .driver_data = (void *)&sipodev_desc
        },
+       {
+               /*
+                * There are at least 2 Primebook C11B versions, the older
+                * version has a product-name of "Primebook C11B", and a
+                * bios version / release / firmware revision of:
+                * V2.1.2 / 05/03/2018 / 18.2
+                * The new version has "PRIMEBOOK C11B" as product-name and a
+                * bios version / release / firmware revision of:
+                * CFALKSW05_BIOS_V1.1.2 / 11/19/2018 / 19.2
+                * Only the older version needs this quirk, note the newer
+                * version will not match as it has a different product-name.
+                */
+               .ident = "Trekstor Primebook C11B",
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11B"),
+               },
+               .driver_data = (void *)&sipodev_desc
+       },
        {
                .ident = "Direkt-Tek DTLAPY116-2",
                .matches = {
index 1b0a0cc..513d7a4 100644 (file)
@@ -84,7 +84,7 @@ int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
        return  0;
 out:
        dev_err(&cl->device->dev, "error in allocating Tx pool\n");
-       ishtp_cl_free_rx_ring(cl);
+       ishtp_cl_free_tx_ring(cl);
        return  -ENOMEM;
 }
 
index fa66951..7b098ff 100644 (file)
 #define ASPEED_I2CD_S_TX_CMD                           BIT(2)
 #define ASPEED_I2CD_M_TX_CMD                           BIT(1)
 #define ASPEED_I2CD_M_START_CMD                                BIT(0)
+#define ASPEED_I2CD_MASTER_CMDS_MASK                                          \
+               (ASPEED_I2CD_M_STOP_CMD |                                      \
+                ASPEED_I2CD_M_S_RX_CMD_LAST |                                 \
+                ASPEED_I2CD_M_RX_CMD |                                        \
+                ASPEED_I2CD_M_TX_CMD |                                        \
+                ASPEED_I2CD_M_START_CMD)
 
 /* 0x18 : I2CD Slave Device Address Register   */
 #define ASPEED_I2CD_DEV_ADDR_MASK                      GENMASK(6, 0)
@@ -336,18 +342,19 @@ static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus)
        struct i2c_msg *msg = &bus->msgs[bus->msgs_index];
        u8 slave_addr = i2c_8bit_addr_from_msg(msg);
 
-       bus->master_state = ASPEED_I2C_MASTER_START;
-
 #if IS_ENABLED(CONFIG_I2C_SLAVE)
        /*
         * If it's requested in the middle of a slave session, set the master
         * state to 'pending' then H/W will continue handling this master
         * command when the bus comes back to the idle state.
         */
-       if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE)
+       if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) {
                bus->master_state = ASPEED_I2C_MASTER_PENDING;
+               return;
+       }
 #endif /* CONFIG_I2C_SLAVE */
 
+       bus->master_state = ASPEED_I2C_MASTER_START;
        bus->buf_index = 0;
 
        if (msg->flags & I2C_M_RD) {
@@ -422,20 +429,6 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
                }
        }
 
-#if IS_ENABLED(CONFIG_I2C_SLAVE)
-       /*
-        * A pending master command will be started by H/W when the bus comes
-        * back to idle state after completing a slave operation so change the
-        * master state from 'pending' to 'start' at here if slave is inactive.
-        */
-       if (bus->master_state == ASPEED_I2C_MASTER_PENDING) {
-               if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE)
-                       goto out_no_complete;
-
-               bus->master_state = ASPEED_I2C_MASTER_START;
-       }
-#endif /* CONFIG_I2C_SLAVE */
-
        /* Master is not currently active, irq was for someone else. */
        if (bus->master_state == ASPEED_I2C_MASTER_INACTIVE ||
            bus->master_state == ASPEED_I2C_MASTER_PENDING)
@@ -462,11 +455,15 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
 #if IS_ENABLED(CONFIG_I2C_SLAVE)
                /*
                 * If a peer master starts a xfer immediately after it queues a
-                * master command, change its state to 'pending' then H/W will
-                * continue the queued master xfer just after completing the
-                * slave mode session.
+                * master command, clear the queued master command and change
+                * its state to 'pending'. To simplify handling of pending
+                * cases, it uses S/W solution instead of H/W command queue
+                * handling.
                 */
                if (unlikely(irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH)) {
+                       writel(readl(bus->base + ASPEED_I2C_CMD_REG) &
+                               ~ASPEED_I2CD_MASTER_CMDS_MASK,
+                              bus->base + ASPEED_I2C_CMD_REG);
                        bus->master_state = ASPEED_I2C_MASTER_PENDING;
                        dev_dbg(bus->dev,
                                "master goes pending due to a slave start\n");
@@ -629,6 +626,14 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
                        irq_handled |= aspeed_i2c_master_irq(bus,
                                                             irq_remaining);
        }
+
+       /*
+        * Start a pending master command at here if a slave operation is
+        * completed.
+        */
+       if (bus->master_state == ASPEED_I2C_MASTER_PENDING &&
+           bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
+               aspeed_i2c_do_start(bus);
 #else
        irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
 #endif /* CONFIG_I2C_SLAVE */
@@ -691,6 +696,15 @@ static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
                     ASPEED_I2CD_BUS_BUSY_STS))
                        aspeed_i2c_recover_bus(bus);
 
+               /*
+                * If timed out and the state is still pending, drop the pending
+                * master command.
+                */
+               spin_lock_irqsave(&bus->lock, flags);
+               if (bus->master_state == ASPEED_I2C_MASTER_PENDING)
+                       bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
+               spin_unlock_irqrestore(&bus->lock, flags);
+
                return -ETIMEDOUT;
        }
 
index 29eae1b..2152ec5 100644 (file)
@@ -875,7 +875,7 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
 
 static u32 mtk_i2c_functionality(struct i2c_adapter *adap)
 {
-       if (adap->quirks->flags & I2C_AQ_NO_ZERO_LEN)
+       if (i2c_check_quirks(adap, I2C_AQ_NO_ZERO_LEN))
                return I2C_FUNC_I2C |
                        (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
        else
index d36cf08..b24e7b9 100644 (file)
@@ -305,7 +305,7 @@ struct stm32f7_i2c_dev {
        struct regmap *regmap;
 };
 
-/**
+/*
  * All these values are coming from I2C Specification, Version 6.0, 4th of
  * April 2014.
  *
@@ -1192,6 +1192,8 @@ static void stm32f7_i2c_slave_start(struct stm32f7_i2c_dev *i2c_dev)
                        STM32F7_I2C_CR1_TXIE;
                stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, mask);
 
+               /* Write 1st data byte */
+               writel_relaxed(value, base + STM32F7_I2C_TXDR);
        } else {
                /* Notify i2c slave that new write transfer is starting */
                i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
@@ -1501,7 +1503,7 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
        void __iomem *base = i2c_dev->base;
        struct device *dev = i2c_dev->dev;
        struct stm32_i2c_dma *dma = i2c_dev->dma;
-       u32 mask, status;
+       u32 status;
 
        status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
 
@@ -1526,12 +1528,15 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
                f7_msg->result = -EINVAL;
        }
 
-       /* Disable interrupts */
-       if (stm32f7_i2c_is_slave_registered(i2c_dev))
-               mask = STM32F7_I2C_XFER_IRQ_MASK;
-       else
-               mask = STM32F7_I2C_ALL_IRQ_MASK;
-       stm32f7_i2c_disable_irq(i2c_dev, mask);
+       if (!i2c_dev->slave_running) {
+               u32 mask;
+               /* Disable interrupts */
+               if (stm32f7_i2c_is_slave_registered(i2c_dev))
+                       mask = STM32F7_I2C_XFER_IRQ_MASK;
+               else
+                       mask = STM32F7_I2C_ALL_IRQ_MASK;
+               stm32f7_i2c_disable_irq(i2c_dev, mask);
+       }
 
        /* Disable dma */
        if (i2c_dev->use_dma) {
index 3a8b091..9d07378 100644 (file)
@@ -199,6 +199,7 @@ void ib_mad_cleanup(void);
 int ib_sa_init(void);
 void ib_sa_cleanup(void);
 
+void rdma_nl_init(void);
 void rdma_nl_exit(void);
 
 int ib_nl_handle_resolve_resp(struct sk_buff *skb,
index 2dd2cfe..50a9244 100644 (file)
@@ -2716,6 +2716,8 @@ static int __init ib_core_init(void)
                goto err_comp_unbound;
        }
 
+       rdma_nl_init();
+
        ret = addr_init();
        if (ret) {
                pr_warn("Could't init IB address resolution\n");
index 72141c5..ade7182 100644 (file)
@@ -372,6 +372,7 @@ EXPORT_SYMBOL(iw_cm_disconnect);
 static void destroy_cm_id(struct iw_cm_id *cm_id)
 {
        struct iwcm_id_private *cm_id_priv;
+       struct ib_qp *qp;
        unsigned long flags;
 
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
@@ -389,6 +390,9 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
        set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);
 
        spin_lock_irqsave(&cm_id_priv->lock, flags);
+       qp = cm_id_priv->qp;
+       cm_id_priv->qp = NULL;
+
        switch (cm_id_priv->state) {
        case IW_CM_STATE_LISTEN:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
@@ -401,7 +405,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* Abrupt close of the connection */
-               (void)iwcm_modify_qp_err(cm_id_priv->qp);
+               (void)iwcm_modify_qp_err(qp);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_IDLE:
@@ -426,11 +430,9 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
                BUG();
                break;
        }
-       if (cm_id_priv->qp) {
-               cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
-               cm_id_priv->qp = NULL;
-       }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+       if (qp)
+               cm_id_priv->id.device->ops.iw_rem_ref(qp);
 
        if (cm_id->mapped) {
                iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
@@ -671,11 +673,11 @@ int iw_cm_accept(struct iw_cm_id *cm_id,
                BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
-               if (cm_id_priv->qp) {
-                       cm_id->device->ops.iw_rem_ref(qp);
-                       cm_id_priv->qp = NULL;
-               }
+               qp = cm_id_priv->qp;
+               cm_id_priv->qp = NULL;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+               if (qp)
+                       cm_id->device->ops.iw_rem_ref(qp);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
        }
@@ -696,7 +698,7 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
        struct iwcm_id_private *cm_id_priv;
        int ret;
        unsigned long flags;
-       struct ib_qp *qp;
+       struct ib_qp *qp = NULL;
 
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
@@ -730,13 +732,13 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
                return 0;       /* success */
 
        spin_lock_irqsave(&cm_id_priv->lock, flags);
-       if (cm_id_priv->qp) {
-               cm_id->device->ops.iw_rem_ref(qp);
-               cm_id_priv->qp = NULL;
-       }
+       qp = cm_id_priv->qp;
+       cm_id_priv->qp = NULL;
        cm_id_priv->state = IW_CM_STATE_IDLE;
 err:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+       if (qp)
+               cm_id->device->ops.iw_rem_ref(qp);
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        wake_up_all(&cm_id_priv->connect_wait);
        return ret;
@@ -878,6 +880,7 @@ static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
 static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
 {
+       struct ib_qp *qp = NULL;
        unsigned long flags;
        int ret;
 
@@ -896,11 +899,13 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
                cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        } else {
                /* REJECTED or RESET */
-               cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
+               qp = cm_id_priv->qp;
                cm_id_priv->qp = NULL;
                cm_id_priv->state = IW_CM_STATE_IDLE;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+       if (qp)
+               cm_id_priv->id.device->ops.iw_rem_ref(qp);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
 
        if (iw_event->private_data_len)
@@ -942,21 +947,18 @@ static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
 static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
                                  struct iw_cm_event *iw_event)
 {
+       struct ib_qp *qp;
        unsigned long flags;
-       int ret = 0;
+       int ret = 0, notify_event = 0;
        spin_lock_irqsave(&cm_id_priv->lock, flags);
+       qp = cm_id_priv->qp;
+       cm_id_priv->qp = NULL;
 
-       if (cm_id_priv->qp) {
-               cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
-               cm_id_priv->qp = NULL;
-       }
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_IDLE;
-               spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-               ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
-               spin_lock_irqsave(&cm_id_priv->lock, flags);
+               notify_event = 1;
                break;
        case IW_CM_STATE_DESTROYING:
                break;
@@ -965,6 +967,10 @@ static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
+       if (qp)
+               cm_id_priv->id.device->ops.iw_rem_ref(qp);
+       if (notify_event)
+               ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
        return ret;
 }
 
index 81dbd5f..8cd31ef 100644 (file)
 #include <linux/module.h>
 #include "core_priv.h"
 
-static DEFINE_MUTEX(rdma_nl_mutex);
 static struct {
-       const struct rdma_nl_cbs   *cb_table;
+       const struct rdma_nl_cbs *cb_table;
+       /* Synchronizes between ongoing netlink commands and netlink client
+        * unregistration.
+        */
+       struct rw_semaphore sem;
 } rdma_nl_types[RDMA_NL_NUM_CLIENTS];
 
 bool rdma_nl_chk_listeners(unsigned int group)
@@ -75,70 +78,53 @@ static bool is_nl_msg_valid(unsigned int type, unsigned int op)
        return (op < max_num_ops[type]) ? true : false;
 }
 
-static bool
-is_nl_valid(const struct sk_buff *skb, unsigned int type, unsigned int op)
+static const struct rdma_nl_cbs *
+get_cb_table(const struct sk_buff *skb, unsigned int type, unsigned int op)
 {
        const struct rdma_nl_cbs *cb_table;
 
-       if (!is_nl_msg_valid(type, op))
-               return false;
-
        /*
         * Currently only NLDEV client is supporting netlink commands in
         * non init_net net namespace.
         */
        if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
-               return false;
+               return NULL;
 
-       if (!rdma_nl_types[type].cb_table) {
-               mutex_unlock(&rdma_nl_mutex);
-               request_module("rdma-netlink-subsys-%d", type);
-               mutex_lock(&rdma_nl_mutex);
-       }
+       cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
+       if (!cb_table) {
+               /*
+                * Didn't get valid reference of the table, attempt module
+                * load once.
+                */
+               up_read(&rdma_nl_types[type].sem);
 
-       cb_table = rdma_nl_types[type].cb_table;
+               request_module("rdma-netlink-subsys-%d", type);
 
+               down_read(&rdma_nl_types[type].sem);
+               cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
+       }
        if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
-               return false;
-       return true;
+               return NULL;
+       return cb_table;
 }
 
 void rdma_nl_register(unsigned int index,
                      const struct rdma_nl_cbs cb_table[])
 {
-       mutex_lock(&rdma_nl_mutex);
-       if (!is_nl_msg_valid(index, 0)) {
-               /*
-                * All clients are not interesting in success/failure of
-                * this call. They want to see the print to error log and
-                * continue their initialization. Print warning for them,
-                * because it is programmer's error to be here.
-                */
-               mutex_unlock(&rdma_nl_mutex);
-               WARN(true,
-                    "The not-valid %u index was supplied to RDMA netlink\n",
-                    index);
+       if (WARN_ON(!is_nl_msg_valid(index, 0)) ||
+           WARN_ON(READ_ONCE(rdma_nl_types[index].cb_table)))
                return;
-       }
-
-       if (rdma_nl_types[index].cb_table) {
-               mutex_unlock(&rdma_nl_mutex);
-               WARN(true,
-                    "The %u index is already registered in RDMA netlink\n",
-                    index);
-               return;
-       }
 
-       rdma_nl_types[index].cb_table = cb_table;
-       mutex_unlock(&rdma_nl_mutex);
+       /* Pairs with the READ_ONCE in is_nl_valid() */
+       smp_store_release(&rdma_nl_types[index].cb_table, cb_table);
 }
 EXPORT_SYMBOL(rdma_nl_register);
 
 void rdma_nl_unregister(unsigned int index)
 {
-       mutex_lock(&rdma_nl_mutex);
+       down_write(&rdma_nl_types[index].sem);
        rdma_nl_types[index].cb_table = NULL;
-       mutex_unlock(&rdma_nl_mutex);
+       up_write(&rdma_nl_types[index].sem);
 }
 EXPORT_SYMBOL(rdma_nl_unregister);
 
@@ -170,15 +156,21 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
        unsigned int index = RDMA_NL_GET_CLIENT(type);
        unsigned int op = RDMA_NL_GET_OP(type);
        const struct rdma_nl_cbs *cb_table;
+       int err = -EINVAL;
 
-       if (!is_nl_valid(skb, index, op))
+       if (!is_nl_msg_valid(index, op))
                return -EINVAL;
 
-       cb_table = rdma_nl_types[index].cb_table;
+       down_read(&rdma_nl_types[index].sem);
+       cb_table = get_cb_table(skb, index, op);
+       if (!cb_table)
+               goto done;
 
        if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) &&
-           !netlink_capable(skb, CAP_NET_ADMIN))
-               return -EPERM;
+           !netlink_capable(skb, CAP_NET_ADMIN)) {
+               err = -EPERM;
+               goto done;
+       }
 
        /*
         * LS responses overload the 0x100 (NLM_F_ROOT) flag.  Don't
@@ -186,8 +178,8 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
         */
        if (index == RDMA_NL_LS) {
                if (cb_table[op].doit)
-                       return cb_table[op].doit(skb, nlh, extack);
-               return -EINVAL;
+                       err = cb_table[op].doit(skb, nlh, extack);
+               goto done;
        }
        /* FIXME: Convert IWCM to properly handle doit callbacks */
        if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_IWCM) {
@@ -195,14 +187,15 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
                        .dump = cb_table[op].dump,
                };
                if (c.dump)
-                       return netlink_dump_start(skb->sk, skb, nlh, &c);
-               return -EINVAL;
+                       err = netlink_dump_start(skb->sk, skb, nlh, &c);
+               goto done;
        }
 
        if (cb_table[op].doit)
-               return cb_table[op].doit(skb, nlh, extack);
-
-       return 0;
+               err = cb_table[op].doit(skb, nlh, extack);
+done:
+       up_read(&rdma_nl_types[index].sem);
+       return err;
 }
 
 /*
@@ -263,9 +256,7 @@ skip:
 
 static void rdma_nl_rcv(struct sk_buff *skb)
 {
-       mutex_lock(&rdma_nl_mutex);
        rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg);
-       mutex_unlock(&rdma_nl_mutex);
 }
 
 int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid)
@@ -297,6 +288,14 @@ int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(rdma_nl_multicast);
 
+void rdma_nl_init(void)
+{
+       int idx;
+
+       for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
+               init_rwsem(&rdma_nl_types[idx].sem);
+}
+
 void rdma_nl_exit(void)
 {
        int idx;
index 65b3654..c03af08 100644 (file)
@@ -778,7 +778,7 @@ static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin,
                container_of(res, struct rdma_counter, res);
 
        if (port && port != counter->port)
-               return 0;
+               return -EAGAIN;
 
        /* Dump it even query failed */
        rdma_counter_query_stats(counter);
index 1e5aeb3..63f7f7d 100644 (file)
@@ -98,7 +98,7 @@ ib_uverbs_init_udata_buf_or_null(struct ib_udata *udata,
 
 struct ib_uverbs_device {
        atomic_t                                refcount;
-       int                                     num_comp_vectors;
+       u32                                     num_comp_vectors;
        struct completion                       comp;
        struct device                           dev;
        /* First group for device attributes, NULL terminated array */
index f974b68..35c2841 100644 (file)
@@ -662,16 +662,17 @@ static bool find_gid_index(const union ib_gid *gid,
                           void *context)
 {
        struct find_gid_index_context *ctx = context;
+       u16 vlan_id = 0xffff;
+       int ret;
 
        if (ctx->gid_type != gid_attr->gid_type)
                return false;
 
-       if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
-           (is_vlan_dev(gid_attr->ndev) &&
-            vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
+       ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
+       if (ret)
                return false;
 
-       return true;
+       return ctx->vlan_id == vlan_id;
 }
 
 static const struct ib_gid_attr *
index e87fc04..347dc24 100644 (file)
@@ -495,7 +495,6 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
 
        ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
        release_ep_resources(ep);
-       kfree_skb(skb);
        return 0;
 }
 
@@ -506,7 +505,6 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
        ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
        c4iw_put_ep(&ep->parent_ep->com);
        release_ep_resources(ep);
-       kfree_skb(skb);
        return 0;
 }
 
@@ -2424,20 +2422,6 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
        enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
 
        pr_debug("ep %p tid %u\n", ep, ep->hwtid);
-
-       skb_get(skb);
-       rpl = cplhdr(skb);
-       if (!is_t4(adapter_type)) {
-               skb_trim(skb, roundup(sizeof(*rpl5), 16));
-               rpl5 = (void *)rpl;
-               INIT_TP_WR(rpl5, ep->hwtid);
-       } else {
-               skb_trim(skb, sizeof(*rpl));
-               INIT_TP_WR(rpl, ep->hwtid);
-       }
-       OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
-                                                   ep->hwtid));
-
        cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
                      enable_tcp_timestamps && req->tcpopt.tstamp,
                      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
@@ -2483,6 +2467,20 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
                if (tcph->ece && tcph->cwr)
                        opt2 |= CCTRL_ECN_V(1);
        }
+
+       skb_get(skb);
+       rpl = cplhdr(skb);
+       if (!is_t4(adapter_type)) {
+               skb_trim(skb, roundup(sizeof(*rpl5), 16));
+               rpl5 = (void *)rpl;
+               INIT_TP_WR(rpl5, ep->hwtid);
+       } else {
+               skb_trim(skb, sizeof(*rpl));
+               INIT_TP_WR(rpl, ep->hwtid);
+       }
+       OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+                                                   ep->hwtid));
+
        if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
                u32 isn = (prandom_u32() & ~7UL) - 1;
                opt2 |= T5_OPT_2_VALID_F;
index 2ed7bfd..c61b602 100644 (file)
@@ -65,6 +65,7 @@
 #define SDMA_DESCQ_CNT 2048
 #define SDMA_DESC_INTR 64
 #define INVALID_TAIL 0xffff
+#define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32))
 
 static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
 module_param(sdma_descq_cnt, uint, S_IRUGO);
@@ -1296,7 +1297,7 @@ void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
        struct sdma_engine *sde;
 
        if (dd->sdma_pad_dma) {
-               dma_free_coherent(&dd->pcidev->dev, 4,
+               dma_free_coherent(&dd->pcidev->dev, SDMA_PAD,
                                  (void *)dd->sdma_pad_dma,
                                  dd->sdma_pad_phys);
                dd->sdma_pad_dma = NULL;
@@ -1491,7 +1492,7 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
        }
 
        /* Allocate memory for pad */
-       dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
+       dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD,
                                              &dd->sdma_pad_phys, GFP_KERNEL);
        if (!dd->sdma_pad_dma) {
                dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
index b4dcc4d..f21fca3 100644 (file)
@@ -2736,11 +2736,6 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
                                diff = cmp_psn(psn,
                                               flow->flow_state.r_next_psn);
                                if (diff > 0) {
-                                       if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
-                                               restart_tid_rdma_read_req(rcd,
-                                                                         qp,
-                                                                         wqe);
-
                                        /* Drop the packet.*/
                                        goto s_unlock;
                                } else if (diff < 0) {
index 7bff0a1..089e201 100644 (file)
@@ -147,9 +147,6 @@ static int pio_wait(struct rvt_qp *qp,
 /* Length of buffer to create verbs txreq cache name */
 #define TXREQ_NAME_LEN 24
 
-/* 16B trailing buffer */
-static const u8 trail_buf[MAX_16B_PADDING];
-
 static uint wss_threshold = 80;
 module_param(wss_threshold, uint, S_IRUGO);
 MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
@@ -820,8 +817,8 @@ static int build_verbs_tx_desc(
 
        /* add icrc, lt byte, and padding to flit */
        if (extra_bytes)
-               ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
-                                       (void *)trail_buf, extra_bytes);
+               ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
+                                      sde->dd->sdma_pad_phys, extra_bytes);
 
 bail_txadd:
        return ret;
@@ -1089,7 +1086,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
                }
                /* add icrc, lt byte, and padding to flit */
                if (extra_bytes)
-                       seg_pio_copy_mid(pbuf, trail_buf, extra_bytes);
+                       seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma,
+                                        extra_bytes);
 
                seg_pio_copy_end(pbuf);
        }
index 7a89d66..e82567f 100644 (file)
@@ -5389,9 +5389,9 @@ static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
                return;
        }
 
-       if (eq->buf_list)
-               dma_free_coherent(hr_dev->dev, buf_chk_sz,
-                                 eq->buf_list->buf, eq->buf_list->map);
+       dma_free_coherent(hr_dev->dev, buf_chk_sz, eq->buf_list->buf,
+                         eq->buf_list->map);
+       kfree(eq->buf_list);
 }
 
 static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
index 6305993..7019c12 100644 (file)
@@ -1967,8 +1967,8 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw)
        int err;
 
        if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
-               xa_erase(&dev->mdev->priv.mkey_table,
-                        mlx5_base_mkey(mmw->mmkey.key));
+               xa_erase_irq(&dev->mdev->priv.mkey_table,
+                            mlx5_base_mkey(mmw->mmkey.key));
                /*
                 * pagefault_single_data_segment() may be accessing mmw under
                 * SRCU if the user bound an ODP MR to this MW.
index 8937d72..5fd071c 100644 (file)
@@ -3249,10 +3249,12 @@ static int modify_raw_packet_qp_sq(
        }
 
        /* Only remove the old rate after new rate was set */
-       if ((old_rl.rate &&
-            !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
-           (new_state != MLX5_SQC_STATE_RDY))
+       if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
+           (new_state != MLX5_SQC_STATE_RDY)) {
                mlx5_rl_remove_rate(dev, &old_rl);
+               if (new_state != MLX5_SQC_STATE_RDY)
+                       memset(&new_rl, 0, sizeof(new_rl));
+       }
 
        ibqp->rl = new_rl;
        sq->state = new_state;
index 5136b83..dc71b6e 100644 (file)
@@ -76,7 +76,7 @@ static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
        struct qedr_dev *qedr = get_qedr_dev(ibdev);
        u32 fw_ver = (u32)qedr->attr.fw_ver;
 
-       snprintf(str, IB_FW_VERSION_NAME_MAX, "%d. %d. %d. %d",
+       snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
                 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
                 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
 }
index 52d402f..b431748 100644 (file)
@@ -1312,6 +1312,7 @@ int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
 void siw_free_qp(struct kref *ref)
 {
        struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref);
+       struct siw_base_qp *siw_base_qp = to_siw_base_qp(qp->ib_qp);
        struct siw_device *sdev = qp->sdev;
        unsigned long flags;
 
@@ -1334,4 +1335,5 @@ void siw_free_qp(struct kref *ref)
        atomic_dec(&sdev->num_qp);
        siw_dbg_qp(qp, "free QP\n");
        kfree_rcu(qp, rcu);
+       kfree(siw_base_qp);
 }
index 869e02b..b18a677 100644 (file)
@@ -604,7 +604,6 @@ out:
 int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
 {
        struct siw_qp *qp = to_siw_qp(base_qp);
-       struct siw_base_qp *siw_base_qp = to_siw_base_qp(base_qp);
        struct siw_ucontext *uctx =
                rdma_udata_to_drv_context(udata, struct siw_ucontext,
                                          base_ucontext);
@@ -641,7 +640,6 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
        qp->scq = qp->rcq = NULL;
 
        siw_qp_put(qp);
-       kfree(siw_base_qp);
 
        return 0;
 }
index 3492339..1139714 100644 (file)
@@ -81,8 +81,10 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts)
        for (i = 0, y = 0; i < ts->chip_info->max_fingers; i++, y += 3) {
                finger[i].is_valid = buf[i + y] >> 7;
                if (finger[i].is_valid) {
-                       finger[i].x = ((buf[i + y] & 0x0070) << 4) | buf[i + 1];
-                       finger[i].y = ((buf[i + y] & 0x0007) << 8) | buf[i + 2];
+                       finger[i].x = ((buf[i + y] & 0x0070) << 4) |
+                                       buf[i + y + 1];
+                       finger[i].y = ((buf[i + y] & 0x0007) << 8) |
+                                       buf[i + y + 2];
 
                        /* st1232 includes a z-axis / touch strength */
                        if (ts->chip_info->have_z)
index c235f79..5120ce4 100644 (file)
@@ -73,6 +73,19 @@ static const struct dmi_system_id ivrs_quirks[] __initconst = {
                },
                .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
        },
+       {
+               /*
+                * Acer Aspire A315-41 requires the very same workaround as
+                * Dell Latitude 5495
+                */
+               .callback = ivrs_ioapic_quirk_cb,
+               .ident = "Acer Aspire A315-41",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-41"),
+               },
+               .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
+       },
        {
                .callback = ivrs_ioapic_quirk_cb,
                .ident = "Lenovo ideapad 330S-15ARR",
index 3f97491..6db6d96 100644 (file)
@@ -2794,7 +2794,7 @@ static int identity_mapping(struct device *dev)
        struct device_domain_info *info;
 
        info = dev->archdata.iommu;
-       if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
+       if (info && info != DUMMY_DEVICE_DOMAIN_INFO && info != DEFER_DEVICE_DOMAIN_INFO)
                return (info->domain == si_domain);
 
        return 0;
@@ -3471,7 +3471,7 @@ static bool iommu_need_mapping(struct device *dev)
                if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
                        dma_mask = dev->coherent_dma_mask;
 
-               if (dma_mask >= dma_get_required_mask(dev))
+               if (dma_mask >= dma_direct_get_required_mask(dev))
                        return false;
 
                /*
@@ -3775,6 +3775,13 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
        return nelems;
 }
 
+static u64 intel_get_required_mask(struct device *dev)
+{
+       if (!iommu_need_mapping(dev))
+               return dma_direct_get_required_mask(dev);
+       return DMA_BIT_MASK(32);
+}
+
 static const struct dma_map_ops intel_dma_ops = {
        .alloc = intel_alloc_coherent,
        .free = intel_free_coherent,
@@ -3787,6 +3794,7 @@ static const struct dma_map_ops intel_dma_ops = {
        .dma_supported = dma_direct_supported,
        .mmap = dma_common_mmap,
        .get_sgtable = dma_common_get_sgtable,
+       .get_required_mask = intel_get_required_mask,
 };
 
 static void
index 2371034..2639fc7 100644 (file)
@@ -1105,10 +1105,8 @@ static int ipmmu_probe(struct platform_device *pdev)
        /* Root devices have mandatory IRQs */
        if (ipmmu_is_root(mmu)) {
                irq = platform_get_irq(pdev, 0);
-               if (irq < 0) {
-                       dev_err(&pdev->dev, "no IRQ found\n");
+               if (irq < 0)
                        return irq;
-               }
 
                ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
                                       dev_name(&pdev->dev), mmu);
index 1a57cee..0b0a737 100644 (file)
@@ -15,6 +15,7 @@
 
 /* FIC Registers */
 #define AL_FIC_CAUSE           0x00
+#define AL_FIC_SET_CAUSE       0x08
 #define AL_FIC_MASK            0x10
 #define AL_FIC_CONTROL         0x28
 
@@ -126,6 +127,16 @@ static void al_fic_irq_handler(struct irq_desc *desc)
        chained_irq_exit(irqchip, desc);
 }
 
+static int al_fic_irq_retrigger(struct irq_data *data)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+       struct al_fic *fic = gc->private;
+
+       writel_relaxed(BIT(data->hwirq), fic->base + AL_FIC_SET_CAUSE);
+
+       return 1;
+}
+
 static int al_fic_register(struct device_node *node,
                           struct al_fic *fic)
 {
@@ -159,6 +170,7 @@ static int al_fic_register(struct device_node *node,
        gc->chip_types->chip.irq_unmask = irq_gc_mask_clr_bit;
        gc->chip_types->chip.irq_ack = irq_gc_ack_clr_bit;
        gc->chip_types->chip.irq_set_type = al_fic_irq_set_type;
+       gc->chip_types->chip.irq_retrigger = al_fic_irq_retrigger;
        gc->chip_types->chip.flags = IRQCHIP_SKIP_SET_WAKE;
        gc->private = fic;
 
index 6acad2e..2933349 100644 (file)
@@ -313,6 +313,7 @@ static void __init sama5d3_aic_irq_fixup(void)
 static const struct of_device_id aic5_irq_fixups[] __initconst = {
        { .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
        { .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
+       { .compatible = "microchip,sam9x60", .data = sama5d3_aic_irq_fixup },
        { /* sentinel */ },
 };
 
@@ -390,3 +391,12 @@ static int __init sama5d4_aic5_of_init(struct device_node *node,
        return aic5_of_init(node, parent, NR_SAMA5D4_IRQS);
 }
 IRQCHIP_DECLARE(sama5d4_aic5, "atmel,sama5d4-aic", sama5d4_aic5_of_init);
+
+#define NR_SAM9X60_IRQS                50
+
+static int __init sam9x60_aic5_of_init(struct device_node *node,
+                                      struct device_node *parent)
+{
+       return aic5_of_init(node, parent, NR_SAM9X60_IRQS);
+}
+IRQCHIP_DECLARE(sam9x60_aic5, "microchip,sam9x60-aic", sam9x60_aic5_of_init);
index 62e54f1..787e8ee 100644 (file)
@@ -175,6 +175,22 @@ static DEFINE_IDA(its_vpeid_ida);
 #define gic_data_rdist_rd_base()       (gic_data_rdist()->rd_base)
 #define gic_data_rdist_vlpi_base()     (gic_data_rdist_rd_base() + SZ_128K)
 
+static u16 get_its_list(struct its_vm *vm)
+{
+       struct its_node *its;
+       unsigned long its_list = 0;
+
+       list_for_each_entry(its, &its_nodes, entry) {
+               if (!its->is_v4)
+                       continue;
+
+               if (vm->vlpi_count[its->list_nr])
+                       __set_bit(its->list_nr, &its_list);
+       }
+
+       return (u16)its_list;
+}
+
 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
                                               u32 event)
 {
@@ -976,17 +992,15 @@ static void its_send_vmapp(struct its_node *its,
 
 static void its_send_vmovp(struct its_vpe *vpe)
 {
-       struct its_cmd_desc desc;
+       struct its_cmd_desc desc = {};
        struct its_node *its;
        unsigned long flags;
        int col_id = vpe->col_idx;
 
        desc.its_vmovp_cmd.vpe = vpe;
-       desc.its_vmovp_cmd.its_list = (u16)its_list_map;
 
        if (!its_list_map) {
                its = list_first_entry(&its_nodes, struct its_node, entry);
-               desc.its_vmovp_cmd.seq_num = 0;
                desc.its_vmovp_cmd.col = &its->collections[col_id];
                its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
                return;
@@ -1003,6 +1017,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
        raw_spin_lock_irqsave(&vmovp_lock, flags);
 
        desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
+       desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
 
        /* Emit VMOVPs */
        list_for_each_entry(its, &its_nodes, entry) {
index 422664a..1edc993 100644 (file)
@@ -59,7 +59,7 @@ static struct gic_chip_data gic_data __read_mostly;
 static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
 
 #define GIC_ID_NR      (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
-#define GIC_LINE_NR    max(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
+#define GIC_LINE_NR    min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
 #define GIC_ESPI_NR    GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
 
 /*
index c72c036..7d0a12f 100644 (file)
@@ -97,7 +97,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
        }
 }
 
-static void plic_irq_enable(struct irq_data *d)
+static void plic_irq_unmask(struct irq_data *d)
 {
        unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
                                           cpu_online_mask);
@@ -106,7 +106,7 @@ static void plic_irq_enable(struct irq_data *d)
        plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
 }
 
-static void plic_irq_disable(struct irq_data *d)
+static void plic_irq_mask(struct irq_data *d)
 {
        plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
 }
@@ -125,10 +125,8 @@ static int plic_set_affinity(struct irq_data *d,
        if (cpu >= nr_cpu_ids)
                return -EINVAL;
 
-       if (!irqd_irq_disabled(d)) {
-               plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
-               plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
-       }
+       plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+       plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
 
        irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
@@ -136,14 +134,18 @@ static int plic_set_affinity(struct irq_data *d,
 }
 #endif
 
+static void plic_irq_eoi(struct irq_data *d)
+{
+       struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+       writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+}
+
 static struct irq_chip plic_chip = {
        .name           = "SiFive PLIC",
-       /*
-        * There is no need to mask/unmask PLIC interrupts.  They are "masked"
-        * by reading claim and "unmasked" when writing it back.
-        */
-       .irq_enable     = plic_irq_enable,
-       .irq_disable    = plic_irq_disable,
+       .irq_mask       = plic_irq_mask,
+       .irq_unmask     = plic_irq_unmask,
+       .irq_eoi        = plic_irq_eoi,
 #ifdef CONFIG_SMP
        .irq_set_affinity = plic_set_affinity,
 #endif
@@ -152,7 +154,7 @@ static struct irq_chip plic_chip = {
 static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hwirq)
 {
-       irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq);
+       irq_set_chip_and_handler(irq, &plic_chip, handle_fasteoi_irq);
        irq_set_chip_data(irq, NULL);
        irq_set_noprobe(irq);
        return 0;
@@ -188,7 +190,6 @@ static void plic_handle_irq(struct pt_regs *regs)
                                        hwirq);
                else
                        generic_handle_irq(irq);
-               writel(hwirq, claim);
        }
        csr_set(sie, SIE_SEIE);
 }
@@ -251,8 +252,8 @@ static int __init plic_init(struct device_node *node,
                        continue;
                }
 
-               /* skip context holes */
-               if (parent.args[0] == -1)
+               /* skip contexts other than supervisor external interrupt */
+               if (parent.args[0] != IRQ_S_EXT)
                        continue;
 
                hartid = plic_find_hart_id(parent.np);
index c92b405..ba86195 100644 (file)
@@ -744,7 +744,7 @@ capi_poll(struct file *file, poll_table *wait)
 
        poll_wait(file, &(cdev->recvwait), wait);
        mask = EPOLLOUT | EPOLLWRNORM;
-       if (!skb_queue_empty(&cdev->recvqueue))
+       if (!skb_queue_empty_lockless(&cdev->recvqueue))
                mask |= EPOLLIN | EPOLLRDNORM;
        return mask;
 }
index e4fa2a2..7e2bc50 100644 (file)
@@ -173,8 +173,8 @@ symbolic(struct hfcusb_symbolic_list list[], const int num)
 
 
 /*
- * List of all supported enpoints configiration sets, used to find the
- * best matching endpoint configuration within a devices' USB descriptor.
+ * List of all supported endpoint configuration sets, used to find the
+ * best matching endpoint configuration within a device's USB descriptor.
  * We need at least 3 RX endpoints, and 3 TX endpoints, either
  * INT-in and ISO-out, or ISO-in and ISO-out)
  * with 4 RX endpoints even E-Channel logging is possible
index 705c620..7b726f0 100644 (file)
@@ -18,7 +18,7 @@
 
 static int clamped;
 static struct wf_control *clamp_control;
-static struct dev_pm_qos_request qos_req;
+static struct freq_qos_request qos_req;
 static unsigned int min_freq, max_freq;
 
 static int clamp_set(struct wf_control *ct, s32 value)
@@ -35,7 +35,7 @@ static int clamp_set(struct wf_control *ct, s32 value)
        }
        clamped = value;
 
-       return dev_pm_qos_update_request(&qos_req, freq);
+       return freq_qos_update_request(&qos_req, freq);
 }
 
 static int clamp_get(struct wf_control *ct, s32 *value)
@@ -77,38 +77,44 @@ static int __init wf_cpufreq_clamp_init(void)
 
        min_freq = policy->cpuinfo.min_freq;
        max_freq = policy->cpuinfo.max_freq;
+
+       ret = freq_qos_add_request(&policy->constraints, &qos_req, FREQ_QOS_MAX,
+                                  max_freq);
+
        cpufreq_cpu_put(policy);
 
+       if (ret < 0) {
+               pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
+                      ret);
+               return ret;
+       }
+
        dev = get_cpu_device(0);
        if (unlikely(!dev)) {
                pr_warn("%s: No cpu device for cpu0\n", __func__);
-               return -ENODEV;
+               ret = -ENODEV;
+               goto fail;
        }
 
        clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL);
-       if (clamp == NULL)
-               return -ENOMEM;
-
-       ret = dev_pm_qos_add_request(dev, &qos_req, DEV_PM_QOS_MAX_FREQUENCY,
-                                    max_freq);
-       if (ret < 0) {
-               pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
-                      ret);
-               goto free;
+       if (clamp == NULL) {
+               ret = -ENOMEM;
+               goto fail;
        }
 
        clamp->ops = &clamp_ops;
        clamp->name = "cpufreq-clamp";
        ret = wf_register_control(clamp);
        if (ret)
-               goto fail;
+               goto free;
+
        clamp_control = clamp;
        return 0;
- fail:
-       dev_pm_qos_remove_request(&qos_req);
 
  free:
        kfree(clamp);
+ fail:
+       freq_qos_remove_request(&qos_req);
        return ret;
 }
 
@@ -116,7 +122,7 @@ static void __exit wf_cpufreq_clamp_exit(void)
 {
        if (clamp_control) {
                wf_unregister_control(clamp_control);
-               dev_pm_qos_remove_request(&qos_req);
+               freq_qos_remove_request(&qos_req);
        }
 }
 
index 310dae2..b2c325e 100644 (file)
@@ -129,11 +129,27 @@ static int mt6397_irq_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_irq_suspend,
                        mt6397_irq_resume);
 
+struct chip_data {
+       u32 cid_addr;
+       u32 cid_shift;
+};
+
+static const struct chip_data mt6323_core = {
+       .cid_addr = MT6323_CID,
+       .cid_shift = 0,
+};
+
+static const struct chip_data mt6397_core = {
+       .cid_addr = MT6397_CID,
+       .cid_shift = 0,
+};
+
 static int mt6397_probe(struct platform_device *pdev)
 {
        int ret;
        unsigned int id;
        struct mt6397_chip *pmic;
+       const struct chip_data *pmic_core;
 
        pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
        if (!pmic)
@@ -149,28 +165,30 @@ static int mt6397_probe(struct platform_device *pdev)
        if (!pmic->regmap)
                return -ENODEV;
 
-       platform_set_drvdata(pdev, pmic);
+       pmic_core = of_device_get_match_data(&pdev->dev);
+       if (!pmic_core)
+               return -ENODEV;
 
-       ret = regmap_read(pmic->regmap, MT6397_CID, &id);
+       ret = regmap_read(pmic->regmap, pmic_core->cid_addr, &id);
        if (ret) {
-               dev_err(pmic->dev, "Failed to read chip id: %d\n", ret);
+               dev_err(&pdev->dev, "Failed to read chip id: %d\n", ret);
                return ret;
        }
 
+       pmic->chip_id = (id >> pmic_core->cid_shift) & 0xff;
+
+       platform_set_drvdata(pdev, pmic);
+
        pmic->irq = platform_get_irq(pdev, 0);
        if (pmic->irq <= 0)
                return pmic->irq;
 
-       switch (id & 0xff) {
-       case MT6323_CHIP_ID:
-               pmic->int_con[0] = MT6323_INT_CON0;
-               pmic->int_con[1] = MT6323_INT_CON1;
-               pmic->int_status[0] = MT6323_INT_STATUS0;
-               pmic->int_status[1] = MT6323_INT_STATUS1;
-               ret = mt6397_irq_init(pmic);
-               if (ret)
-                       return ret;
+       ret = mt6397_irq_init(pmic);
+       if (ret)
+               return ret;
 
+       switch (pmic->chip_id) {
+       case MT6323_CHIP_ID:
                ret = devm_mfd_add_devices(&pdev->dev, -1, mt6323_devs,
                                           ARRAY_SIZE(mt6323_devs), NULL,
                                           0, pmic->irq_domain);
@@ -178,21 +196,13 @@ static int mt6397_probe(struct platform_device *pdev)
 
        case MT6391_CHIP_ID:
        case MT6397_CHIP_ID:
-               pmic->int_con[0] = MT6397_INT_CON0;
-               pmic->int_con[1] = MT6397_INT_CON1;
-               pmic->int_status[0] = MT6397_INT_STATUS0;
-               pmic->int_status[1] = MT6397_INT_STATUS1;
-               ret = mt6397_irq_init(pmic);
-               if (ret)
-                       return ret;
-
                ret = devm_mfd_add_devices(&pdev->dev, -1, mt6397_devs,
                                           ARRAY_SIZE(mt6397_devs), NULL,
                                           0, pmic->irq_domain);
                break;
 
        default:
-               dev_err(&pdev->dev, "unsupported chip: %d\n", id);
+               dev_err(&pdev->dev, "unsupported chip: %d\n", pmic->chip_id);
                return -ENODEV;
        }
 
@@ -205,9 +215,15 @@ static int mt6397_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id mt6397_of_match[] = {
-       { .compatible = "mediatek,mt6397" },
-       { .compatible = "mediatek,mt6323" },
-       { }
+       {
+               .compatible = "mediatek,mt6323",
+               .data = &mt6323_core,
+       }, {
+               .compatible = "mediatek,mt6397",
+               .data = &mt6397_core,
+       }, {
+               /* sentinel */
+       }
 };
 MODULE_DEVICE_TABLE(of, mt6397_of_match);
 
index f7bdae5..5047f73 100644 (file)
@@ -611,7 +611,8 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
        cq_host->slot[tag].flags = 0;
 
        cq_host->qcnt += 1;
-
+       /* Make sure descriptors are ready before ringing the doorbell */
+       wmb();
        cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
        if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
                pr_debug("%s: cqhci: doorbell not set for tag %d\n",
index 78e7e35..4031217 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
+#include <linux/dma/mxs-dma.h>
 #include <linux/highmem.h>
 #include <linux/clk.h>
 #include <linux/err.h>
@@ -266,7 +267,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
        ssp->ssp_pio_words[2] = cmd1;
        ssp->dma_dir = DMA_NONE;
        ssp->slave_dirn = DMA_TRANS_NONE;
-       desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
+       desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
        if (!desc)
                goto out;
 
@@ -311,7 +312,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
        ssp->ssp_pio_words[2] = cmd1;
        ssp->dma_dir = DMA_NONE;
        ssp->slave_dirn = DMA_TRANS_NONE;
-       desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
+       desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
        if (!desc)
                goto out;
 
@@ -441,7 +442,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
        host->data = data;
        ssp->dma_dir = dma_data_dir;
        ssp->slave_dirn = slave_dirn;
-       desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END);
        if (!desc)
                goto out;
 
index 41c2677..083e7e0 100644 (file)
@@ -372,7 +372,7 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
         * on temperature
         */
        if (temperature < -20000)
-               phase_delay = min(max_window + 4 * max_len - 24,
+               phase_delay = min(max_window + 4 * (max_len - 1) - 24,
                                  max_window +
                                  DIV_ROUND_UP(13 * max_len, 16) * 4);
        else if (temperature < 20000)
index 8c79bad..4f2e691 100644 (file)
@@ -952,7 +952,7 @@ static int alb_upper_dev_walk(struct net_device *upper, void *_data)
        struct bond_vlan_tag *tags;
 
        if (is_vlan_dev(upper) &&
-           bond->nest_level == vlan_get_encap_level(upper) - 1) {
+           bond->dev->lower_level == upper->lower_level - 1) {
                if (upper->addr_assign_type == NET_ADDR_STOLEN) {
                        alb_send_lp_vid(slave, mac_addr,
                                        vlan_dev_vlan_proto(upper),
index 21d8fcc..0059e6b 100644 (file)
@@ -200,6 +200,51 @@ atomic_t netpoll_block_tx = ATOMIC_INIT(0);
 
 unsigned int bond_net_id __read_mostly;
 
+static const struct flow_dissector_key flow_keys_bonding_keys[] = {
+       {
+               .key_id = FLOW_DISSECTOR_KEY_CONTROL,
+               .offset = offsetof(struct flow_keys, control),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_BASIC,
+               .offset = offsetof(struct flow_keys, basic),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+               .offset = offsetof(struct flow_keys, addrs.v4addrs),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+               .offset = offsetof(struct flow_keys, addrs.v6addrs),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_TIPC,
+               .offset = offsetof(struct flow_keys, addrs.tipckey),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_PORTS,
+               .offset = offsetof(struct flow_keys, ports),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_ICMP,
+               .offset = offsetof(struct flow_keys, icmp),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_VLAN,
+               .offset = offsetof(struct flow_keys, vlan),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
+               .offset = offsetof(struct flow_keys, tags),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
+               .offset = offsetof(struct flow_keys, keyid),
+       },
+};
+
+static struct flow_dissector flow_keys_bonding __read_mostly;
+
 /*-------------------------- Forward declarations ---------------------------*/
 
 static int bond_init(struct net_device *bond_dev);
@@ -1733,8 +1778,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
                goto err_upper_unlink;
        }
 
-       bond->nest_level = dev_get_nest_level(bond_dev) + 1;
-
        /* If the mode uses primary, then the following is handled by
         * bond_change_active_slave().
         */
@@ -1816,7 +1859,8 @@ err_detach:
        slave_disable_netpoll(new_slave);
 
 err_close:
-       slave_dev->priv_flags &= ~IFF_BONDING;
+       if (!netif_is_bond_master(slave_dev))
+               slave_dev->priv_flags &= ~IFF_BONDING;
        dev_close(slave_dev);
 
 err_restore_mac:
@@ -1956,9 +2000,6 @@ static int __bond_release_one(struct net_device *bond_dev,
        if (!bond_has_slaves(bond)) {
                bond_set_carrier(bond);
                eth_hw_addr_random(bond_dev);
-               bond->nest_level = SINGLE_DEPTH_NESTING;
-       } else {
-               bond->nest_level = dev_get_nest_level(bond_dev) + 1;
        }
 
        unblock_netpoll_tx();
@@ -2017,7 +2058,8 @@ static int __bond_release_one(struct net_device *bond_dev,
        else
                dev_set_mtu(slave_dev, slave->original_mtu);
 
-       slave_dev->priv_flags &= ~IFF_BONDING;
+       if (!netif_is_bond_master(slave_dev))
+               slave_dev->priv_flags &= ~IFF_BONDING;
 
        bond_free_slave(slave);
 
@@ -3263,10 +3305,14 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
        const struct iphdr *iph;
        int noff, proto = -1;
 
-       if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
-               return skb_flow_dissect_flow_keys(skb, fk, 0);
+       if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23) {
+               memset(fk, 0, sizeof(*fk));
+               return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
+                                         fk, NULL, 0, 0, 0, 0);
+       }
 
        fk->ports.ports = 0;
+       memset(&fk->icmp, 0, sizeof(fk->icmp));
        noff = skb_network_offset(skb);
        if (skb->protocol == htons(ETH_P_IP)) {
                if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
@@ -3286,8 +3332,14 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
        } else {
                return false;
        }
-       if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
-               fk->ports.ports = skb_flow_get_ports(skb, noff, proto);
+       if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0) {
+               if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6)
+                       skb_flow_get_icmp_tci(skb, &fk->icmp, skb->data,
+                                             skb_transport_offset(skb),
+                                             skb_headlen(skb));
+               else
+                       fk->ports.ports = skb_flow_get_ports(skb, noff, proto);
+       }
 
        return true;
 }
@@ -3314,10 +3366,14 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
                return bond_eth_hash(skb);
 
        if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
-           bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
+           bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) {
                hash = bond_eth_hash(skb);
-       else
-               hash = (__force u32)flow.ports.ports;
+       } else {
+               if (flow.icmp.id)
+                       memcpy(&hash, &flow.icmp, sizeof(hash));
+               else
+                       memcpy(&hash, &flow.ports.ports, sizeof(hash));
+       }
        hash ^= (__force u32)flow_get_u32_dst(&flow) ^
                (__force u32)flow_get_u32_src(&flow);
        hash ^= (hash >> 16);
@@ -3442,13 +3498,6 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
        }
 }
 
-static int bond_get_nest_level(struct net_device *bond_dev)
-{
-       struct bonding *bond = netdev_priv(bond_dev);
-
-       return bond->nest_level;
-}
-
 static void bond_get_stats(struct net_device *bond_dev,
                           struct rtnl_link_stats64 *stats)
 {
@@ -3457,7 +3506,7 @@ static void bond_get_stats(struct net_device *bond_dev,
        struct list_head *iter;
        struct slave *slave;
 
-       spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
+       spin_lock(&bond->stats_lock);
        memcpy(stats, &bond->bond_stats, sizeof(*stats));
 
        rcu_read_lock();
@@ -4268,7 +4317,6 @@ static const struct net_device_ops bond_netdev_ops = {
        .ndo_neigh_setup        = bond_neigh_setup,
        .ndo_vlan_rx_add_vid    = bond_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = bond_vlan_rx_kill_vid,
-       .ndo_get_lock_subclass  = bond_get_nest_level,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_netpoll_setup      = bond_netpoll_setup,
        .ndo_netpoll_cleanup    = bond_netpoll_cleanup,
@@ -4296,7 +4344,6 @@ void bond_setup(struct net_device *bond_dev)
        struct bonding *bond = netdev_priv(bond_dev);
 
        spin_lock_init(&bond->mode_lock);
-       spin_lock_init(&bond->stats_lock);
        bond->params = bonding_defaults;
 
        /* Initialize pointers */
@@ -4365,6 +4412,7 @@ static void bond_uninit(struct net_device *bond_dev)
 
        list_del(&bond->bond_list);
 
+       lockdep_unregister_key(&bond->stats_lock_key);
        bond_debug_unregister(bond);
 }
 
@@ -4768,8 +4816,9 @@ static int bond_init(struct net_device *bond_dev)
        if (!bond->wq)
                return -ENOMEM;
 
-       bond->nest_level = SINGLE_DEPTH_NESTING;
-       netdev_lockdep_set_classes(bond_dev);
+       spin_lock_init(&bond->stats_lock);
+       lockdep_register_key(&bond->stats_lock_key);
+       lockdep_set_class(&bond->stats_lock, &bond->stats_lock_key);
 
        list_add_tail(&bond->bond_list, &bn->dev_list);
 
@@ -4901,6 +4950,10 @@ static int __init bonding_init(void)
                        goto err;
        }
 
+       skb_flow_dissector_init(&flow_keys_bonding,
+                               flow_keys_bonding_keys,
+                               ARRAY_SIZE(flow_keys_bonding_keys));
+
        register_netdevice_notifier(&bond_netdev_notifier);
 out:
        return res;
index f6232ce..685e12b 100644 (file)
@@ -77,6 +77,7 @@ config NET_DSA_REALTEK_SMI
 config NET_DSA_SMSC_LAN9303
        tristate
        select NET_DSA_TAG_LAN9303
+       select REGMAP
        ---help---
          This enables support for the SMSC/Microchip LAN9303 3 port ethernet
          switch chips.
index baadf62..36828f2 100644 (file)
@@ -1503,11 +1503,25 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
                idx = 1;
        }
 
-       memset(&ent, 0, sizeof(ent));
-       ent.port = port;
+       /* For multicast address, the port is a bitmask and the validity
+        * is determined by having at least one port being still active
+        */
+       if (!is_multicast_ether_addr(addr)) {
+               ent.port = port;
+               ent.is_valid = is_valid;
+       } else {
+               if (is_valid)
+                       ent.port |= BIT(port);
+               else
+                       ent.port &= ~BIT(port);
+
+               ent.is_valid = !!(ent.port);
+       }
+
        ent.is_valid = is_valid;
        ent.vid = vid;
        ent.is_static = true;
+       ent.is_age = false;
        memcpy(ent.mac, addr, ETH_ALEN);
        b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
 
@@ -1626,6 +1640,47 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
 }
 EXPORT_SYMBOL(b53_fdb_dump);
 
+int b53_mdb_prepare(struct dsa_switch *ds, int port,
+                   const struct switchdev_obj_port_mdb *mdb)
+{
+       struct b53_device *priv = ds->priv;
+
+       /* 5325 and 5365 require some more massaging, but could
+        * be supported eventually
+        */
+       if (is5325(priv) || is5365(priv))
+               return -EOPNOTSUPP;
+
+       return 0;
+}
+EXPORT_SYMBOL(b53_mdb_prepare);
+
+void b53_mdb_add(struct dsa_switch *ds, int port,
+                const struct switchdev_obj_port_mdb *mdb)
+{
+       struct b53_device *priv = ds->priv;
+       int ret;
+
+       ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
+       if (ret)
+               dev_err(ds->dev, "failed to add MDB entry\n");
+}
+EXPORT_SYMBOL(b53_mdb_add);
+
+int b53_mdb_del(struct dsa_switch *ds, int port,
+               const struct switchdev_obj_port_mdb *mdb)
+{
+       struct b53_device *priv = ds->priv;
+       int ret;
+
+       ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false);
+       if (ret)
+               dev_err(ds->dev, "failed to delete MDB entry\n");
+
+       return ret;
+}
+EXPORT_SYMBOL(b53_mdb_del);
+
 int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
 {
        struct b53_device *dev = ds->priv;
@@ -1994,6 +2049,9 @@ static const struct dsa_switch_ops b53_switch_ops = {
        .port_fdb_del           = b53_fdb_del,
        .port_mirror_add        = b53_mirror_add,
        .port_mirror_del        = b53_mirror_del,
+       .port_mdb_prepare       = b53_mdb_prepare,
+       .port_mdb_add           = b53_mdb_add,
+       .port_mdb_del           = b53_mdb_del,
 };
 
 struct b53_chip_data {
index a7dd8ac..1877acf 100644 (file)
@@ -250,7 +250,7 @@ b53_build_op(write48, u64);
 b53_build_op(write64, u64);
 
 struct b53_arl_entry {
-       u8 port;
+       u16 port;
        u8 mac[ETH_ALEN];
        u16 vid;
        u8 is_valid:1;
@@ -351,6 +351,12 @@ int b53_fdb_del(struct dsa_switch *ds, int port,
                const unsigned char *addr, u16 vid);
 int b53_fdb_dump(struct dsa_switch *ds, int port,
                 dsa_fdb_dump_cb_t *cb, void *data);
+int b53_mdb_prepare(struct dsa_switch *ds, int port,
+                   const struct switchdev_obj_port_mdb *mdb);
+void b53_mdb_add(struct dsa_switch *ds, int port,
+                const struct switchdev_obj_port_mdb *mdb);
+int b53_mdb_del(struct dsa_switch *ds, int port,
+               const struct switchdev_obj_port_mdb *mdb);
 int b53_mirror_add(struct dsa_switch *ds, int port,
                   struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
 enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port);
index c068a3b..9add84c 100644 (file)
@@ -37,22 +37,11 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
        unsigned int i;
        u32 reg, offset;
 
-       if (priv->type == BCM7445_DEVICE_ID)
-               offset = CORE_STS_OVERRIDE_IMP;
-       else
-               offset = CORE_STS_OVERRIDE_IMP2;
-
        /* Enable the port memories */
        reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
        reg &= ~P_TXQ_PSM_VDD(port);
        core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
 
-       /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
-       reg = core_readl(priv, CORE_IMP_CTL);
-       reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
-       reg &= ~(RX_DIS | TX_DIS);
-       core_writel(priv, reg, CORE_IMP_CTL);
-
        /* Enable forwarding */
        core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
 
@@ -71,10 +60,27 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
 
        b53_brcm_hdr_setup(ds, port);
 
-       /* Force link status for IMP port */
-       reg = core_readl(priv, offset);
-       reg |= (MII_SW_OR | LINK_STS);
-       core_writel(priv, reg, offset);
+       if (port == 8) {
+               if (priv->type == BCM7445_DEVICE_ID)
+                       offset = CORE_STS_OVERRIDE_IMP;
+               else
+                       offset = CORE_STS_OVERRIDE_IMP2;
+
+               /* Force link status for IMP port */
+               reg = core_readl(priv, offset);
+               reg |= (MII_SW_OR | LINK_STS);
+               core_writel(priv, reg, offset);
+
+               /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
+               reg = core_readl(priv, CORE_IMP_CTL);
+               reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
+               reg &= ~(RX_DIS | TX_DIS);
+               core_writel(priv, reg, CORE_IMP_CTL);
+       } else {
+               reg = core_readl(priv, CORE_G_PCTL_PORT(port));
+               reg &= ~(RX_DIS | TX_DIS);
+               core_writel(priv, reg, CORE_G_PCTL_PORT(port));
+       }
 }
 
 static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
@@ -968,6 +974,9 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
        .set_rxnfc              = bcm_sf2_set_rxnfc,
        .port_mirror_add        = b53_mirror_add,
        .port_mirror_del        = b53_mirror_del,
+       .port_mdb_prepare       = b53_mdb_prepare,
+       .port_mdb_add           = b53_mdb_add,
+       .port_mdb_del           = b53_mdb_del,
 };
 
 struct bcm_sf2_of_data {
index 5fdf6d6..66de492 100644 (file)
@@ -1143,6 +1143,7 @@ static int mv88e6xxx_pri_setup(struct mv88e6xxx_chip *chip)
 
 static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip)
 {
+       struct dsa_switch *ds = chip->ds;
        int target, port;
        int err;
 
@@ -1151,10 +1152,9 @@ static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip)
 
        /* Initialize the routing port to the 32 possible target devices */
        for (target = 0; target < 32; target++) {
-               port = 0x1f;
-               if (target < DSA_MAX_SWITCHES)
-                       if (chip->ds->rtable[target] != DSA_RTABLE_NONE)
-                               port = chip->ds->rtable[target];
+               port = dsa_routing_port(ds, target);
+               if (port == ds->num_ports)
+                       port = 0x1f;
 
                err = mv88e6xxx_g2_device_mapping_write(chip, target, port);
                if (err)
@@ -1378,6 +1378,22 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
        return mv88e6xxx_g1_atu_flush(chip, *fid, true);
 }
 
+static int mv88e6xxx_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash)
+{
+       if (chip->info->ops->atu_get_hash)
+               return chip->info->ops->atu_get_hash(chip, hash);
+
+       return -EOPNOTSUPP;
+}
+
+static int mv88e6xxx_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash)
+{
+       if (chip->info->ops->atu_set_hash)
+               return chip->info->ops->atu_set_hash(chip, hash);
+
+       return -EOPNOTSUPP;
+}
+
 static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
                                        u16 vid_begin, u16 vid_end)
 {
@@ -2637,6 +2653,78 @@ static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
        return mv88e6xxx_software_reset(chip);
 }
 
+enum mv88e6xxx_devlink_param_id {
+       MV88E6XXX_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+       MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
+};
+
+static int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
+                                      struct devlink_param_gset_ctx *ctx)
+{
+       struct mv88e6xxx_chip *chip = ds->priv;
+       int err;
+
+       mv88e6xxx_reg_lock(chip);
+
+       switch (id) {
+       case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
+               err = mv88e6xxx_atu_get_hash(chip, &ctx->val.vu8);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       mv88e6xxx_reg_unlock(chip);
+
+       return err;
+}
+
+static int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
+                                      struct devlink_param_gset_ctx *ctx)
+{
+       struct mv88e6xxx_chip *chip = ds->priv;
+       int err;
+
+       mv88e6xxx_reg_lock(chip);
+
+       switch (id) {
+       case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
+               err = mv88e6xxx_atu_set_hash(chip, ctx->val.vu8);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       mv88e6xxx_reg_unlock(chip);
+
+       return err;
+}
+
+static const struct devlink_param mv88e6xxx_devlink_params[] = {
+       DSA_DEVLINK_PARAM_DRIVER(MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
+                                "ATU_hash", DEVLINK_PARAM_TYPE_U8,
+                                BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
+};
+
+static int mv88e6xxx_setup_devlink_params(struct dsa_switch *ds)
+{
+       return dsa_devlink_params_register(ds, mv88e6xxx_devlink_params,
+                                          ARRAY_SIZE(mv88e6xxx_devlink_params));
+}
+
+static void mv88e6xxx_teardown_devlink_params(struct dsa_switch *ds)
+{
+       dsa_devlink_params_unregister(ds, mv88e6xxx_devlink_params,
+                                     ARRAY_SIZE(mv88e6xxx_devlink_params));
+}
+
+static void mv88e6xxx_teardown(struct dsa_switch *ds)
+{
+       mv88e6xxx_teardown_devlink_params(ds);
+}
+
 static int mv88e6xxx_setup(struct dsa_switch *ds)
 {
        struct mv88e6xxx_chip *chip = ds->priv;
@@ -2753,7 +2841,11 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
 unlock:
        mv88e6xxx_reg_unlock(chip);
 
-       return err;
+       /* Has to be called without holding the register lock, since
+        * it takes the devlink lock, and we later take the locks in
+        * the reverse order when getting/setting parameters.
+        */
+       return mv88e6xxx_setup_devlink_params(ds);
 }
 
 static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
@@ -3113,6 +3205,8 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
        .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
+       .atu_get_hash = mv88e6165_g1_atu_get_hash,
+       .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
        .phylink_validate = mv88e6185_phylink_validate,
@@ -3242,6 +3336,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
        .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
+       .atu_get_hash = mv88e6165_g1_atu_get_hash,
+       .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
        .avb_ops = &mv88e6165_avb_ops,
@@ -3276,6 +3372,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
        .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
+       .atu_get_hash = mv88e6165_g1_atu_get_hash,
+       .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
        .avb_ops = &mv88e6165_avb_ops,
@@ -3318,6 +3416,8 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
        .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
+       .atu_get_hash = mv88e6165_g1_atu_get_hash,
+       .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
        .phylink_validate = mv88e6185_phylink_validate,
@@ -3362,6 +3462,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
        .rmu_disable = mv88e6352_g1_rmu_disable,
+       .atu_get_hash = mv88e6165_g1_atu_get_hash,
+       .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
        .serdes_get_lane = mv88e6352_serdes_get_lane,
@@ -3405,6 +3507,8 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
        .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
+       .atu_get_hash = mv88e6165_g1_atu_get_hash,
+       .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
        .phylink_validate = mv88e6185_phylink_validate,
@@ -3449,6 +3553,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
        .rmu_disable = mv88e6352_g1_rmu_disable,
+       .atu_get_hash = mv88e6165_g1_atu_get_hash,
+       .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
        .serdes_get_lane = mv88e6352_serdes_get_lane,
@@ -3534,6 +3640,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
        .rmu_disable = mv88e6390_g1_rmu_disable,
+       .atu_get_hash = mv88e6165_g1_atu_get_hash,
+       .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6390_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
        .serdes_power = mv88e6390_serdes_power,
@@ -3583,6 +3691,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
        .rmu_disable = mv88e6390_g1_rmu_disable,
+       .atu_get_hash = mv88e6165_g1_atu_get_hash,
+       .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6390_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
        .serdes_power = mv88e6390_serdes_power,
@@ -3631,6 +3741,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
        .rmu_disable = mv88e6390_g1_rmu_disable,
+       .atu_get_hash = mv88e6165_g1_atu_get_hash,
+       .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6390_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
        .serdes_power = mv88e6390_serdes_power,
@@ -3682,6 +3794,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
        .rmu_disable = mv88e6352_g1_rmu_disable,
+       .atu_get_hash = mv88e6165_g1_atu_get_hash,
+       .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
        .serdes_get_lane = mv88e6352_serdes_get_lane,
@@ -3773,6 +3887,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
        .rmu_disable = mv88e6390_g1_rmu_disable,
+       .atu_get_hash = mv88e6165_g1_atu_get_hash,
+       .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6390_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
        .serdes_power = mv88e6390_serdes_power,
@@ -3959,6 +4075,8 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
        .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
+       .atu_get_hash = mv88e6165_g1_atu_get_hash,
+       .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
        .phylink_validate = mv88e6185_phylink_validate,
@@ -3999,6 +4117,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
        .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
+       .atu_get_hash = mv88e6165_g1_atu_get_hash,
+       .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
        .avb_ops = &mv88e6352_avb_ops,
@@ -4045,6 +4165,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
        .rmu_disable = mv88e6352_g1_rmu_disable,
+       .atu_get_hash = mv88e6165_g1_atu_get_hash,
+       .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
        .serdes_get_lane = mv88e6352_serdes_get_lane,
@@ -4101,6 +4223,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
        .rmu_disable = mv88e6390_g1_rmu_disable,
+       .atu_get_hash = mv88e6165_g1_atu_get_hash,
+       .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6390_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
        .serdes_power = mv88e6390_serdes_power,
@@ -4154,6 +4278,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
        .rmu_disable = mv88e6390_g1_rmu_disable,
+       .atu_get_hash = mv88e6165_g1_atu_get_hash,
+       .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6390_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
        .serdes_power = mv88e6390_serdes_power,
@@ -4929,6 +5055,7 @@ static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port,
 static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
        .get_tag_protocol       = mv88e6xxx_get_tag_protocol,
        .setup                  = mv88e6xxx_setup,
+       .teardown               = mv88e6xxx_teardown,
        .phylink_validate       = mv88e6xxx_validate,
        .phylink_mac_link_state = mv88e6xxx_link_state,
        .phylink_mac_config     = mv88e6xxx_mac_config,
@@ -4971,6 +5098,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
        .port_txtstamp          = mv88e6xxx_port_txtstamp,
        .port_rxtstamp          = mv88e6xxx_port_rxtstamp,
        .get_ts_info            = mv88e6xxx_get_ts_info,
+       .devlink_param_get      = mv88e6xxx_devlink_param_get,
+       .devlink_param_set      = mv88e6xxx_devlink_param_set,
 };
 
 static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
index e9b1a1a..52f7726 100644 (file)
@@ -497,6 +497,10 @@ struct mv88e6xxx_ops {
        int (*serdes_get_stats)(struct mv88e6xxx_chip *chip,  int port,
                                uint64_t *data);
 
+       /* Address Translation Unit operations */
+       int (*atu_get_hash)(struct mv88e6xxx_chip *chip, u8 *hash);
+       int (*atu_set_hash)(struct mv88e6xxx_chip *chip, u8 hash);
+
        /* VLAN Translation Unit operations */
        int (*vtu_getnext)(struct mv88e6xxx_chip *chip,
                           struct mv88e6xxx_vtu_entry *entry);
index 0870fcc..40fc0e1 100644 (file)
 /* Offset 0x0A: ATU Control Register */
 #define MV88E6XXX_G1_ATU_CTL           0x0a
 #define MV88E6XXX_G1_ATU_CTL_LEARN2ALL 0x0008
+#define MV88E6161_G1_ATU_CTL_HASH_MASK 0x0003
 
 /* Offset 0x0B: ATU Operation Register */
 #define MV88E6XXX_G1_ATU_OP                            0x0b
@@ -318,6 +319,8 @@ int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
                            bool all);
 int mv88e6xxx_g1_atu_prob_irq_setup(struct mv88e6xxx_chip *chip);
 void mv88e6xxx_g1_atu_prob_irq_free(struct mv88e6xxx_chip *chip);
+int mv88e6165_g1_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash);
+int mv88e6165_g1_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash);
 
 int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
                             struct mv88e6xxx_vtu_entry *entry);
index 792a96e..d8a03bb 100644 (file)
@@ -73,6 +73,38 @@ int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
        return 0;
 }
 
+int mv88e6165_g1_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash)
+{
+       int err;
+       u16 val;
+
+       err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val);
+       if (err)
+               return err;
+
+       *hash = val & MV88E6161_G1_ATU_CTL_HASH_MASK;
+
+       return 0;
+}
+
+int mv88e6165_g1_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash)
+{
+       int err;
+       u16 val;
+
+       if (hash & ~MV88E6161_G1_ATU_CTL_HASH_MASK)
+               return -EINVAL;
+
+       err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val);
+       if (err)
+               return err;
+
+       val &= ~MV88E6161_G1_ATU_CTL_HASH_MASK;
+       val |= hash;
+
+       return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_CTL, val);
+}
+
 /* Offset 0x0B: ATU Operation Register */
 
 static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip)
index 7e742cd..36c6ed9 100644 (file)
@@ -1083,7 +1083,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
                return -ENOMEM;
 
        priv->ds->dev = &mdiodev->dev;
-       priv->ds->num_ports = DSA_MAX_PORTS;
+       priv->ds->num_ports = QCA8K_NUM_PORTS;
        priv->ds->priv = priv;
        priv->ops = qca8k_switch_ops;
        priv->ds->ops = &priv->ops;
index f40b248..ffac0ea 100644 (file)
@@ -26,8 +26,8 @@ config NET_DSA_SJA1105_PTP
 
 config NET_DSA_SJA1105_TAS
        bool "Support for the Time-Aware Scheduler on NXP SJA1105"
-       depends on NET_DSA_SJA1105
-       depends on NET_SCH_TAPRIO
+       depends on NET_DSA_SJA1105 && NET_SCH_TAPRIO
+       depends on NET_SCH_TAPRIO=y || NET_DSA_SJA1105=m
        help
          This enables support for the TTEthernet-based egress scheduling
          engine in the SJA1105 DSA driver, which is controlled using a
index e8e9c16..4ded81b 100644 (file)
@@ -78,7 +78,6 @@ source "drivers/net/ethernet/freescale/Kconfig"
 source "drivers/net/ethernet/fujitsu/Kconfig"
 source "drivers/net/ethernet/google/Kconfig"
 source "drivers/net/ethernet/hisilicon/Kconfig"
-source "drivers/net/ethernet/hp/Kconfig"
 source "drivers/net/ethernet/huawei/Kconfig"
 source "drivers/net/ethernet/i825xx/Kconfig"
 source "drivers/net/ethernet/ibm/Kconfig"
index 05abebc..f8f38dc 100644 (file)
@@ -41,7 +41,6 @@ obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
 obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
 obj-$(CONFIG_NET_VENDOR_GOOGLE) += google/
 obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
-obj-$(CONFIG_NET_VENDOR_HP) += hp/
 obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/
 obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
 obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
index bb6fbba..8175513 100644 (file)
@@ -533,7 +533,7 @@ void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp)
        struct skb_shared_hwtstamps hwtstamp;
 
        if (!skb) {
-               netdev_err(aq_nic->ndev, "have timestamp but tx_queus empty\n");
+               netdev_err(aq_nic->ndev, "have timestamp but tx_queues empty\n");
                return;
        }
 
@@ -678,6 +678,8 @@ static int aq_ptp_poll(struct napi_struct *napi, int budget)
 
                err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
                                                              &aq_ptp->hwts_rx);
+               if (err < 0)
+                       goto err_exit;
 
                was_cleaned = true;
        }
@@ -713,7 +715,7 @@ static int aq_ptp_poll(struct napi_struct *napi, int budget)
        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                aq_nic->aq_hw_ops->hw_irq_enable(aq_nic->aq_hw,
-                                       1 << aq_ptp->ptp_ring_param.vec_idx);
+                                       BIT_ULL(aq_ptp->ptp_ring_param.vec_idx));
        }
 
 err_exit:
@@ -1375,7 +1377,7 @@ static int aq_ptp_check_sync1588(struct aq_ptp_s *aq_ptp)
        return 0;
 }
 
-void aq_ptp_poll_sync_work_cb(struct work_struct *w)
+static void aq_ptp_poll_sync_work_cb(struct work_struct *w)
 {
        struct delayed_work *dw = to_delayed_work(w);
        struct aq_ptp_s *aq_ptp = container_of(dw, struct aq_ptp_s, poll_sync);
index bf503a4..2319064 100644 (file)
@@ -9,7 +9,6 @@
 #define AQ_PTP_H
 
 #include <linux/net_tstamp.h>
-#include <linux/version.h>
 
 #if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
 
index abee561..c7297ca 100644 (file)
@@ -1152,7 +1152,7 @@ static int hw_atl_b0_set_sys_clock(struct aq_hw_s *self, u64 time, u64 ts)
        return hw_atl_b0_adj_sys_clock(self, delta);
 }
 
-int hw_atl_b0_ts_to_sys_clock(struct aq_hw_s *self, u64 ts, u64 *time)
+static int hw_atl_b0_ts_to_sys_clock(struct aq_hw_s *self, u64 ts, u64 *time)
 {
        *time = self->ptp_clk_offset + ts;
        return 0;
index 42d2e1b..664d664 100644 (file)
@@ -256,6 +256,9 @@ static int emac_rockchip_remove(struct platform_device *pdev)
        if (priv->regulator)
                regulator_disable(priv->regulator);
 
+       if (priv->soc_data->need_div_macclk)
+               clk_disable_unprepare(priv->macclk);
+
        free_netdev(ndev);
        return err;
 }
index b4a8cf6..c071724 100644 (file)
@@ -8762,6 +8762,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
        }
        if (resc_reinit || fw_reset) {
                if (fw_reset) {
+                       if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+                               bnxt_ulp_stop(bp);
                        rc = bnxt_fw_init_one(bp);
                        if (rc) {
                                set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
@@ -9224,13 +9226,16 @@ static int bnxt_open(struct net_device *dev)
        if (rc) {
                bnxt_hwrm_if_change(bp, false);
        } else {
-               if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state) &&
-                   BNXT_PF(bp)) {
-                       struct bnxt_pf_info *pf = &bp->pf;
-                       int n = pf->active_vfs;
+               if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
+                       if (BNXT_PF(bp)) {
+                               struct bnxt_pf_info *pf = &bp->pf;
+                               int n = pf->active_vfs;
 
-                       if (n)
-                               bnxt_cfg_hw_sriov(bp, &n, true);
+                               if (n)
+                                       bnxt_cfg_hw_sriov(bp, &n, true);
+                       }
+                       if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+                               bnxt_ulp_start(bp, 0);
                }
                bnxt_hwmon_open(bp);
        }
@@ -9927,12 +9932,15 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
        if (netif_running(bp->dev)) {
                int rc;
 
-               if (!silent)
+               if (silent) {
+                       bnxt_close_nic(bp, false, false);
+                       bnxt_open_nic(bp, false, false);
+               } else {
                        bnxt_ulp_stop(bp);
-               bnxt_close_nic(bp, false, false);
-               rc = bnxt_open_nic(bp, false, false);
-               if (!silent && !rc)
-                       bnxt_ulp_start(bp);
+                       bnxt_close_nic(bp, true, false);
+                       rc = bnxt_open_nic(bp, true, false);
+                       bnxt_ulp_start(bp, rc);
+               }
        }
 }
 
@@ -10004,7 +10012,7 @@ static void bnxt_timer(struct timer_list *t)
 
        if (bp->link_info.phy_retry) {
                if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
-                       bp->link_info.phy_retry = 0;
+                       bp->link_info.phy_retry = false;
                        netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
                } else {
                        set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
@@ -10048,8 +10056,8 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
 
 static void bnxt_fw_reset_close(struct bnxt *bp)
 {
+       bnxt_ulp_stop(bp);
        __bnxt_close_nic(bp, true, false);
-       bnxt_ulp_irq_stop(bp);
        bnxt_clear_int_mode(bp);
        bnxt_hwrm_func_drv_unrgtr(bp);
        bnxt_free_ctx_mem(bp);
@@ -10382,7 +10390,8 @@ static void bnxt_cleanup_pci(struct bnxt *bp)
 {
        bnxt_unmap_bars(bp, bp->pdev);
        pci_release_regions(bp->pdev);
-       pci_disable_device(bp->pdev);
+       if (pci_is_enabled(bp->pdev))
+               pci_disable_device(bp->pdev);
 }
 
 static void bnxt_init_dflt_coal(struct bnxt *bp)
@@ -10581,14 +10590,23 @@ static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
 static void bnxt_reset_all(struct bnxt *bp)
 {
        struct bnxt_fw_health *fw_health = bp->fw_health;
-       int i;
+       int i, rc;
+
+       if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
+#ifdef CONFIG_TEE_BNXT_FW
+               rc = tee_bnxt_fw_load();
+               if (rc)
+                       netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc);
+               bp->fw_reset_timestamp = jiffies;
+#endif
+               return;
+       }
 
        if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
                for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
                        bnxt_fw_reset_writel(bp, i);
        } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
                struct hwrm_fw_reset_input req = {0};
-               int rc;
 
                bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
                req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
@@ -10669,14 +10687,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
        }
        /* fall through */
-       case BNXT_FW_RESET_STATE_RESET_FW: {
-               u32 wait_dsecs = bp->fw_health->post_reset_wait_dsecs;
-
+       case BNXT_FW_RESET_STATE_RESET_FW:
                bnxt_reset_all(bp);
                bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
-               bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
+               bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
                return;
-       }
        case BNXT_FW_RESET_STATE_ENABLE_DEV:
                if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
                    bp->fw_health) {
@@ -10722,13 +10737,13 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                        clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
                        dev_close(bp->dev);
                }
-               bnxt_ulp_irq_restart(bp, rc);
-               rtnl_unlock();
 
                bp->fw_reset_state = 0;
                /* Make sure fw_reset_state is 0 before clearing the flag */
                smp_mb__before_atomic();
                clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+               bnxt_ulp_start(bp, rc);
+               rtnl_unlock();
                break;
        }
        return;
@@ -10936,7 +10951,7 @@ static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
        }
 }
 
-static LIST_HEAD(bnxt_block_cb_list);
+LIST_HEAD(bnxt_block_cb_list);
 
 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
                         void *type_data)
@@ -11884,6 +11899,7 @@ static int bnxt_suspend(struct device *device)
        int rc = 0;
 
        rtnl_lock();
+       bnxt_ulp_stop(bp);
        if (netif_running(dev)) {
                netif_device_detach(dev);
                rc = bnxt_close(dev);
@@ -11917,6 +11933,7 @@ static int bnxt_resume(struct device *device)
        }
 
 resume_exit:
+       bnxt_ulp_start(bp, rc);
        rtnl_unlock();
        return rc;
 }
@@ -11996,10 +12013,9 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
                if (!err && netif_running(netdev))
                        err = bnxt_open(netdev);
 
-               if (!err) {
+               if (!err)
                        result = PCI_ERS_RESULT_RECOVERED;
-                       bnxt_ulp_start(bp);
-               }
+               bnxt_ulp_start(bp, err);
        }
 
        if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
index d333589..a3545c8 100644 (file)
 #include <net/dst_metadata.h>
 #include <net/xdp.h>
 #include <linux/dim.h>
+#ifdef CONFIG_TEE_BNXT_FW
+#include <linux/firmware/broadcom/tee_bnxt_fw.h>
+#endif
+
+extern struct list_head bnxt_block_cb_list;
 
 struct page_pool;
 
@@ -1241,6 +1246,14 @@ struct bnxt_tc_flow_stats {
        u64             bytes;
 };
 
+#ifdef CONFIG_BNXT_FLOWER_OFFLOAD
+struct bnxt_flower_indr_block_cb_priv {
+       struct net_device *tunnel_netdev;
+       struct bnxt *bp;
+       struct list_head list;
+};
+#endif
+
 struct bnxt_tc_info {
        bool                            enabled;
 
@@ -1804,6 +1817,9 @@ struct bnxt {
 
        u8                      num_leds;
        struct bnxt_led_info    leds[BNXT_MAX_LED];
+       u16                     dump_flag;
+#define BNXT_DUMP_LIVE         0
+#define BNXT_DUMP_CRASH                1
 
        struct bpf_prog         *xdp_prog;
 
@@ -1815,6 +1831,8 @@ struct bnxt {
        u16                     *cfa_code_map; /* cfa_code -> vf_idx map */
        u8                      switch_id[8];
        struct bnxt_tc_info     *tc_info;
+       struct list_head        tc_indr_block_list;
+       struct notifier_block   tc_netdev_nb;
        struct dentry           *debugfs_pdev;
        struct device           *hwmon_dev;
 };
index ff1bc0e..ae4ddf3 100644 (file)
@@ -30,25 +30,20 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
        val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
        health_status = val & 0xffff;
 
-       if (health_status == BNXT_FW_STATUS_HEALTHY) {
-               rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
-                                                 "Healthy;");
-               if (rc)
-                       return rc;
-       } else if (health_status < BNXT_FW_STATUS_HEALTHY) {
-               rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
-                                                 "Not yet completed initialization;");
+       if (health_status < BNXT_FW_STATUS_HEALTHY) {
+               rc = devlink_fmsg_string_pair_put(fmsg, "Description",
+                                                 "Not yet completed initialization");
                if (rc)
                        return rc;
        } else if (health_status > BNXT_FW_STATUS_HEALTHY) {
-               rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
-                                                 "Encountered fatal error and cannot recover;");
+               rc = devlink_fmsg_string_pair_put(fmsg, "Description",
+                                                 "Encountered fatal error and cannot recover");
                if (rc)
                        return rc;
        }
 
        if (val >> 16) {
-               rc = devlink_fmsg_u32_pair_put(fmsg, "Error", val >> 16);
+               rc = devlink_fmsg_u32_pair_put(fmsg, "Error code", val >> 16);
                if (rc)
                        return rc;
        }
@@ -218,25 +213,68 @@ enum bnxt_dl_param_id {
 
 static const struct bnxt_dl_nvm_param nvm_params[] = {
        {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
-        BNXT_NVM_SHARED_CFG, 1},
+        BNXT_NVM_SHARED_CFG, 1, 1},
        {DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI,
-        BNXT_NVM_SHARED_CFG, 1},
+        BNXT_NVM_SHARED_CFG, 1, 1},
        {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
-        NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10},
+        NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4},
        {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
-        NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7},
+        NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4},
        {BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
-        BNXT_NVM_SHARED_CFG, 1},
+        BNXT_NVM_SHARED_CFG, 1, 1},
 };
 
+union bnxt_nvm_data {
+       u8      val8;
+       __le32  val32;
+};
+
+static void bnxt_copy_to_nvm_data(union bnxt_nvm_data *dst,
+                                 union devlink_param_value *src,
+                                 int nvm_num_bits, int dl_num_bytes)
+{
+       u32 val32 = 0;
+
+       if (nvm_num_bits == 1) {
+               dst->val8 = src->vbool;
+               return;
+       }
+       if (dl_num_bytes == 4)
+               val32 = src->vu32;
+       else if (dl_num_bytes == 2)
+               val32 = (u32)src->vu16;
+       else if (dl_num_bytes == 1)
+               val32 = (u32)src->vu8;
+       dst->val32 = cpu_to_le32(val32);
+}
+
+static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
+                                   union bnxt_nvm_data *src,
+                                   int nvm_num_bits, int dl_num_bytes)
+{
+       u32 val32;
+
+       if (nvm_num_bits == 1) {
+               dst->vbool = src->val8;
+               return;
+       }
+       val32 = le32_to_cpu(src->val32);
+       if (dl_num_bytes == 4)
+               dst->vu32 = val32;
+       else if (dl_num_bytes == 2)
+               dst->vu16 = (u16)val32;
+       else if (dl_num_bytes == 1)
+               dst->vu8 = (u8)val32;
+}
+
 static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
                             int msg_len, union devlink_param_value *val)
 {
        struct hwrm_nvm_get_variable_input *req = msg;
-       void *data_addr = NULL, *buf = NULL;
        struct bnxt_dl_nvm_param nvm_param;
-       int bytesize, idx = 0, rc, i;
+       union bnxt_nvm_data *data;
        dma_addr_t data_dma_addr;
+       int idx = 0, rc, i;
 
        /* Get/Set NVM CFG parameter is supported only on PFs */
        if (BNXT_VF(bp))
@@ -257,47 +295,31 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
        else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
                idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
 
-       bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE;
-       switch (bytesize) {
-       case 1:
-               if (nvm_param.num_bits == 1)
-                       buf = &val->vbool;
-               else
-                       buf = &val->vu8;
-               break;
-       case 2:
-               buf = &val->vu16;
-               break;
-       case 4:
-               buf = &val->vu32;
-               break;
-       default:
-               return -EFAULT;
-       }
-
-       data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize,
-                                      &data_dma_addr, GFP_KERNEL);
-       if (!data_addr)
+       data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
+                                 &data_dma_addr, GFP_KERNEL);
+       if (!data)
                return -ENOMEM;
 
        req->dest_data_addr = cpu_to_le64(data_dma_addr);
-       req->data_len = cpu_to_le16(nvm_param.num_bits);
+       req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
        req->option_num = cpu_to_le16(nvm_param.offset);
        req->index_0 = cpu_to_le16(idx);
        if (idx)
                req->dimensions = cpu_to_le16(1);
 
        if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
-               memcpy(data_addr, buf, bytesize);
+               bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
+                                     nvm_param.dl_num_bytes);
                rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
        } else {
                rc = hwrm_send_message_silent(bp, msg, msg_len,
                                              HWRM_CMD_TIMEOUT);
+               if (!rc)
+                       bnxt_copy_from_nvm_data(val, data,
+                                               nvm_param.nvm_num_bits,
+                                               nvm_param.dl_num_bytes);
        }
-       if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
-               memcpy(buf, data_addr, bytesize);
-
-       dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
+       dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
        if (rc == -EACCES)
                netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
        return rc;
index b97e0ba..2f4fd0a 100644 (file)
@@ -52,7 +52,8 @@ struct bnxt_dl_nvm_param {
        u16 id;
        u16 offset;
        u16 dir_type;
-       u16 num_bits;
+       u16 nvm_num_bits;
+       u8 dl_num_bytes;
 };
 
 void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
index 51c1404..f2220b8 100644 (file)
@@ -3311,6 +3311,24 @@ err:
        return rc;
 }
 
+/* ethtool -W handler: select Live (0) or Crash (1) dump type.
+ * Crash dumps are copied out of the TEE, so they are refused unless
+ * CONFIG_TEE_BNXT_FW is built in.
+ */
+static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (dump->flag > BNXT_DUMP_CRASH) {
+               netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
+               return -EINVAL;
+       }
+
+       if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
+               netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
+               return -EOPNOTSUPP;
+       }
+
+       bp->dump_flag = dump->flag;
+       return 0;
+}
+
 static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
 {
        struct bnxt *bp = netdev_priv(dev);
@@ -3323,7 +3341,12 @@ static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
                        bp->ver_resp.hwrm_fw_bld_8b << 8 |
                        bp->ver_resp.hwrm_fw_rsvd_8b;
 
-       return bnxt_get_coredump(bp, NULL, &dump->len);
+       dump->flag = bp->dump_flag;
+       if (bp->dump_flag == BNXT_DUMP_CRASH)
+               dump->len = BNXT_CRASH_DUMP_LEN;
+       else
+               bnxt_get_coredump(bp, NULL, &dump->len);
+       return 0;
 }
 
 static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
@@ -3336,7 +3359,16 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
 
        memset(buf, 0, dump->len);
 
-       return bnxt_get_coredump(bp, buf, &dump->len);
+       dump->flag = bp->dump_flag;
+       if (dump->flag == BNXT_DUMP_CRASH) {
+#ifdef CONFIG_TEE_BNXT_FW
+               return tee_bnxt_copy_coredump(buf, 0, dump->len);
+#endif
+       } else {
+               return bnxt_get_coredump(bp, buf, &dump->len);
+       }
+
+       return 0;
 }
 
 void bnxt_ethtool_init(struct bnxt *bp)
@@ -3446,6 +3478,7 @@ const struct ethtool_ops bnxt_ethtool_ops = {
        .set_phys_id            = bnxt_set_phys_id,
        .self_test              = bnxt_self_test,
        .reset                  = bnxt_reset,
+       .set_dump               = bnxt_set_dump,
        .get_dump_flag          = bnxt_get_dump_flag,
        .get_dump_data          = bnxt_get_dump_data,
 };
index b5b65b3..01de7e7 100644 (file)
@@ -59,6 +59,8 @@ struct hwrm_dbg_cmn_output {
        #define HWRM_DBG_CMN_FLAGS_MORE 1
 };
 
+#define BNXT_CRASH_DUMP_LEN    (8 << 20)
+
 #define BNXT_LED_DFLT_ENA                              \
        (PORT_LED_CFG_REQ_ENABLES_LED0_ID |             \
         PORT_LED_CFG_REQ_ENABLES_LED0_STATE |          \
index c8062d0..174412a 100644 (file)
@@ -16,7 +16,9 @@
 #include <net/tc_act/tc_skbedit.h>
 #include <net/tc_act/tc_mirred.h>
 #include <net/tc_act/tc_vlan.h>
+#include <net/tc_act/tc_pedit.h>
 #include <net/tc_act/tc_tunnel_key.h>
+#include <net/vxlan.h>
 
 #include "bnxt_hsi.h"
 #include "bnxt.h"
@@ -36,6 +38,8 @@
 #define is_vid_exactmatch(vlan_tci_mask)       \
        ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)
 
+static bool is_wildcard(void *mask, int len);
+static bool is_exactmatch(void *mask, int len);
 /* Return the dst fid of the func for flow forwarding
  * For PFs: src_fid is the fid of the PF
  * For VF-reps: src_fid the fid of the VF
@@ -111,10 +115,182 @@ static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
        return 0;
 }
 
+/* Key & Mask from the stack comes unaligned in multiple iterations of 4 bytes
+ * each(u32).
+ * This routine consolidates such multiple unaligned values into one
+ * field each for Key & Mask (for src and dst macs separately)
+ * For example,
+ *                     Mask/Key        Offset  Iteration
+ *                     ==========      ======  =========
+ *     dst mac         0xffffffff      0       1
+ *     dst mac         0x0000ffff      4       2
+ *
+ *     src mac         0xffff0000      4       1
+ *     src mac         0xffffffff      8       2
+ *
+ * The above combination coming from the stack will be consolidated as
+ *                     Mask/Key
+ *                     ==============
+ *     src mac:        0xffffffffffff
+ *     dst mac:        0xffffffffffff
+ */
+/* Merge one 4-byte pedit chunk into the accumulated key/mask at this offset.
+ * Only bits covered by part_mask are taken from part_key; previously merged
+ * bits are preserved.  Unaligned accessors are used because MAC-field offsets
+ * need not be 4-byte aligned within the accumulation buffer.
+ */
+static void bnxt_set_l2_key_mask(u32 part_key, u32 part_mask,
+                                u8 *actual_key, u8 *actual_mask)
+{
+       u32 key = get_unaligned((u32 *)actual_key);
+       u32 mask = get_unaligned((u32 *)actual_mask);
+
+       part_key &= part_mask;
+       part_key |= key & ~part_mask;
+
+       put_unaligned(mask | part_mask, (u32 *)actual_mask);
+       put_unaligned(part_key, (u32 *)actual_key);
+}
+
+/* Convert the accumulated L2 rewrite key/mask into the FW's u16[3] dmac/smac
+ * layout.  eth_addr holds dmac (first 6 bytes) then smac (next 6 bytes);
+ * each half must be either fully wildcarded or an exact match.
+ */
+static int
+bnxt_fill_l2_rewrite_fields(struct bnxt_tc_actions *actions,
+                           u16 *eth_addr, u16 *eth_addr_mask)
+{
+       u16 *p;
+       int j;
+
+       if (unlikely(bnxt_eth_addr_key_mask_invalid(eth_addr, eth_addr_mask)))
+               return -EINVAL;
+
+       if (!is_wildcard(&eth_addr_mask[0], ETH_ALEN)) {
+               if (!is_exactmatch(&eth_addr_mask[0], ETH_ALEN))
+                       return -EINVAL;
+               /* FW expects dmac to be in u16 array format */
+               p = eth_addr;
+               for (j = 0; j < 3; j++)
+                       actions->l2_rewrite_dmac[j] = cpu_to_be16(*(p + j));
+       }
+
+       if (!is_wildcard(&eth_addr_mask[ETH_ALEN], ETH_ALEN)) {
+               if (!is_exactmatch(&eth_addr_mask[ETH_ALEN], ETH_ALEN))
+                       return -EINVAL;
+               /* FW expects smac to be in u16 array format */
+               p = &eth_addr[ETH_ALEN / 2];
+               for (j = 0; j < 3; j++)
+                       actions->l2_rewrite_smac[j] = cpu_to_be16(*(p + j));
+       }
+
+       return 0;
+}
+
+static int
+bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions,
+                   struct flow_action_entry *act, int act_idx, u8 *eth_addr,
+                   u8 *eth_addr_mask)
+{
+       size_t offset_of_ip6_daddr = offsetof(struct ipv6hdr, daddr);
+       size_t offset_of_ip6_saddr = offsetof(struct ipv6hdr, saddr);
+       u32 mask, val, offset, idx;
+       u8 htype;
+
+       offset = act->mangle.offset;
+       htype = act->mangle.htype;
+       mask = ~act->mangle.mask;
+       val = act->mangle.val;
+
+       switch (htype) {
+       case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+               if (offset > PEDIT_OFFSET_SMAC_LAST_4_BYTES) {
+                       netdev_err(bp->dev,
+                                  "%s: eth_hdr: Invalid pedit field\n",
+                                  __func__);
+                       return -EINVAL;
+               }
+               actions->flags |= BNXT_TC_ACTION_FLAG_L2_REWRITE;
+
+               bnxt_set_l2_key_mask(val, mask, &eth_addr[offset],
+                                    &eth_addr_mask[offset]);
+               break;
+       case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+               actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE;
+               actions->nat.l3_is_ipv4 = true;
+               if (offset ==  offsetof(struct iphdr, saddr)) {
+                       actions->nat.src_xlate = true;
+                       actions->nat.l3.ipv4.saddr.s_addr = htonl(val);
+               } else if (offset ==  offsetof(struct iphdr, daddr)) {
+                       actions->nat.src_xlate = false;
+                       actions->nat.l3.ipv4.daddr.s_addr = htonl(val);
+               } else {
+                       netdev_err(bp->dev,
+                                  "%s: IPv4_hdr: Invalid pedit field\n",
+                                  __func__);
+                       return -EINVAL;
+               }
+
+               netdev_dbg(bp->dev, "nat.src_xlate = %d src IP: %pI4 dst ip : %pI4\n",
+                          actions->nat.src_xlate, &actions->nat.l3.ipv4.saddr,
+                          &actions->nat.l3.ipv4.daddr);
+               break;
+
+       case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+               actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE;
+               actions->nat.l3_is_ipv4 = false;
+               if (offset >= offset_of_ip6_saddr &&
+                   offset < offset_of_ip6_daddr) {
+                       /* 16 byte IPv6 address comes in 4 iterations of
+                        * 4byte chunks each
+                        */
+                       actions->nat.src_xlate = true;
+                       idx = (offset - offset_of_ip6_saddr) / 4;
+                       /* First 4bytes will be copied to idx 0 and so on */
+                       actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val);
+               } else if (offset >= offset_of_ip6_daddr &&
+                          offset < offset_of_ip6_daddr + 16) {
+                       actions->nat.src_xlate = false;
+                       idx = (offset - offset_of_ip6_daddr) / 4;
+                       /* Dst rewrite must fill daddr, not saddr */
+                       actions->nat.l3.ipv6.daddr.s6_addr32[idx] = htonl(val);
+               } else {
+                       netdev_err(bp->dev,
+                                  "%s: IPv6_hdr: Invalid pedit field\n",
+                                  __func__);
+                       return -EINVAL;
+               }
+               break;
+       case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+       case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+               /* HW does not support L4 rewrite alone without L3
+                * rewrite
+                */
+               if (!(actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE)) {
+                       netdev_err(bp->dev,
+                                  "Need to specify L3 rewrite as well\n");
+                       return -EINVAL;
+               }
+               if (actions->nat.src_xlate)
+                       actions->nat.l4.ports.sport = htons(val);
+               else
+                       actions->nat.l4.ports.dport = htons(val);
+               netdev_dbg(bp->dev, "actions->nat.sport = %d dport = %d\n",
+                          actions->nat.l4.ports.sport,
+                          actions->nat.l4.ports.dport);
+               break;
+       default:
+               netdev_err(bp->dev, "%s: Unsupported pedit hdr type\n",
+                          __func__);
+               return -EINVAL;
+       }
+       return 0;
+}
+
 static int bnxt_tc_parse_actions(struct bnxt *bp,
                                 struct bnxt_tc_actions *actions,
                                 struct flow_action *flow_action)
 {
+       /* Used to store the L2 rewrite mask for dmac (6 bytes) followed by
+        * smac (6 bytes) if rewrite of both is specified, otherwise either
+        * dmac or smac
+        */
+       u16 eth_addr_mask[ETH_ALEN] = { 0 };
+       /* Used to store the L2 rewrite key for dmac (6 bytes) followed by
+        * smac (6 bytes) if rewrite of both is specified, otherwise either
+        * dmac or smac
+        */
+       u16 eth_addr[ETH_ALEN] = { 0 };
        struct flow_action_entry *act;
        int i, rc;
 
@@ -148,11 +324,26 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
                case FLOW_ACTION_TUNNEL_DECAP:
                        actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
                        break;
+               /* Packet edit: L2 rewrite, NAT, NAPT */
+               case FLOW_ACTION_MANGLE:
+                       rc = bnxt_tc_parse_pedit(bp, actions, act, i,
+                                                (u8 *)eth_addr,
+                                                (u8 *)eth_addr_mask);
+                       if (rc)
+                               return rc;
+                       break;
                default:
                        break;
                }
        }
 
+       if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) {
+               rc = bnxt_fill_l2_rewrite_fields(actions, eth_addr,
+                                                eth_addr_mask);
+               if (rc)
+                       return rc;
+       }
+
        if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
                if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
                        /* dst_fid is PF's fid */
@@ -401,6 +592,76 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
        req.src_fid = cpu_to_le16(flow->src_fid);
        req.ref_flow_handle = ref_flow_handle;
 
+       if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) {
+               memcpy(req.l2_rewrite_dmac, actions->l2_rewrite_dmac,
+                      ETH_ALEN);
+               memcpy(req.l2_rewrite_smac, actions->l2_rewrite_smac,
+                      ETH_ALEN);
+               action_flags |=
+                       CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
+       }
+
+       if (actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE) {
+               if (actions->nat.l3_is_ipv4) {
+                       action_flags |=
+                               CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS;
+
+                       if (actions->nat.src_xlate) {
+                               action_flags |=
+                                       CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC;
+                               /* L3 source rewrite */
+                               req.nat_ip_address[0] =
+                                       actions->nat.l3.ipv4.saddr.s_addr;
+                               /* L4 source port */
+                               if (actions->nat.l4.ports.sport)
+                                       req.nat_port =
+                                               actions->nat.l4.ports.sport;
+                       } else {
+                               action_flags |=
+                                       CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST;
+                               /* L3 destination rewrite */
+                               req.nat_ip_address[0] =
+                                       actions->nat.l3.ipv4.daddr.s_addr;
+                               /* L4 destination port */
+                               if (actions->nat.l4.ports.dport)
+                                       req.nat_port =
+                                               actions->nat.l4.ports.dport;
+                       }
+                       netdev_dbg(bp->dev,
+                                  "req.nat_ip_address: %pI4 src_xlate: %d req.nat_port: %x\n",
+                                  req.nat_ip_address, actions->nat.src_xlate,
+                                  req.nat_port);
+               } else {
+                       if (actions->nat.src_xlate) {
+                               action_flags |=
+                                       CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC;
+                               /* L3 source rewrite */
+                               memcpy(req.nat_ip_address,
+                                      actions->nat.l3.ipv6.saddr.s6_addr32,
+                                      sizeof(req.nat_ip_address));
+                               /* L4 source port */
+                               if (actions->nat.l4.ports.sport)
+                                       req.nat_port =
+                                               actions->nat.l4.ports.sport;
+                       } else {
+                               action_flags |=
+                                       CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST;
+                               /* L3 destination rewrite */
+                               memcpy(req.nat_ip_address,
+                                      actions->nat.l3.ipv6.daddr.s6_addr32,
+                                      sizeof(req.nat_ip_address));
+                               /* L4 destination port */
+                               if (actions->nat.l4.ports.dport)
+                                       req.nat_port =
+                                               actions->nat.l4.ports.dport;
+                       }
+                       netdev_dbg(bp->dev,
+                                  "req.nat_ip_address: %pI6 src_xlate: %d req.nat_port: %x\n",
+                                  req.nat_ip_address, actions->nat.src_xlate,
+                                  req.nat_port);
+               }
+       }
+
        if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
            actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
                req.tunnel_handle = tunnel_handle;
@@ -1274,7 +1535,8 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
 
        if (!bnxt_tc_can_offload(bp, flow)) {
                rc = -EOPNOTSUPP;
-               goto free_node;
+               kfree_rcu(new_node, rcu);
+               return rc;
        }
 
        /* If a flow exists with the same cookie, delete it */
@@ -1580,6 +1842,147 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
        }
 }
 
+/* Classifier callback attached to a tunnel netdev's indirect block.
+ * Only chain 0 flower rules are offloaded; flows are installed against
+ * the PF's fid.
+ */
+static int bnxt_tc_setup_indr_block_cb(enum tc_setup_type type,
+                                      void *type_data, void *cb_priv)
+{
+       struct bnxt_flower_indr_block_cb_priv *priv = cb_priv;
+       struct flow_cls_offload *flower = type_data;
+       struct bnxt *bp = priv->bp;
+
+       if (flower->common.chain_index)
+               return -EOPNOTSUPP;
+
+       switch (type) {
+       case TC_SETUP_CLSFLOWER:
+               return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, flower);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+/* Find the per-tunnel-netdev private entry registered at BIND time,
+ * or NULL if this netdev has no indirect block bound to us.
+ */
+static struct bnxt_flower_indr_block_cb_priv *
+bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
+{
+       struct bnxt_flower_indr_block_cb_priv *cb_priv;
+
+       /* All callback list access should be protected by RTNL. */
+       ASSERT_RTNL();
+
+       list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
+               if (cb_priv->tunnel_netdev == netdev)
+                       return cb_priv;
+
+       return NULL;
+}
+
+/* flow_block_cb release hook: unlink and free our per-netdev private data
+ * when the core drops the last reference to the block callback.
+ */
+static void bnxt_tc_setup_indr_rel(void *cb_priv)
+{
+       struct bnxt_flower_indr_block_cb_priv *priv = cb_priv;
+
+       list_del(&priv->list);
+       kfree(priv);
+}
+
+/* Bind/unbind an indirect flower block on a tunnel netdev.
+ * BIND allocates per-netdev private state and registers a block callback;
+ * UNBIND tears both down.  Only the clsact ingress binder is supported.
+ */
+static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
+                                   struct flow_block_offload *f)
+{
+       struct bnxt_flower_indr_block_cb_priv *cb_priv;
+       struct flow_block_cb *block_cb;
+
+       if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+               return -EOPNOTSUPP;
+
+       switch (f->command) {
+       case FLOW_BLOCK_BIND:
+               cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
+               if (!cb_priv)
+                       return -ENOMEM;
+
+               cb_priv->tunnel_netdev = netdev;
+               cb_priv->bp = bp;
+               list_add(&cb_priv->list, &bp->tc_indr_block_list);
+
+               block_cb = flow_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
+                                              cb_priv, cb_priv,
+                                              bnxt_tc_setup_indr_rel);
+               if (IS_ERR(block_cb)) {
+                       /* undo the list_add; cb_priv was never published */
+                       list_del(&cb_priv->list);
+                       kfree(cb_priv);
+                       return PTR_ERR(block_cb);
+               }
+
+               flow_block_cb_add(block_cb, f);
+               list_add_tail(&block_cb->driver_list, &bnxt_block_cb_list);
+               break;
+       case FLOW_BLOCK_UNBIND:
+               cb_priv = bnxt_tc_indr_block_cb_lookup(bp, netdev);
+               if (!cb_priv)
+                       return -ENOENT;
+
+               block_cb = flow_block_cb_lookup(f->block,
+                                               bnxt_tc_setup_indr_block_cb,
+                                               cb_priv);
+               if (!block_cb)
+                       return -ENOENT;
+
+               /* cb_priv itself is freed by bnxt_tc_setup_indr_rel() */
+               flow_block_cb_remove(block_cb, f);
+               list_del(&block_cb->driver_list);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+       return 0;
+}
+
+/* Entry point registered with the indirect block core; dispatches
+ * TC_SETUP_BLOCK requests to the bind/unbind handler.
+ */
+static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
+                                enum tc_setup_type type, void *type_data)
+{
+       switch (type) {
+       case TC_SETUP_BLOCK:
+               return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+/* Only VXLAN tunnel devices are eligible for indirect block offload. */
+static bool bnxt_is_netdev_indr_offload(struct net_device *netdev)
+{
+       return netif_is_vxlan(netdev);
+}
+
+/* Netdevice notifier: (un)register our indirect block callback whenever an
+ * offload-capable tunnel netdev comes or goes.  Registration failure is
+ * logged but not fatal; flows on that tunnel simply won't be offloaded.
+ */
+static int bnxt_tc_indr_block_event(struct notifier_block *nb,
+                                   unsigned long event, void *ptr)
+{
+       struct net_device *netdev;
+       struct bnxt *bp;
+       int rc;
+
+       netdev = netdev_notifier_info_to_dev(ptr);
+       if (!bnxt_is_netdev_indr_offload(netdev))
+               return NOTIFY_OK;
+
+       bp = container_of(nb, struct bnxt, tc_netdev_nb);
+
+       switch (event) {
+       case NETDEV_REGISTER:
+               rc = __flow_indr_block_cb_register(netdev, bp,
+                                                  bnxt_tc_setup_indr_cb,
+                                                  bp);
+               if (rc)
+                       netdev_info(bp->dev,
+                                   "Failed to register indirect blk: dev: %s",
+                                   netdev->name);
+               break;
+       case NETDEV_UNREGISTER:
+               __flow_indr_block_cb_unregister(netdev,
+                                               bnxt_tc_setup_indr_cb,
+                                               bp);
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
 static const struct rhashtable_params bnxt_tc_flow_ht_params = {
        .head_offset = offsetof(struct bnxt_tc_flow_node, node),
        .key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
@@ -1663,7 +2066,15 @@ int bnxt_init_tc(struct bnxt *bp)
        bp->dev->hw_features |= NETIF_F_HW_TC;
        bp->dev->features |= NETIF_F_HW_TC;
        bp->tc_info = tc_info;
-       return 0;
+
+       /* init indirect block notifications */
+       INIT_LIST_HEAD(&bp->tc_indr_block_list);
+       bp->tc_netdev_nb.notifier_call = bnxt_tc_indr_block_event;
+       rc = register_netdevice_notifier(&bp->tc_netdev_nb);
+       if (!rc)
+               return 0;
+
+       rhashtable_destroy(&tc_info->encap_table);
 
 destroy_decap_table:
        rhashtable_destroy(&tc_info->decap_table);
@@ -1685,6 +2096,7 @@ void bnxt_shutdown_tc(struct bnxt *bp)
        if (!bnxt_tc_flower_enabled(bp))
                return;
 
+       unregister_netdevice_notifier(&bp->tc_netdev_nb);
        rhashtable_destroy(&tc_info->flow_table);
        rhashtable_destroy(&tc_info->l2_table);
        rhashtable_destroy(&tc_info->decap_l2_table);
index 4f05305..2867549 100644 (file)
@@ -62,6 +62,12 @@ struct bnxt_tc_tunnel_key {
        __be32                  id;
 };
 
+#define bnxt_eth_addr_key_mask_invalid(eth_addr, eth_addr_mask)                \
+       ((is_wildcard(&(eth_addr)[0], ETH_ALEN) &&                      \
+        is_wildcard(&(eth_addr)[ETH_ALEN], ETH_ALEN)) ||               \
+       (is_wildcard(&(eth_addr_mask)[0], ETH_ALEN) &&                  \
+        is_wildcard(&(eth_addr_mask)[ETH_ALEN], ETH_ALEN)))
+
 struct bnxt_tc_actions {
        u32                             flags;
 #define BNXT_TC_ACTION_FLAG_FWD                        BIT(0)
@@ -71,6 +77,8 @@ struct bnxt_tc_actions {
 #define BNXT_TC_ACTION_FLAG_DROP               BIT(5)
 #define BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP       BIT(6)
 #define BNXT_TC_ACTION_FLAG_TUNNEL_DECAP       BIT(7)
+#define BNXT_TC_ACTION_FLAG_L2_REWRITE         BIT(8)
+#define BNXT_TC_ACTION_FLAG_NAT_XLATE          BIT(9)
 
        u16                             dst_fid;
        struct net_device               *dst_dev;
@@ -79,6 +87,18 @@ struct bnxt_tc_actions {
 
        /* tunnel encap */
        struct ip_tunnel_key            tun_encap_key;
+#define        PEDIT_OFFSET_SMAC_LAST_4_BYTES          0x8
+       __be16                          l2_rewrite_dmac[3];
+       __be16                          l2_rewrite_smac[3];
+       struct {
+               bool src_xlate;  /* true => translate src,
+                                 * false => translate dst
+                                 * Mutually exclusive, i.e cannot set both
+                                 */
+               bool l3_is_ipv4; /* false means L3 is ipv6 */
+               struct bnxt_tc_l3_key l3;
+               struct bnxt_tc_l4_key l4;
+       } nat;
 };
 
 struct bnxt_tc_flow {
index b2c1609..077fd10 100644 (file)
@@ -182,7 +182,7 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
 
        edev->ulp_tbl[ulp_id].msix_requested = 0;
        edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
-       if (netif_running(dev)) {
+       if (netif_running(dev) && !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) {
                bnxt_close_nic(bp, true, false);
                bnxt_open_nic(bp, true, false);
        }
@@ -266,6 +266,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
        if (!edev)
                return;
 
+       edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
        for (i = 0; i < BNXT_MAX_ULP; i++) {
                struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
 
@@ -276,7 +277,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
        }
 }
 
-void bnxt_ulp_start(struct bnxt *bp)
+void bnxt_ulp_start(struct bnxt *bp, int err)
 {
        struct bnxt_en_dev *edev = bp->edev;
        struct bnxt_ulp_ops *ops;
@@ -285,6 +286,11 @@ void bnxt_ulp_start(struct bnxt *bp)
        if (!edev)
                return;
 
+       edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
+
+       if (err)
+               return;
+
        for (i = 0; i < BNXT_MAX_ULP; i++) {
                struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
 
index cd78453..9895406 100644 (file)
@@ -64,6 +64,7 @@ struct bnxt_en_dev {
        #define BNXT_EN_FLAG_ROCE_CAP           (BNXT_EN_FLAG_ROCEV1_CAP | \
                                                 BNXT_EN_FLAG_ROCEV2_CAP)
        #define BNXT_EN_FLAG_MSIX_REQUESTED     0x4
+       #define BNXT_EN_FLAG_ULP_STOPPED        0x8
        const struct bnxt_en_ops        *en_ops;
        struct bnxt_ulp                 ulp_tbl[BNXT_MAX_ULP];
 };
@@ -92,7 +93,7 @@ int bnxt_get_ulp_msix_num(struct bnxt *bp);
 int bnxt_get_ulp_msix_base(struct bnxt *bp);
 int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
 void bnxt_ulp_stop(struct bnxt *bp);
-void bnxt_ulp_start(struct bnxt *bp);
+void bnxt_ulp_start(struct bnxt *bp, int err);
 void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
 void bnxt_ulp_shutdown(struct bnxt *bp);
 void bnxt_ulp_irq_stop(struct bnxt *bp);
index acb0168..1e09fdb 100644 (file)
@@ -1007,14 +1007,14 @@ static void bgx_poll_for_link(struct work_struct *work)
 
        if ((spu_link & SPU_STATUS1_RCV_LNK) &&
            !(smu_link & SMU_RX_CTL_STATUS)) {
-               lmac->link_up = 1;
+               lmac->link_up = true;
                if (lmac->lmac_type == BGX_MODE_XLAUI)
                        lmac->last_speed = SPEED_40000;
                else
                        lmac->last_speed = SPEED_10000;
                lmac->last_duplex = DUPLEX_FULL;
        } else {
-               lmac->link_up = 0;
+               lmac->link_up = false;
                lmac->last_speed = SPEED_UNKNOWN;
                lmac->last_duplex = DUPLEX_UNKNOWN;
        }
@@ -1023,7 +1023,7 @@ static void bgx_poll_for_link(struct work_struct *work)
                if (lmac->link_up) {
                        if (bgx_xaui_check_link(lmac)) {
                                /* Errors, clear link_up state */
-                               lmac->link_up = 0;
+                               lmac->link_up = false;
                                lmac->last_speed = SPEED_UNKNOWN;
                                lmac->last_duplex = DUPLEX_UNKNOWN;
                        }
@@ -1055,11 +1055,11 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
        if ((lmac->lmac_type == BGX_MODE_SGMII) ||
            (lmac->lmac_type == BGX_MODE_QSGMII) ||
            (lmac->lmac_type == BGX_MODE_RGMII)) {
-               lmac->is_sgmii = 1;
+               lmac->is_sgmii = true;
                if (bgx_lmac_sgmii_init(bgx, lmac))
                        return -1;
        } else {
-               lmac->is_sgmii = 0;
+               lmac->is_sgmii = false;
                if (bgx_lmac_xaui_init(bgx, lmac))
                        return -1;
        }
@@ -1304,7 +1304,7 @@ static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
 {
        if ((lmac->lmac_type != BGX_MODE_10G_KR) &&
            (lmac->lmac_type != BGX_MODE_40G_KR)) {
-               lmac->use_training = 0;
+               lmac->use_training = false;
                return;
        }
 
index a4dead4..86b528d 100644 (file)
@@ -695,10 +695,10 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
        lld->write_cmpl_support = adap->params.write_cmpl_support;
 }
 
-static void uld_attach(struct adapter *adap, unsigned int uld)
+static int uld_attach(struct adapter *adap, unsigned int uld)
 {
-       void *handle;
        struct cxgb4_lld_info lli;
+       void *handle;
 
        uld_init(adap, &lli);
        uld_queue_init(adap, uld, &lli);
@@ -708,7 +708,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
                dev_warn(adap->pdev_dev,
                         "could not attach to the %s driver, error %ld\n",
                         adap->uld[uld].name, PTR_ERR(handle));
-               return;
+               return PTR_ERR(handle);
        }
 
        adap->uld[uld].handle = handle;
@@ -716,22 +716,22 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 
        if (adap->flags & CXGB4_FULL_INIT_DONE)
                adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
+
+       return 0;
 }
 
-/**
- *     cxgb4_register_uld - register an upper-layer driver
- *     @type: the ULD type
- *     @p: the ULD methods
+/* cxgb4_register_uld - register an upper-layer driver
+ * @type: the ULD type
+ * @p: the ULD methods
  *
- *     Registers an upper-layer driver with this driver and notifies the ULD
- *     about any presently available devices that support its type.  Returns
- *     %-EBUSY if a ULD of the same type is already registered.
+ * Registers an upper-layer driver with this driver and notifies the ULD
+ * about any presently available devices that support its type.
  */
 void cxgb4_register_uld(enum cxgb4_uld type,
                        const struct cxgb4_uld_info *p)
 {
-       int ret = 0;
        struct adapter *adap;
+       int ret = 0;
 
        if (type >= CXGB4_ULD_MAX)
                return;
@@ -763,8 +763,12 @@ void cxgb4_register_uld(enum cxgb4_uld type,
                if (ret)
                        goto free_irq;
                adap->uld[type] = *p;
-               uld_attach(adap, type);
+               ret = uld_attach(adap, type);
+               if (ret)
+                       goto free_txq;
                continue;
+free_txq:
+               release_sge_txq_uld(adap, type);
 free_irq:
                if (adap->flags & CXGB4_FULL_INIT_DONE)
                        quiesce_rx_uld(adap, type);
index 1a407d3..e9e4500 100644 (file)
@@ -351,15 +351,13 @@ exists:
 static void _t4_l2e_free(struct l2t_entry *e)
 {
        struct l2t_data *d;
-       struct sk_buff *skb;
 
        if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
                if (e->neigh) {
                        neigh_release(e->neigh);
                        e->neigh = NULL;
                }
-               while ((skb = __skb_dequeue(&e->arpq)) != NULL)
-                       kfree_skb(skb);
+               __skb_queue_purge(&e->arpq);
        }
 
        d = container_of(e, struct l2t_data, l2tab[e->idx]);
@@ -370,7 +368,6 @@ static void _t4_l2e_free(struct l2t_entry *e)
 static void t4_l2e_free(struct l2t_entry *e)
 {
        struct l2t_data *d;
-       struct sk_buff *skb;
 
        spin_lock_bh(&e->lock);
        if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
@@ -378,8 +375,7 @@ static void t4_l2e_free(struct l2t_entry *e)
                        neigh_release(e->neigh);
                        e->neigh = NULL;
                }
-               while ((skb = __skb_dequeue(&e->arpq)) != NULL)
-                       kfree_skb(skb);
+               __skb_queue_purge(&e->arpq);
        }
        spin_unlock_bh(&e->lock);
 
index b3da81e..928bfea 100644 (file)
@@ -3791,15 +3791,11 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
         * write the CIDX Updates into the Status Page at the end of the
         * TX Queue.
         */
-       c.autoequiqe_to_viid = htonl((dbqt
-                                     ? FW_EQ_ETH_CMD_AUTOEQUIQE_F
-                                     : FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
+       c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
                                     FW_EQ_ETH_CMD_VIID_V(pi->viid));
 
        c.fetchszm_to_iqid =
-               htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(dbqt
-                                                ? HOSTFCMODE_INGRESS_QUEUE_X
-                                                : HOSTFCMODE_STATUS_PAGE_X) |
+               htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
                      FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
                      FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
 
index 0b12f89..9fdf77d 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Register definitions for Gemini GMAC Ethernet device driver
  *
  * Copyright (C) 2006 Storlink, Corp.
index 8243102..da0c506 100644 (file)
@@ -730,6 +730,18 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
         */
        nfrags = skb_shinfo(skb)->nr_frags;
 
+       /* Setup HW checksumming */
+       csum_vlan = 0;
+       if (skb->ip_summed == CHECKSUM_PARTIAL &&
+           !ftgmac100_prep_tx_csum(skb, &csum_vlan))
+               goto drop;
+
+       /* Add VLAN tag */
+       if (skb_vlan_tag_present(skb)) {
+               csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
+               csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
+       }
+
        /* Get header len */
        len = skb_headlen(skb);
 
@@ -756,19 +768,6 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
        if (nfrags == 0)
                f_ctl_stat |= FTGMAC100_TXDES0_LTS;
        txdes->txdes3 = cpu_to_le32(map);
-
-       /* Setup HW checksumming */
-       csum_vlan = 0;
-       if (skb->ip_summed == CHECKSUM_PARTIAL &&
-           !ftgmac100_prep_tx_csum(skb, &csum_vlan))
-               goto drop;
-
-       /* Add VLAN tag */
-       if (skb_vlan_tag_present(skb)) {
-               csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
-               csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
-       }
-
        txdes->txdes1 = cpu_to_le32(csum_vlan);
 
        /* Next descriptor */
index d321454..6a9d12d 100644 (file)
@@ -178,31 +178,9 @@ struct fm_port_fqs {
 /* All the dpa bps in use at any moment */
 static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
 
-/* The raw buffer size must be cacheline aligned */
 #define DPAA_BP_RAW_SIZE 4096
-/* When using more than one buffer pool, the raw sizes are as follows:
- * 1 bp: 4KB
- * 2 bp: 2KB, 4KB
- * 3 bp: 1KB, 2KB, 4KB
- * 4 bp: 1KB, 2KB, 4KB, 8KB
- */
-static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
-{
-       size_t res = DPAA_BP_RAW_SIZE / 4;
-       u8 i;
-
-       for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
-               res *= 2;
-       return res;
-}
 
-/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
- * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
- * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
- * half-page-aligned buffers, so we reserve some more space for start-of-buffer
- * alignment.
- */
-#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
+#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
 
 static int dpaa_max_frm;
 
@@ -288,7 +266,7 @@ static int dpaa_stop(struct net_device *net_dev)
        /* Allow the Fman (Tx) port to process in-flight frames before we
         * try switching it off.
         */
-       usleep_range(5000, 10000);
+       msleep(200);
 
        err = mac_dev->stop(mac_dev);
        if (err < 0)
@@ -305,6 +283,8 @@ static int dpaa_stop(struct net_device *net_dev)
                phy_disconnect(net_dev->phydev);
        net_dev->phydev = NULL;
 
+       msleep(200);
+
        return err;
 }
 
@@ -596,10 +576,7 @@ static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
 
 static void dpaa_bps_free(struct dpaa_priv *priv)
 {
-       int i;
-
-       for (i = 0; i < DPAA_BPS_NUM; i++)
-               dpaa_bp_free(priv->dpaa_bps[i]);
+       dpaa_bp_free(priv->dpaa_bp);
 }
 
 /* Use multiple WQs for FQ assignment:
@@ -773,7 +750,7 @@ static void dpaa_release_channel(void)
        qman_release_pool(rx_pool_channel);
 }
 
-static void dpaa_eth_add_channel(u16 channel)
+static void dpaa_eth_add_channel(u16 channel, struct device *dev)
 {
        u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
        const cpumask_t *cpus = qman_affine_cpus();
@@ -783,6 +760,7 @@ static void dpaa_eth_add_channel(u16 channel)
        for_each_cpu_and(cpu, cpus, cpu_online_mask) {
                portal = qman_get_affine_portal(cpu);
                qman_p_static_dequeue_add(portal, pool);
+               qman_start_using_portal(portal, dev);
        }
 }
 
@@ -1197,15 +1175,15 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
        return err;
 }
 
-static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
-                                size_t count, struct dpaa_fq *errq,
+static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
+                                struct dpaa_fq *errq,
                                 struct dpaa_fq *defq, struct dpaa_fq *pcdq,
                                 struct dpaa_buffer_layout *buf_layout)
 {
        struct fman_buffer_prefix_content buf_prefix_content;
        struct fman_port_rx_params *rx_p;
        struct fman_port_params params;
-       int i, err;
+       int err;
 
        memset(&params, 0, sizeof(params));
        memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
@@ -1224,12 +1202,9 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
                rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
        }
 
-       count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
-       rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
-       for (i = 0; i < count; i++) {
-               rx_p->ext_buf_pools.ext_buf_pool[i].id =  bps[i]->bpid;
-               rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
-       }
+       rx_p->ext_buf_pools.num_of_pools_used = 1;
+       rx_p->ext_buf_pools.ext_buf_pool[0].id =  bp->bpid;
+       rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size;
 
        err = fman_port_config(port, &params);
        if (err) {
@@ -1252,7 +1227,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
 }
 
 static int dpaa_eth_init_ports(struct mac_device *mac_dev,
-                              struct dpaa_bp **bps, size_t count,
+                              struct dpaa_bp *bp,
                               struct fm_port_fqs *port_fqs,
                               struct dpaa_buffer_layout *buf_layout,
                               struct device *dev)
@@ -1266,7 +1241,7 @@ static int dpaa_eth_init_ports(struct mac_device *mac_dev,
        if (err)
                return err;
 
-       err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+       err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq,
                                    port_fqs->rx_defq, port_fqs->rx_pcdq,
                                    &buf_layout[RX]);
 
@@ -1335,13 +1310,14 @@ static void dpaa_fd_release(const struct net_device *net_dev,
                vaddr = phys_to_virt(qm_fd_addr(fd));
                sgt = vaddr + qm_fd_get_offset(fd);
 
-               dma_unmap_single(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
-                                dpaa_bp->size, DMA_FROM_DEVICE);
+               dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
+                              DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
 
                dpaa_release_sgt_members(sgt);
 
-               addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, vaddr,
-                                     dpaa_bp->size, DMA_FROM_DEVICE);
+               addr = dma_map_page(dpaa_bp->priv->rx_dma_dev,
+                                   virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE,
+                                   DMA_FROM_DEVICE);
                if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
                        netdev_err(net_dev, "DMA mapping failed\n");
                        return;
@@ -1396,7 +1372,7 @@ static void count_ern(struct dpaa_percpu_priv *percpu_priv,
 static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
                               struct sk_buff *skb,
                               struct qm_fd *fd,
-                              char *parse_results)
+                              void *parse_results)
 {
        struct fman_prs_result *parse_result;
        u16 ethertype = ntohs(skb->protocol);
@@ -1491,21 +1467,18 @@ static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
        struct net_device *net_dev = dpaa_bp->priv->net_dev;
        struct bm_buffer bmb[8];
        dma_addr_t addr;
-       void *new_buf;
+       struct page *p;
        u8 i;
 
        for (i = 0; i < 8; i++) {
-               new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
-               if (unlikely(!new_buf)) {
-                       netdev_err(net_dev,
-                                  "netdev_alloc_frag() failed, size %zu\n",
-                                  dpaa_bp->raw_size);
+               p = dev_alloc_pages(0);
+               if (unlikely(!p)) {
+                       netdev_err(net_dev, "dev_alloc_pages() failed\n");
                        goto release_previous_buffs;
                }
-               new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
 
-               addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, new_buf,
-                                     dpaa_bp->size, DMA_FROM_DEVICE);
+               addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0,
+                                   DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
                                               addr))) {
                        netdev_err(net_dev, "DMA map failed\n");
@@ -1583,17 +1556,16 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
 {
        struct dpaa_bp *dpaa_bp;
        int *countptr;
-       int res, i;
+       int res;
+
+       dpaa_bp = priv->dpaa_bp;
+       if (!dpaa_bp)
+               return -EINVAL;
+       countptr = this_cpu_ptr(dpaa_bp->percpu_count);
+       res  = dpaa_eth_refill_bpool(dpaa_bp, countptr);
+       if (res)
+               return res;
 
-       for (i = 0; i < DPAA_BPS_NUM; i++) {
-               dpaa_bp = priv->dpaa_bps[i];
-               if (!dpaa_bp)
-                       return -EINVAL;
-               countptr = this_cpu_ptr(dpaa_bp->percpu_count);
-               res  = dpaa_eth_refill_bpool(dpaa_bp, countptr);
-               if (res)
-                       return res;
-       }
        return 0;
 }
 
@@ -1602,68 +1574,74 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
  * Skb freeing is not handled here.
  *
  * This function may be called on error paths in the Tx function, so guard
- * against cases when not all fd relevant fields were filled in.
+ * against cases when not all fd relevant fields were filled in. To avoid
+ * reading the invalid transmission timestamp for the error paths set ts to
+ * false.
  *
  * Return the skb backpointer, since for S/G frames the buffer containing it
  * gets freed here.
  */
 static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
-                                         const struct qm_fd *fd)
+                                         const struct qm_fd *fd, bool ts)
 {
        const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
        struct device *dev = priv->net_dev->dev.parent;
        struct skb_shared_hwtstamps shhwtstamps;
        dma_addr_t addr = qm_fd_addr(fd);
+       void *vaddr = phys_to_virt(addr);
        const struct qm_sg_entry *sgt;
-       struct sk_buff **skbh, *skb;
-       int nr_frags, i;
+       struct sk_buff *skb;
        u64 ns;
-
-       skbh = (struct sk_buff **)phys_to_virt(addr);
-       skb = *skbh;
-
-       if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
-               memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-
-               if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
-                                         &ns)) {
-                       shhwtstamps.hwtstamp = ns_to_ktime(ns);
-                       skb_tstamp_tx(skb, &shhwtstamps);
-               } else {
-                       dev_warn(dev, "fman_port_get_tstamp failed!\n");
-               }
-       }
+       int i;
 
        if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
-               nr_frags = skb_shinfo(skb)->nr_frags;
-               dma_unmap_single(priv->tx_dma_dev, addr,
-                                qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
-                                dma_dir);
+               dma_unmap_page(priv->tx_dma_dev, addr,
+                              qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
+                              dma_dir);
 
                /* The sgt buffer has been allocated with netdev_alloc_frag(),
                 * it's from lowmem.
                 */
-               sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
+               sgt = vaddr + qm_fd_get_offset(fd);
 
                /* sgt[0] is from lowmem, was dma_map_single()-ed */
                dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
                                 qm_sg_entry_get_len(&sgt[0]), dma_dir);
 
                /* remaining pages were mapped with skb_frag_dma_map() */
-               for (i = 1; i <= nr_frags; i++) {
+               for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) &&
+                    !qm_sg_entry_is_final(&sgt[i - 1]); i++) {
                        WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
 
                        dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
                                       qm_sg_entry_get_len(&sgt[i]), dma_dir);
                }
-
-               /* Free the page frag that we allocated on Tx */
-               skb_free_frag(phys_to_virt(addr));
        } else {
                dma_unmap_single(priv->tx_dma_dev, addr,
-                                skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
+                                priv->tx_headroom + qm_fd_get_length(fd),
+                                dma_dir);
+       }
+
+       skb = *(struct sk_buff **)vaddr;
+
+       /* DMA unmapping is required before accessing the HW provided info */
+       if (ts && priv->tx_tstamp &&
+           skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+               memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+               if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr,
+                                         &ns)) {
+                       shhwtstamps.hwtstamp = ns_to_ktime(ns);
+                       skb_tstamp_tx(skb, &shhwtstamps);
+               } else {
+                       dev_warn(dev, "fman_port_get_tstamp failed!\n");
+               }
        }
 
+       if (qm_fd_get_format(fd) == qm_fd_sg)
+               /* Free the page that we allocated on Tx for the SGT */
+               free_pages((unsigned long)vaddr, 0);
+
        return skb;
 }
 
@@ -1717,7 +1695,7 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
        return skb;
 
 free_buffer:
-       skb_free_frag(vaddr);
+       free_pages((unsigned long)vaddr, 0);
        return NULL;
 }
 
@@ -1764,8 +1742,8 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
                        goto free_buffers;
 
                count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-               dma_unmap_single(dpaa_bp->priv->rx_dma_dev, sg_addr,
-                                dpaa_bp->size, DMA_FROM_DEVICE);
+               dma_unmap_page(priv->rx_dma_dev, sg_addr,
+                              DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
                if (!skb) {
                        sz = dpaa_bp->size +
                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1817,7 +1795,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
        WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
 
        /* free the SG table buffer */
-       skb_free_frag(vaddr);
+       free_pages((unsigned long)vaddr, 0);
 
        return skb;
 
@@ -1834,7 +1812,7 @@ free_buffers:
        for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
                sg_addr = qm_sg_addr(&sgt[i]);
                sg_vaddr = phys_to_virt(sg_addr);
-               skb_free_frag(sg_vaddr);
+               free_pages((unsigned long)sg_vaddr, 0);
                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
                if (dpaa_bp) {
                        count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
@@ -1845,7 +1823,7 @@ free_buffers:
                        break;
        }
        /* free the SGT fragment */
-       skb_free_frag(vaddr);
+       free_pages((unsigned long)vaddr, 0);
 
        return NULL;
 }
@@ -1856,7 +1834,7 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
 {
        struct net_device *net_dev = priv->net_dev;
        enum dma_data_direction dma_dir;
-       unsigned char *buffer_start;
+       unsigned char *buff_start;
        struct sk_buff **skbh;
        dma_addr_t addr;
        int err;
@@ -1865,10 +1843,10 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
         * available, so just use that for offset.
         */
        fd->bpid = FSL_DPAA_BPID_INV;
-       buffer_start = skb->data - priv->tx_headroom;
+       buff_start = skb->data - priv->tx_headroom;
        dma_dir = DMA_TO_DEVICE;
 
-       skbh = (struct sk_buff **)buffer_start;
+       skbh = (struct sk_buff **)buff_start;
        *skbh = skb;
 
        /* Enable L3/L4 hardware checksum computation.
@@ -1877,7 +1855,7 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
         * need to write into the skb.
         */
        err = dpaa_enable_tx_csum(priv, skb, fd,
-                                 ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
+                                 buff_start + DPAA_TX_PRIV_DATA_SIZE);
        if (unlikely(err < 0)) {
                if (net_ratelimit())
                        netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1890,8 +1868,8 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
        fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
 
        /* Map the entire buffer size that may be seen by FMan, but no more */
-       addr = dma_map_single(priv->tx_dma_dev, skbh,
-                             skb_tail_pointer(skb) - buffer_start, dma_dir);
+       addr = dma_map_single(priv->tx_dma_dev, buff_start,
+                             priv->tx_headroom + skb->len, dma_dir);
        if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
                if (net_ratelimit())
                        netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
@@ -1910,21 +1888,20 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
        struct net_device *net_dev = priv->net_dev;
        struct qm_sg_entry *sgt;
        struct sk_buff **skbh;
-       int i, j, err, sz;
-       void *buffer_start;
+       void *buff_start;
        skb_frag_t *frag;
        dma_addr_t addr;
        size_t frag_len;
-       void *sgt_buf;
-
-       /* get a page frag to store the SGTable */
-       sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
-       sgt_buf = netdev_alloc_frag(sz);
-       if (unlikely(!sgt_buf)) {
-               netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
-                          sz);
+       struct page *p;
+       int i, j, err;
+
+       /* get a page to store the SGTable */
+       p = dev_alloc_pages(0);
+       if (unlikely(!p)) {
+               netdev_err(net_dev, "dev_alloc_pages() failed\n");
                return -ENOMEM;
        }
+       buff_start = page_address(p);
 
        /* Enable L3/L4 hardware checksum computation.
         *
@@ -1932,7 +1909,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
         * need to write into the skb.
         */
        err = dpaa_enable_tx_csum(priv, skb, fd,
-                                 sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
+                                 buff_start + DPAA_TX_PRIV_DATA_SIZE);
        if (unlikely(err < 0)) {
                if (net_ratelimit())
                        netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1941,7 +1918,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
        }
 
        /* SGT[0] is used by the linear part */
-       sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+       sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom);
        frag_len = skb_headlen(skb);
        qm_sg_entry_set_len(&sgt[0], frag_len);
        sgt[0].bpid = FSL_DPAA_BPID_INV;
@@ -1979,15 +1956,15 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
        /* Set the final bit in the last used entry of the SGT */
        qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
 
+       /* set fd offset to priv->tx_headroom */
        qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
 
        /* DMA map the SGT page */
-       buffer_start = (void *)sgt - priv->tx_headroom;
-       skbh = (struct sk_buff **)buffer_start;
+       skbh = (struct sk_buff **)buff_start;
        *skbh = skb;
 
-       addr = dma_map_single(priv->tx_dma_dev, buffer_start,
-                             priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
+       addr = dma_map_page(priv->tx_dma_dev, p, 0,
+                           priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
        if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
                netdev_err(priv->net_dev, "DMA mapping failed\n");
                err = -EINVAL;
@@ -2007,7 +1984,7 @@ sg_map_failed:
                               qm_sg_entry_get_len(&sgt[j]), dma_dir);
 sg0_map_failed:
 csum_failed:
-       skb_free_frag(sgt_buf);
+       free_pages((unsigned long)buff_start, 0);
 
        return err;
 }
@@ -2114,7 +2091,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
        if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
                return NETDEV_TX_OK;
 
-       dpaa_cleanup_tx_fd(priv, &fd);
+       dpaa_cleanup_tx_fd(priv, &fd, false);
 skb_to_fd_failed:
 enomem:
        percpu_stats->tx_errors++;
@@ -2160,7 +2137,7 @@ static void dpaa_tx_error(struct net_device *net_dev,
 
        percpu_priv->stats.tx_errors++;
 
-       skb = dpaa_cleanup_tx_fd(priv, fd);
+       skb = dpaa_cleanup_tx_fd(priv, fd, false);
        dev_kfree_skb(skb);
 }
 
@@ -2200,7 +2177,7 @@ static void dpaa_tx_conf(struct net_device *net_dev,
 
        percpu_priv->tx_confirm++;
 
-       skb = dpaa_cleanup_tx_fd(priv, fd);
+       skb = dpaa_cleanup_tx_fd(priv, fd, true);
 
        consume_skb(skb);
 }
@@ -2304,8 +2281,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
                return qman_cb_dqrr_consume;
        }
 
-       dma_unmap_single(dpaa_bp->priv->rx_dma_dev, addr, dpaa_bp->size,
-                        DMA_FROM_DEVICE);
+       dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+                      DMA_FROM_DEVICE);
 
        /* prefetch the first 64 bytes of the frame or the SGT start */
        vaddr = phys_to_virt(addr);
@@ -2427,7 +2404,7 @@ static void egress_ern(struct qman_portal *portal,
        percpu_priv->stats.tx_fifo_errors++;
        count_ern(percpu_priv, msg);
 
-       skb = dpaa_cleanup_tx_fd(priv, fd);
+       skb = dpaa_cleanup_tx_fd(priv, fd, false);
        dev_kfree_skb_any(skb);
 }
 
@@ -2660,7 +2637,8 @@ static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
 {
        dma_addr_t addr = bm_buf_addr(bmb);
 
-       dma_unmap_single(bp->priv->rx_dma_dev, addr, bp->size, DMA_FROM_DEVICE);
+       dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+                      DMA_FROM_DEVICE);
 
        skb_free_frag(phys_to_virt(addr));
 }
@@ -2761,13 +2739,13 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
 
 static int dpaa_eth_probe(struct platform_device *pdev)
 {
-       struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
        struct net_device *net_dev = NULL;
+       struct dpaa_bp *dpaa_bp = NULL;
        struct dpaa_fq *dpaa_fq, *tmp;
        struct dpaa_priv *priv = NULL;
        struct fm_port_fqs port_fqs;
        struct mac_device *mac_dev;
-       int err = 0, i, channel;
+       int err = 0, channel;
        struct device *dev;
 
        dev = &pdev->dev;
@@ -2856,23 +2834,21 @@ static int dpaa_eth_probe(struct platform_device *pdev)
        priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
 
        /* bp init */
-       for (i = 0; i < DPAA_BPS_NUM; i++) {
-               dpaa_bps[i] = dpaa_bp_alloc(dev);
-               if (IS_ERR(dpaa_bps[i])) {
-                       err = PTR_ERR(dpaa_bps[i]);
-                       goto free_dpaa_bps;
-               }
-               /* the raw size of the buffers used for reception */
-               dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
-               /* avoid runtime computations by keeping the usable size here */
-               dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
-               dpaa_bps[i]->priv = priv;
-
-               err = dpaa_bp_alloc_pool(dpaa_bps[i]);
-               if (err < 0)
-                       goto free_dpaa_bps;
-               priv->dpaa_bps[i] = dpaa_bps[i];
+       dpaa_bp = dpaa_bp_alloc(dev);
+       if (IS_ERR(dpaa_bp)) {
+               err = PTR_ERR(dpaa_bp);
+               goto free_dpaa_bps;
        }
+       /* the raw size of the buffers used for reception */
+       dpaa_bp->raw_size = DPAA_BP_RAW_SIZE;
+       /* avoid runtime computations by keeping the usable size here */
+       dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size);
+       dpaa_bp->priv = priv;
+
+       err = dpaa_bp_alloc_pool(dpaa_bp);
+       if (err < 0)
+               goto free_dpaa_bps;
+       priv->dpaa_bp = dpaa_bp;
 
        INIT_LIST_HEAD(&priv->dpaa_fq_list);
 
@@ -2898,7 +2874,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
        /* Walk the CPUs with affine portals
         * and add this pool channel to each's dequeue mask.
         */
-       dpaa_eth_add_channel(priv->channel);
+       dpaa_eth_add_channel(priv->channel, &pdev->dev);
 
        dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
 
@@ -2930,7 +2906,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
        priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
 
        /* All real interfaces need their ports initialized */
-       err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+       err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
                                  &priv->buf_layout[0], dev);
        if (err)
                goto free_dpaa_fqs;
index 1bdfead..fc2cc4c 100644 (file)
@@ -47,8 +47,6 @@
 /* Total number of Tx queues */
 #define DPAA_ETH_TXQ_NUM       (DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
 
-#define DPAA_BPS_NUM 3 /* number of bpools per interface */
-
 /* More detailed FQ types - used for fine-grained WQ assignments */
 enum dpaa_fq_type {
        FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
@@ -148,7 +146,7 @@ struct dpaa_buffer_layout {
 
 struct dpaa_priv {
        struct dpaa_percpu_priv __percpu *percpu_priv;
-       struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM];
+       struct dpaa_bp *dpaa_bp;
        /* Store here the needed Tx headroom for convenience and speed
         * (even though it can be computed based on the fields of buf_layout)
         */
index 0d9b185..ee62d25 100644 (file)
@@ -131,11 +131,9 @@ static ssize_t dpaa_eth_show_bpids(struct device *dev,
 {
        struct dpaa_priv *priv = netdev_priv(to_net_dev(dev));
        ssize_t bytes = 0;
-       int i = 0;
 
-       for (i = 0; i < DPAA_BPS_NUM; i++)
-               bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
-                                 priv->dpaa_bps[i]->bpid);
+       bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
+                                 priv->dpaa_bp->bpid);
 
        return bytes;
 }
index 7ce2e99..66d1508 100644 (file)
@@ -47,6 +47,8 @@ static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
        "tx S/G",
        "tx error",
        "rx error",
+       "rx dropped",
+       "tx dropped",
 };
 
 static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
@@ -78,10 +80,8 @@ static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
 static int dpaa_get_link_ksettings(struct net_device *net_dev,
                                   struct ethtool_link_ksettings *cmd)
 {
-       if (!net_dev->phydev) {
-               netdev_dbg(net_dev, "phy device not initialized\n");
+       if (!net_dev->phydev)
                return 0;
-       }
 
        phy_ethtool_ksettings_get(net_dev->phydev, cmd);
 
@@ -93,10 +93,8 @@ static int dpaa_set_link_ksettings(struct net_device *net_dev,
 {
        int err;
 
-       if (!net_dev->phydev) {
-               netdev_err(net_dev, "phy device not initialized\n");
+       if (!net_dev->phydev)
                return -ENODEV;
-       }
 
        err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
        if (err < 0)
@@ -140,10 +138,8 @@ static int dpaa_nway_reset(struct net_device *net_dev)
 {
        int err;
 
-       if (!net_dev->phydev) {
-               netdev_err(net_dev, "phy device not initialized\n");
+       if (!net_dev->phydev)
                return -ENODEV;
-       }
 
        err = 0;
        if (net_dev->phydev->autoneg) {
@@ -165,10 +161,8 @@ static void dpaa_get_pauseparam(struct net_device *net_dev,
        priv = netdev_priv(net_dev);
        mac_dev = priv->mac_dev;
 
-       if (!net_dev->phydev) {
-               netdev_err(net_dev, "phy device not initialized\n");
+       if (!net_dev->phydev)
                return;
-       }
 
        epause->autoneg = mac_dev->autoneg_pause;
        epause->rx_pause = mac_dev->rx_pause_active;
@@ -223,7 +217,7 @@ static int dpaa_get_sset_count(struct net_device *net_dev, int type)
        unsigned int total_stats, num_stats;
 
        num_stats   = num_online_cpus() + 1;
-       total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM) +
+       total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + 1) +
                        DPAA_STATS_GLOBAL_LEN;
 
        switch (type) {
@@ -235,10 +229,10 @@ static int dpaa_get_sset_count(struct net_device *net_dev, int type)
 }
 
 static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
-                      int crr_cpu, u64 *bp_count, u64 *data)
+                      int crr_cpu, u64 bp_count, u64 *data)
 {
        int num_values = num_cpus + 1;
-       int crr = 0, j;
+       int crr = 0;
 
        /* update current CPU's stats and also add them to the total values */
        data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
@@ -262,23 +256,27 @@ static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
        data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
        data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
 
-       for (j = 0; j < DPAA_BPS_NUM; j++) {
-               data[crr * num_values + crr_cpu] = bp_count[j];
-               data[crr++ * num_values + num_cpus] += bp_count[j];
-       }
+       data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_dropped;
+       data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_dropped;
+
+       data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_dropped;
+       data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_dropped;
+
+       data[crr * num_values + crr_cpu] = bp_count;
+       data[crr++ * num_values + num_cpus] += bp_count;
 }
 
 static void dpaa_get_ethtool_stats(struct net_device *net_dev,
                                   struct ethtool_stats *stats, u64 *data)
 {
-       u64 bp_count[DPAA_BPS_NUM], cg_time, cg_num;
        struct dpaa_percpu_priv *percpu_priv;
        struct dpaa_rx_errors rx_errors;
        unsigned int num_cpus, offset;
+       u64 bp_count, cg_time, cg_num;
        struct dpaa_ern_cnt ern_cnt;
        struct dpaa_bp *dpaa_bp;
        struct dpaa_priv *priv;
-       int total_stats, i, j;
+       int total_stats, i;
        bool cg_status;
 
        total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
@@ -292,12 +290,10 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev,
 
        for_each_online_cpu(i) {
                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-               for (j = 0; j < DPAA_BPS_NUM; j++) {
-                       dpaa_bp = priv->dpaa_bps[j];
-                       if (!dpaa_bp->percpu_count)
-                               continue;
-                       bp_count[j] = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
-               }
+               dpaa_bp = priv->dpaa_bp;
+               if (!dpaa_bp->percpu_count)
+                       continue;
+               bp_count = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
                rx_errors.dme += percpu_priv->rx_errors.dme;
                rx_errors.fpe += percpu_priv->rx_errors.fpe;
                rx_errors.fse += percpu_priv->rx_errors.fse;
@@ -315,7 +311,7 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev,
                copy_stats(percpu_priv, num_cpus, i, bp_count, data);
        }
 
-       offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM);
+       offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + 1);
        memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));
 
        offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
@@ -363,18 +359,16 @@ static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
                memcpy(strings, string_cpu, ETH_GSTRING_LEN);
                strings += ETH_GSTRING_LEN;
        }
-       for (i = 0; i < DPAA_BPS_NUM; i++) {
-               for (j = 0; j < num_cpus; j++) {
-                       snprintf(string_cpu, ETH_GSTRING_LEN,
-                                "bpool %c [CPU %d]", 'a' + i, j);
-                       memcpy(strings, string_cpu, ETH_GSTRING_LEN);
-                       strings += ETH_GSTRING_LEN;
-               }
-               snprintf(string_cpu, ETH_GSTRING_LEN, "bpool %c [TOTAL]",
-                        'a' + i);
+       for (j = 0; j < num_cpus; j++) {
+               snprintf(string_cpu, ETH_GSTRING_LEN,
+                        "bpool [CPU %d]", j);
                memcpy(strings, string_cpu, ETH_GSTRING_LEN);
                strings += ETH_GSTRING_LEN;
        }
+       snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]");
+       memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+       strings += ETH_GSTRING_LEN;
+
        memcpy(strings, dpaa_stats_global, size);
 }
 
index d1e78cd..69184ca 100644 (file)
@@ -6,7 +6,7 @@
 obj-$(CONFIG_FSL_DPAA2_ETH)            += fsl-dpaa2-eth.o
 obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK)      += fsl-dpaa2-ptp.o
 
-fsl-dpaa2-eth-objs     := dpaa2-eth.o dpaa2-ethtool.o dpni.o
+fsl-dpaa2-eth-objs     := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o
 fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
 fsl-dpaa2-ptp-objs     := dpaa2-ptp.o dprtc.o
 
index 90fc79b..c26c0a7 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 /* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP
+ * Copyright 2016-2019 NXP
  */
 #include <linux/init.h>
 #include <linux/module.h>
@@ -1255,8 +1255,6 @@ static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
        priv->rx_td_enabled = enable;
 }
 
-static void update_tx_fqids(struct dpaa2_eth_priv *priv);
-
 static int link_state_update(struct dpaa2_eth_priv *priv)
 {
        struct dpni_link_state state = {0};
@@ -1278,12 +1276,17 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
                   !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
        dpaa2_eth_set_rx_taildrop(priv, !tx_pause);
 
+       /* When we manage the MAC/PHY using phylink there is no need
+        * to manually update the netif_carrier.
+        */
+       if (priv->mac)
+               goto out;
+
        /* Chech link state; speed / duplex changes are not treated yet */
        if (priv->link_state.up == state.up)
                goto out;
 
        if (state.up) {
-               update_tx_fqids(priv);
                netif_carrier_on(priv->net_dev);
                netif_tx_start_all_queues(priv->net_dev);
        } else {
@@ -1315,17 +1318,21 @@ static int dpaa2_eth_open(struct net_device *net_dev)
                           priv->dpbp_dev->obj_desc.id, priv->bpid);
        }
 
-       /* We'll only start the txqs when the link is actually ready; make sure
-        * we don't race against the link up notification, which may come
-        * immediately after dpni_enable();
-        */
-       netif_tx_stop_all_queues(net_dev);
+       if (!priv->mac) {
+               /* We'll only start the txqs when the link is actually ready;
+                * make sure we don't race against the link up notification,
+                * which may come immediately after dpni_enable();
+                */
+               netif_tx_stop_all_queues(net_dev);
+
+               /* Also, explicitly set carrier off, otherwise
+                * netif_carrier_ok() will return true and cause 'ip link show'
+                * to report the LOWER_UP flag, even though the link
+                * notification wasn't even received.
+                */
+               netif_carrier_off(net_dev);
+       }
        enable_ch_napi(priv);
-       /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
-        * return true and cause 'ip link show' to report the LOWER_UP flag,
-        * even though the link notification wasn't even received.
-        */
-       netif_carrier_off(net_dev);
 
        err = dpni_enable(priv->mc_io, 0, priv->mc_token);
        if (err < 0) {
@@ -1333,13 +1340,17 @@ static int dpaa2_eth_open(struct net_device *net_dev)
                goto enable_err;
        }
 
-       /* If the DPMAC object has already processed the link up interrupt,
-        * we have to learn the link state ourselves.
-        */
-       err = link_state_update(priv);
-       if (err < 0) {
-               netdev_err(net_dev, "Can't update link state\n");
-               goto link_state_err;
+       if (!priv->mac) {
+               /* If the DPMAC object has already processed the link up
+                * interrupt, we have to learn the link state ourselves.
+                */
+               err = link_state_update(priv);
+               if (err < 0) {
+                       netdev_err(net_dev, "Can't update link state\n");
+                       goto link_state_err;
+               }
+       } else {
+               phylink_start(priv->mac->phylink);
        }
 
        return 0;
@@ -1414,8 +1425,12 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
        int dpni_enabled = 0;
        int retries = 10;
 
-       netif_tx_stop_all_queues(net_dev);
-       netif_carrier_off(net_dev);
+       if (!priv->mac) {
+               netif_tx_stop_all_queues(net_dev);
+               netif_carrier_off(net_dev);
+       } else {
+               phylink_stop(priv->mac->phylink);
+       }
 
        /* On dpni_disable(), the MC firmware will:
         * - stop MAC Rx and wait for all Rx frames to be enqueued to software
@@ -3345,12 +3360,56 @@ static int poll_link_state(void *arg)
        return 0;
 }
 
+static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
+{
+       struct fsl_mc_device *dpni_dev, *dpmac_dev;
+       struct dpaa2_mac *mac;
+       int err;
+
+       dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
+       dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
+       if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+               return 0;
+
+       if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
+               return 0;
+
+       mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
+       if (!mac)
+               return -ENOMEM;
+
+       mac->mc_dev = dpmac_dev;
+       mac->mc_io = priv->mc_io;
+       mac->net_dev = priv->net_dev;
+
+       err = dpaa2_mac_connect(mac);
+       if (err) {
+               netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
+               kfree(mac);
+               return err;
+       }
+       priv->mac = mac;
+
+       return 0;
+}
+
+static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
+{
+       if (!priv->mac)
+               return;
+
+       dpaa2_mac_disconnect(priv->mac);
+       kfree(priv->mac);
+       priv->mac = NULL;
+}
+
 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
 {
        u32 status = ~0;
        struct device *dev = (struct device *)arg;
        struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
        struct net_device *net_dev = dev_get_drvdata(dev);
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        int err;
 
        err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
@@ -3363,8 +3422,17 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
        if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
                link_state_update(netdev_priv(net_dev));
 
-       if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED)
+       if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
                set_mac_addr(netdev_priv(net_dev));
+               update_tx_fqids(priv);
+
+               rtnl_lock();
+               if (priv->mac)
+                       dpaa2_eth_disconnect_mac(priv);
+               else
+                       dpaa2_eth_connect_mac(priv);
+               rtnl_unlock();
+       }
 
        return IRQ_HANDLED;
 }
@@ -3540,6 +3608,10 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
                priv->do_link_poll = true;
        }
 
+       err = dpaa2_eth_connect_mac(priv);
+       if (err)
+               goto err_connect_mac;
+
        err = register_netdev(net_dev);
        if (err < 0) {
                dev_err(dev, "register_netdev() failed\n");
@@ -3554,6 +3626,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
        return 0;
 
 err_netdev_reg:
+       dpaa2_eth_disconnect_mac(priv);
+err_connect_mac:
        if (priv->do_link_poll)
                kthread_stop(priv->poll_thread);
        else
@@ -3596,6 +3670,10 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
 #ifdef CONFIG_DEBUG_FS
        dpaa2_dbg_remove(priv);
 #endif
+       rtnl_lock();
+       dpaa2_eth_disconnect_mac(priv);
+       rtnl_unlock();
+
        unregister_netdev(net_dev);
 
        if (priv->do_link_poll)
index 686b651..7635db3 100644 (file)
@@ -17,6 +17,7 @@
 
 #include "dpaa2-eth-trace.h"
 #include "dpaa2-eth-debugfs.h"
+#include "dpaa2-mac.h"
 
 #define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
 
@@ -415,6 +416,8 @@ struct dpaa2_eth_priv {
 #ifdef CONFIG_DEBUG_FS
        struct dpaa2_debugfs dbg;
 #endif
+
+       struct dpaa2_mac *mac;
 };
 
 #define DPAA2_RXH_SUPPORTED    (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
index dc9a6c3..0883620 100644 (file)
@@ -85,6 +85,10 @@ dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
 {
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 
+       if (priv->mac)
+               return phylink_ethtool_ksettings_get(priv->mac->phylink,
+                                                    link_settings);
+
        link_settings->base.autoneg = AUTONEG_DISABLE;
        if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
                link_settings->base.duplex = DUPLEX_FULL;
@@ -93,12 +97,29 @@ dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
        return 0;
 }
 
+static int
+dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
+                            const struct ethtool_link_ksettings *link_settings)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+       if (!priv->mac)
+               return -ENOTSUPP;
+
+       return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
+}
+
 static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
                                     struct ethtool_pauseparam *pause)
 {
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        u64 link_options = priv->link_state.options;
 
+       if (priv->mac) {
+               phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
+               return;
+       }
+
        pause->rx_pause = !!(link_options & DPNI_LINK_OPT_PAUSE);
        pause->tx_pause = pause->rx_pause ^
                          !!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
@@ -118,6 +139,9 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
                return -EOPNOTSUPP;
        }
 
+       if (priv->mac)
+               return phylink_ethtool_set_pauseparam(priv->mac->phylink,
+                                                     pause);
        if (pause->autoneg)
                return -EOPNOTSUPP;
 
@@ -728,6 +752,7 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
        .get_drvinfo = dpaa2_eth_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_link_ksettings = dpaa2_eth_get_link_ksettings,
+       .set_link_ksettings = dpaa2_eth_set_link_ksettings,
        .get_pauseparam = dpaa2_eth_get_pauseparam,
        .set_pauseparam = dpaa2_eth_set_pauseparam,
        .get_sset_count = dpaa2_eth_get_sset_count,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
new file mode 100644 (file)
index 0000000..fea388d
--- /dev/null
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include "dpaa2-eth.h"
+#include "dpaa2-mac.h"
+
+#define phylink_to_dpaa2_mac(config) \
+       container_of((config), struct dpaa2_mac, phylink_config)
+
+static phy_interface_t phy_mode(enum dpmac_eth_if eth_if)
+{
+       switch (eth_if) {
+       case DPMAC_ETH_IF_RGMII:
+               return PHY_INTERFACE_MODE_RGMII;
+       default:
+               return -EINVAL;
+       }
+}
+
+/* Caller must call of_node_put on the returned value */
+static struct device_node *dpaa2_mac_get_node(u16 dpmac_id)
+{
+       struct device_node *dpmacs, *dpmac = NULL;
+       u32 id;
+       int err;
+
+       dpmacs = of_find_node_by_name(NULL, "dpmacs");
+       if (!dpmacs)
+               return NULL;
+
+       while ((dpmac = of_get_next_child(dpmacs, dpmac)) != NULL) {
+               err = of_property_read_u32(dpmac, "reg", &id);
+               if (err)
+                       continue;
+               if (id == dpmac_id)
+                       break;
+       }
+
+       of_node_put(dpmacs);
+
+       return dpmac;
+}
+
+static int dpaa2_mac_get_if_mode(struct device_node *node,
+                                struct dpmac_attr attr)
+{
+       int if_mode;
+
+       if_mode = of_get_phy_mode(node);
+       if (if_mode >= 0)
+               return if_mode;
+
+       if_mode = phy_mode(attr.eth_if);
+       if (if_mode >= 0)
+               return if_mode;
+
+       return -ENODEV;
+}
+
+static bool dpaa2_mac_phy_mode_mismatch(struct dpaa2_mac *mac,
+                                       phy_interface_t interface)
+{
+       switch (interface) {
+       case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+               return (interface != mac->if_mode);
+       default:
+               return true;
+       }
+}
+
+static void dpaa2_mac_validate(struct phylink_config *config,
+                              unsigned long *supported,
+                              struct phylink_link_state *state)
+{
+       struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+       __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+       if (state->interface != PHY_INTERFACE_MODE_NA &&
+           dpaa2_mac_phy_mode_mismatch(mac, state->interface)) {
+               goto empty_set;
+       }
+
+       phylink_set_port_modes(mask);
+       phylink_set(mask, Autoneg);
+       phylink_set(mask, Pause);
+       phylink_set(mask, Asym_Pause);
+
+       switch (state->interface) {
+       case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+               phylink_set(mask, 10baseT_Full);
+               phylink_set(mask, 100baseT_Full);
+               phylink_set(mask, 1000baseT_Full);
+               break;
+       default:
+               goto empty_set;
+       }
+
+       linkmode_and(supported, supported, mask);
+       linkmode_and(state->advertising, state->advertising, mask);
+
+       return;
+
+empty_set:
+       linkmode_zero(supported);
+}
+
+static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
+                            const struct phylink_link_state *state)
+{
+       struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+       struct dpmac_link_state *dpmac_state = &mac->state;
+       int err;
+
+       if (state->speed != SPEED_UNKNOWN)
+               dpmac_state->rate = state->speed;
+
+       if (state->duplex != DUPLEX_UNKNOWN) {
+               if (!state->duplex)
+                       dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
+               else
+                       dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
+       }
+
+       if (state->an_enabled)
+               dpmac_state->options |= DPMAC_LINK_OPT_AUTONEG;
+       else
+               dpmac_state->options &= ~DPMAC_LINK_OPT_AUTONEG;
+
+       if (state->pause & MLO_PAUSE_RX)
+               dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
+       else
+               dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
+
+       if (!!(state->pause & MLO_PAUSE_RX) ^ !!(state->pause & MLO_PAUSE_TX))
+               dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
+       else
+               dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
+
+       err = dpmac_set_link_state(mac->mc_io, 0,
+                                  mac->mc_dev->mc_handle, dpmac_state);
+       if (err)
+               netdev_err(mac->net_dev, "dpmac_set_link_state() = %d\n", err);
+}
+
+static void dpaa2_mac_link_up(struct phylink_config *config, unsigned int mode,
+                             phy_interface_t interface, struct phy_device *phy)
+{
+       struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+       struct dpmac_link_state *dpmac_state = &mac->state;
+       int err;
+
+       dpmac_state->up = 1;
+       err = dpmac_set_link_state(mac->mc_io, 0,
+                                  mac->mc_dev->mc_handle, dpmac_state);
+       if (err)
+               netdev_err(mac->net_dev, "dpmac_set_link_state() = %d\n", err);
+}
+
+static void dpaa2_mac_link_down(struct phylink_config *config,
+                               unsigned int mode,
+                               phy_interface_t interface)
+{
+       struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+       struct dpmac_link_state *dpmac_state = &mac->state;
+       int err;
+
+       dpmac_state->up = 0;
+       err = dpmac_set_link_state(mac->mc_io, 0,
+                                  mac->mc_dev->mc_handle, dpmac_state);
+       if (err)
+               netdev_err(mac->net_dev, "dpmac_set_link_state() = %d\n", err);
+}
+
+static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
+       .validate = dpaa2_mac_validate,
+       .mac_config = dpaa2_mac_config,
+       .mac_link_up = dpaa2_mac_link_up,
+       .mac_link_down = dpaa2_mac_link_down,
+};
+
+bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
+                            struct fsl_mc_io *mc_io)
+{
+       struct dpmac_attr attr;
+       bool fixed = false;
+       u16 mc_handle = 0;
+       int err;
+
+       err = dpmac_open(mc_io, 0, dpmac_dev->obj_desc.id,
+                        &mc_handle);
+       if (err || !mc_handle)
+               return false;
+
+       err = dpmac_get_attributes(mc_io, 0, mc_handle, &attr);
+       if (err)
+               goto out;
+
+       if (attr.link_type == DPMAC_LINK_TYPE_FIXED)
+               fixed = true;
+
+out:
+       dpmac_close(mc_io, 0, mc_handle);
+
+       return fixed;
+}
+
+int dpaa2_mac_connect(struct dpaa2_mac *mac)
+{
+       struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+       struct net_device *net_dev = mac->net_dev;
+       struct device_node *dpmac_node;
+       struct phylink *phylink;
+       struct dpmac_attr attr;
+       int err;
+
+       err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
+                        &dpmac_dev->mc_handle);
+       if (err || !dpmac_dev->mc_handle) {
+               netdev_err(net_dev, "dpmac_open() = %d\n", err);
+               return -ENODEV;
+       }
+
+       err = dpmac_get_attributes(mac->mc_io, 0, dpmac_dev->mc_handle, &attr);
+       if (err) {
+               netdev_err(net_dev, "dpmac_get_attributes() = %d\n", err);
+               goto err_close_dpmac;
+       }
+
+       dpmac_node = dpaa2_mac_get_node(attr.id);
+       if (!dpmac_node) {
+               netdev_err(net_dev, "No dpmac@%d node found.\n", attr.id);
+               err = -ENODEV;
+               goto err_close_dpmac;
+       }
+
+       err = dpaa2_mac_get_if_mode(dpmac_node, attr);
+       if (err < 0) {
+               err = -EINVAL;
+               goto err_put_node;
+       }
+       mac->if_mode = err;
+
+       /* The MAC does not have the capability to add RGMII delays so
+        * error out if the interface mode requests them and there is no PHY
+        * to act upon them
+        */
+       if (of_phy_is_fixed_link(dpmac_node) &&
+           (mac->if_mode == PHY_INTERFACE_MODE_RGMII_ID ||
+            mac->if_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+            mac->if_mode == PHY_INTERFACE_MODE_RGMII_TXID)) {
+               netdev_err(net_dev, "RGMII delay not supported\n");
+               err = -EINVAL;
+               goto err_put_node;
+       }
+
+       mac->phylink_config.dev = &net_dev->dev;
+       mac->phylink_config.type = PHYLINK_NETDEV;
+
+       phylink = phylink_create(&mac->phylink_config,
+                                of_fwnode_handle(dpmac_node), mac->if_mode,
+                                &dpaa2_mac_phylink_ops);
+       if (IS_ERR(phylink)) {
+               err = PTR_ERR(phylink);
+               goto err_put_node;
+       }
+       mac->phylink = phylink;
+
+       err = phylink_of_phy_connect(mac->phylink, dpmac_node, 0);
+       if (err) {
+               netdev_err(net_dev, "phylink_of_phy_connect() = %d\n", err);
+               goto err_phylink_destroy;
+       }
+
+       of_node_put(dpmac_node);
+
+       return 0;
+
+err_phylink_destroy:
+       phylink_destroy(mac->phylink);
+err_put_node:
+       of_node_put(dpmac_node);
+err_close_dpmac:
+       dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
+       return err;
+}
+
+void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
+{
+       if (!mac->phylink)
+               return;
+
+       phylink_disconnect_phy(mac->phylink);
+       phylink_destroy(mac->phylink);
+       dpmac_close(mac->mc_io, 0, mac->mc_dev->mc_handle);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
new file mode 100644 (file)
index 0000000..8634d0d
--- /dev/null
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+#ifndef DPAA2_MAC_H
+#define DPAA2_MAC_H
+
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phylink.h>
+
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+
+struct dpaa2_mac {
+       struct fsl_mc_device *mc_dev;
+       struct dpmac_link_state state;
+       struct net_device *net_dev;
+       struct fsl_mc_io *mc_io;
+
+       struct phylink_config phylink_config;
+       struct phylink *phylink;
+       phy_interface_t if_mode;
+};
+
+bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
+                            struct fsl_mc_io *mc_io);
+
+int dpaa2_mac_connect(struct dpaa2_mac *mac);
+
+void dpaa2_mac_disconnect(struct dpaa2_mac *mac);
+
+#endif /* DPAA2_MAC_H */
index ff2e177..df2458a 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright 2018 NXP
  */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
new file mode 100644 (file)
index 0000000..96a9b0d
--- /dev/null
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2019 NXP
+ */
+#ifndef _FSL_DPMAC_CMD_H
+#define _FSL_DPMAC_CMD_H
+
+/* DPMAC Version */
+#define DPMAC_VER_MAJOR                                4
+#define DPMAC_VER_MINOR                                4
+#define DPMAC_CMD_BASE_VERSION                 1
+#define DPMAC_CMD_2ND_VERSION                  2
+#define DPMAC_CMD_ID_OFFSET                    4
+
+#define DPMAC_CMD(id)  (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_BASE_VERSION)
+#define DPMAC_CMD_V2(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_2ND_VERSION)
+
+/* Command IDs */
+#define DPMAC_CMDID_CLOSE              DPMAC_CMD(0x800)
+#define DPMAC_CMDID_OPEN               DPMAC_CMD(0x80c)
+
+#define DPMAC_CMDID_GET_ATTR           DPMAC_CMD(0x004)
+#define DPMAC_CMDID_SET_LINK_STATE     DPMAC_CMD_V2(0x0c3)
+
+/* Macros for accessing command fields smaller than 1byte */
+#define DPMAC_MASK(field)        \
+       GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
+               DPMAC_##field##_SHIFT)
+
+#define dpmac_set_field(var, field, val) \
+       ((var) |= (((val) << DPMAC_##field##_SHIFT) & DPMAC_MASK(field)))
+#define dpmac_get_field(var, field)      \
+       (((var) & DPMAC_MASK(field)) >> DPMAC_##field##_SHIFT)
+
+struct dpmac_cmd_open {
+       __le32 dpmac_id;
+};
+
+struct dpmac_rsp_get_attributes {
+       u8 eth_if;
+       u8 link_type;
+       __le16 id;
+       __le32 max_rate;
+};
+
+#define DPMAC_STATE_SIZE       1
+#define DPMAC_STATE_SHIFT      0
+#define DPMAC_STATE_VALID_SIZE 1
+#define DPMAC_STATE_VALID_SHIFT        1
+
+struct dpmac_cmd_set_link_state {
+       __le64 options;
+       __le32 rate;
+       __le32 pad0;
+       /* from lsb: up:1, state_valid:1 */
+       u8 state;
+       u8 pad1[7];
+       __le64 supported;
+       __le64 advertising;
+};
+
+#endif /* _FSL_DPMAC_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.c b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
new file mode 100644 (file)
index 0000000..b75189d
--- /dev/null
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2019 NXP
+ */
+#include <linux/fsl/mc.h>
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+
+/**
+ * dpmac_open() - Open a control session for the specified object.
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpmac_id:  DPMAC unique ID
+ * @token:     Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpmac_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpmac_open(struct fsl_mc_io *mc_io,
+              u32 cmd_flags,
+              int dpmac_id,
+              u16 *token)
+{
+       struct dpmac_cmd_open *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+       int err;
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN,
+                                         cmd_flags,
+                                         0);
+       cmd_params = (struct dpmac_cmd_open *)cmd.params;
+       cmd_params->dpmac_id = cpu_to_le32(dpmac_id);
+
+       /* send command to mc*/
+       err = mc_send_command(mc_io, &cmd);
+       if (err)
+               return err;
+
+       /* retrieve response parameters */
+       *token = mc_cmd_hdr_read_token(&cmd);
+
+       return err;
+}
+
+/**
+ * dpmac_close() - Close the control session of the object
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPMAC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpmac_close(struct fsl_mc_io *mc_io,
+               u32 cmd_flags,
+               u16 token)
+{
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
+                                         token);
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpmac_get_attributes() - Retrieve DPMAC attributes.
+ *
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPMAC object
+ * @attr:      Returned object's attributes
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
+                        u32 cmd_flags,
+                        u16 token,
+                        struct dpmac_attr *attr)
+{
+       struct dpmac_rsp_get_attributes *rsp_params;
+       struct fsl_mc_command cmd = { 0 };
+       int err;
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR,
+                                         cmd_flags,
+                                         token);
+
+       /* send command to mc*/
+       err = mc_send_command(mc_io, &cmd);
+       if (err)
+               return err;
+
+       /* retrieve response parameters */
+       rsp_params = (struct dpmac_rsp_get_attributes *)cmd.params;
+       attr->eth_if = rsp_params->eth_if;
+       attr->link_type = rsp_params->link_type;
+       attr->id = le16_to_cpu(rsp_params->id);
+       attr->max_rate = le32_to_cpu(rsp_params->max_rate);
+
+       return 0;
+}
+
+/**
+ * dpmac_set_link_state() - Set the Ethernet link status
+ * @mc_io:      Pointer to opaque I/O object
+ * @cmd_flags:  Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:      Token of DPMAC object
+ * @link_state: Link state configuration
+ *
+ * Return:      '0' on Success; Error code otherwise.
+ */
+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
+                        u32 cmd_flags,
+                        u16 token,
+                        struct dpmac_link_state *link_state)
+{
+       struct dpmac_cmd_set_link_state *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpmac_cmd_set_link_state *)cmd.params;
+       cmd_params->options = cpu_to_le64(link_state->options);
+       cmd_params->rate = cpu_to_le32(link_state->rate);
+       dpmac_set_field(cmd_params->state, STATE, link_state->up);
+       dpmac_set_field(cmd_params->state, STATE_VALID,
+                       link_state->state_valid);
+       cmd_params->supported = cpu_to_le64(link_state->supported);
+       cmd_params->advertising = cpu_to_le64(link_state->advertising);
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.h b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
new file mode 100644 (file)
index 0000000..4efc410
--- /dev/null
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2019 NXP
+ */
+#ifndef __FSL_DPMAC_H
+#define __FSL_DPMAC_H
+
+/* Data Path MAC API
+ * Contains initialization APIs and runtime control APIs for DPMAC
+ */
+
+struct fsl_mc_io;
+
+int dpmac_open(struct fsl_mc_io *mc_io,
+              u32 cmd_flags,
+              int dpmac_id,
+              u16 *token);
+
+int dpmac_close(struct fsl_mc_io *mc_io,
+               u32 cmd_flags,
+               u16 token);
+
+/**
+ * enum dpmac_link_type -  DPMAC link type
+ * @DPMAC_LINK_TYPE_NONE: No link
+ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type
+ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID
+ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type
+ */
+enum dpmac_link_type {
+       DPMAC_LINK_TYPE_NONE,
+       DPMAC_LINK_TYPE_FIXED,
+       DPMAC_LINK_TYPE_PHY,
+       DPMAC_LINK_TYPE_BACKPLANE
+};
+
+/**
+ * enum dpmac_eth_if - DPMAC Ethernet interface
+ * @DPMAC_ETH_IF_MII: MII interface
+ * @DPMAC_ETH_IF_RMII: RMII interface
+ * @DPMAC_ETH_IF_SMII: SMII interface
+ * @DPMAC_ETH_IF_GMII: GMII interface
+ * @DPMAC_ETH_IF_RGMII: RGMII interface
+ * @DPMAC_ETH_IF_SGMII: SGMII interface
+ * @DPMAC_ETH_IF_QSGMII: QSGMII interface
+ * @DPMAC_ETH_IF_XAUI: XAUI interface
+ * @DPMAC_ETH_IF_XFI: XFI interface
+ * @DPMAC_ETH_IF_CAUI: CAUI interface
+ * @DPMAC_ETH_IF_1000BASEX: 1000BASEX interface
+ * @DPMAC_ETH_IF_USXGMII: USXGMII interface
+ */
+enum dpmac_eth_if {
+       DPMAC_ETH_IF_MII,
+       DPMAC_ETH_IF_RMII,
+       DPMAC_ETH_IF_SMII,
+       DPMAC_ETH_IF_GMII,
+       DPMAC_ETH_IF_RGMII,
+       DPMAC_ETH_IF_SGMII,
+       DPMAC_ETH_IF_QSGMII,
+       DPMAC_ETH_IF_XAUI,
+       DPMAC_ETH_IF_XFI,
+       DPMAC_ETH_IF_CAUI,
+       DPMAC_ETH_IF_1000BASEX,
+       DPMAC_ETH_IF_USXGMII,
+};
+
+/**
+ * struct dpmac_attr - Structure representing DPMAC attributes
+ * @id:                DPMAC object ID
+ * @max_rate:  Maximum supported rate - in Mbps
+ * @eth_if:    Ethernet interface
+ * @link_type: link type
+ */
+struct dpmac_attr {
+       u16 id;
+       u32 max_rate;
+       enum dpmac_eth_if eth_if;
+       enum dpmac_link_type link_type;
+};
+
+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
+                        u32 cmd_flags,
+                        u16 token,
+                        struct dpmac_attr *attr);
+
+/**
+ * DPMAC link configuration/state options
+ */
+
+/**
+ * Enable auto-negotiation
+ */
+#define DPMAC_LINK_OPT_AUTONEG                 BIT_ULL(0)
+/**
+ * Enable half-duplex mode
+ */
+#define DPMAC_LINK_OPT_HALF_DUPLEX             BIT_ULL(1)
+/**
+ * Enable pause frames
+ */
+#define DPMAC_LINK_OPT_PAUSE                   BIT_ULL(2)
+/**
+ * Enable asymmetric pause frames
+ */
+#define DPMAC_LINK_OPT_ASYM_PAUSE              BIT_ULL(3)
+
+/**
+ * Advertised link speeds
+ */
+#define DPMAC_ADVERTISED_10BASET_FULL          BIT_ULL(0)
+#define DPMAC_ADVERTISED_100BASET_FULL         BIT_ULL(1)
+#define DPMAC_ADVERTISED_1000BASET_FULL                BIT_ULL(2)
+#define DPMAC_ADVERTISED_10000BASET_FULL       BIT_ULL(4)
+#define DPMAC_ADVERTISED_2500BASEX_FULL                BIT_ULL(5)
+
+/**
+ * Advertise auto-negotiation enable
+ */
+#define DPMAC_ADVERTISED_AUTONEG               BIT_ULL(3)
+
+/**
+ * struct dpmac_link_state - DPMAC link configuration request
+ * @rate: Rate in Mbps
+ * @options: Enable/Disable DPMAC link cfg features (bitmap)
+ * @up: Link state
+ * @state_valid: Ignore/Update the state of the link
+ * @supported: Speeds capability of the phy (bitmap)
+ * @advertising: Speeds that are advertised for autoneg (bitmap)
+ */
+struct dpmac_link_state {
+       u32 rate;
+       u64 options;
+       int up;
+       int state_valid;
+       u64 supported;
+       u64 advertising;
+};
+
+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
+                        u32 cmd_flags,
+                        u16 token,
+                        struct dpmac_link_state *link_state);
+
+#endif /* __FSL_DPMAC_H */
index 720cd50..4ac05bf 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright 2013-2016 Freescale Semiconductor Inc.
  * Copyright 2016-2018 NXP
index be7914c..311c184 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright 2013-2016 Freescale Semiconductor Inc.
  * Copyright 2016-2018 NXP
index d4d4c72..7d37ba9 100644 (file)
@@ -2706,7 +2706,6 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 
        for (q = 0; q < fep->num_tx_queues; q++) {
                txq = fep->tx_queue[q];
-               bdp = txq->bd.base;
                for (i = 0; i < txq->bd.ring_size; i++) {
                        kfree(txq->tx_bounce[i]);
                        txq->tx_bounce[i] = NULL;
@@ -3558,7 +3557,7 @@ fec_probe(struct platform_device *pdev)
 
        for (i = 0; i < irq_cnt; i++) {
                snprintf(irq_name, sizeof(irq_name), "int%d", i);
-               irq = platform_get_irq_byname(pdev, irq_name);
+               irq = platform_get_irq_byname_optional(pdev, irq_name);
                if (irq < 0)
                        irq = platform_get_irq(pdev, i);
                if (irq < 0) {
index 19e2365..945643c 100644 (file)
@@ -600,9 +600,9 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
 
        INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
 
-       irq = platform_get_irq_byname(pdev, "pps");
+       irq = platform_get_irq_byname_optional(pdev, "pps");
        if (irq < 0)
-               irq = platform_get_irq(pdev, irq_idx);
+               irq = platform_get_irq_optional(pdev, irq_idx);
        /* Failure to get an irq is not fatal,
         * only the PTP_CLOCK_PPS clock events should stop
         */
index 59564ac..edec61d 100644 (file)
@@ -289,6 +289,8 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
 
        len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
        page_info = &rx->data.page_info[idx];
+       dma_sync_single_for_cpu(&priv->pdev->dev, rx->data.qpl->page_buses[idx],
+                               PAGE_SIZE, DMA_FROM_DEVICE);
 
        /* gvnic can only receive into registered segments. If the buffer
         * can't be recycled, our only choice is to copy the data out of
index 778b87b..0a9a7ee 100644 (file)
@@ -390,7 +390,21 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
        seg_desc->seg.seg_addr = cpu_to_be64(addr);
 }
 
-static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
+static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
+                                   u64 iov_offset, u64 iov_len)
+{
+       dma_addr_t dma;
+       u64 addr;
+
+       for (addr = iov_offset; addr < iov_offset + iov_len;
+            addr += PAGE_SIZE) {
+               dma = page_buses[addr / PAGE_SIZE];
+               dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
+       }
+}
+
+static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb,
+                         struct device *dev)
 {
        int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
        union gve_tx_desc *pkt_desc, *seg_desc;
@@ -432,6 +446,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
        skb_copy_bits(skb, 0,
                      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
                      hlen);
+       gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
+                               info->iov[hdr_nfrags - 1].iov_offset,
+                               info->iov[hdr_nfrags - 1].iov_len);
        copy_offset = hlen;
 
        for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
@@ -445,6 +462,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
                skb_copy_bits(skb, copy_offset,
                              tx->tx_fifo.base + info->iov[i].iov_offset,
                              info->iov[i].iov_len);
+               gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
+                                       info->iov[i].iov_offset,
+                                       info->iov[i].iov_len);
                copy_offset += info->iov[i].iov_len;
        }
 
@@ -473,7 +493,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
                gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
                return NETDEV_TX_BUSY;
        }
-       nsegs = gve_tx_add_skb(tx, skb);
+       nsegs = gve_tx_add_skb(tx, skb, &priv->pdev->dev);
 
        netdev_tx_sent_queue(tx->netdev_txq, skb->len);
        skb_tx_timestamp(skb);
index c841674..4606a7e 100644 (file)
@@ -237,6 +237,7 @@ struct hip04_priv {
        dma_addr_t rx_phys[RX_DESC_NUM];
        unsigned int rx_head;
        unsigned int rx_buf_size;
+       unsigned int rx_cnt_remaining;
 
        struct device_node *phy_node;
        struct phy_device *phy;
@@ -575,7 +576,6 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
        struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
        struct net_device *ndev = priv->ndev;
        struct net_device_stats *stats = &ndev->stats;
-       unsigned int cnt = hip04_recv_cnt(priv);
        struct rx_desc *desc;
        struct sk_buff *skb;
        unsigned char *buf;
@@ -588,8 +588,8 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
 
        /* clean up tx descriptors */
        tx_remaining = hip04_tx_reclaim(ndev, false);
-
-       while (cnt && !last) {
+       priv->rx_cnt_remaining += hip04_recv_cnt(priv);
+       while (priv->rx_cnt_remaining && !last) {
                buf = priv->rx_buf[priv->rx_head];
                skb = build_skb(buf, priv->rx_buf_size);
                if (unlikely(!skb)) {
@@ -635,11 +635,13 @@ refill:
                hip04_set_recv_desc(priv, phys);
 
                priv->rx_head = RX_NEXT(priv->rx_head);
-               if (rx >= budget)
+               if (rx >= budget) {
+                       --priv->rx_cnt_remaining;
                        goto done;
+               }
 
-               if (--cnt == 0)
-                       cnt = hip04_recv_cnt(priv);
+               if (--priv->rx_cnt_remaining == 0)
+                       priv->rx_cnt_remaining += hip04_recv_cnt(priv);
        }
 
        if (!(priv->reg_inten & RCV_INT)) {
@@ -724,6 +726,7 @@ static int hip04_mac_open(struct net_device *ndev)
        int i;
 
        priv->rx_head = 0;
+       priv->rx_cnt_remaining = 0;
        priv->tx_head = 0;
        priv->tx_tail = 0;
        hip04_reset_ppe(priv);
@@ -1038,7 +1041,6 @@ static int hip04_remove(struct platform_device *pdev)
 
        hip04_free_ring(ndev, d);
        unregister_netdev(ndev);
-       free_irq(ndev->irq, ndev);
        of_node_put(priv->phy_node);
        cancel_work_sync(&priv->tx_timeout_task);
        free_netdev(ndev);
index 0059d44..1b03139 100644 (file)
@@ -47,7 +47,7 @@ enum HCLGE_MBX_OPCODE {
        HCLGE_MBX_GET_MEDIA_TYPE,       /* (VF -> PF) get media type */
        HCLGE_MBX_PUSH_PROMISC_INFO,    /* (PF -> VF) push vf promisc info */
 
-       HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
+       HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
        HCLGE_MBX_PUSH_LINK_STATUS,     /* (M7 -> PF) get port link status */
        HCLGE_MBX_NCSI_ERROR,           /* (M7 -> PF) receive a NCSI error */
 };
@@ -72,7 +72,7 @@ enum hclge_mbx_vlan_cfg_subcode {
 };
 
 #define HCLGE_MBX_MAX_MSG_SIZE 16
-#define HCLGE_MBX_MAX_RESP_DATA_SIZE   8
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE   8U
 #define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM       3
 #define HCLGE_MBX_RING_NODE_VARIABLE_NUM       3
 
index 03ca7d9..eef1b27 100644 (file)
@@ -146,7 +146,7 @@ void hnae3_unregister_client(struct hnae3_client *client)
                return;
 
        mutex_lock(&hnae3_common_lock);
-
+       /* one system should only have one client for every type */
        list_for_each_entry(client_tmp, &hnae3_client_list, node) {
                if (client_tmp->type == client->type) {
                        existed = true;
index e480236..45f5916 100644 (file)
@@ -130,7 +130,6 @@ enum hnae3_module_type {
        HNAE3_MODULE_TYPE_CR            = 0x04,
        HNAE3_MODULE_TYPE_KR            = 0x05,
        HNAE3_MODULE_TYPE_TP            = 0x06,
-
 };
 
 enum hnae3_fec_mode {
@@ -576,7 +575,8 @@ struct hnae3_ae_algo {
        const struct pci_device_id *pdev_id_table;
 };
 
-#define HNAE3_INT_NAME_LEN        (IFNAMSIZ + 16)
+#define HNAE3_INT_NAME_EXT_LEN    32    /* Max extra information length */
+#define HNAE3_INT_NAME_LEN        (IFNAMSIZ + HNAE3_INT_NAME_EXT_LEN)
 #define HNAE3_ITR_COUNTDOWN_START 100
 
 struct hnae3_tc_info {
index fe5bc6f..6b328a2 100644 (file)
@@ -57,68 +57,68 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
                                           HNS3_RING_RX_RING_BASEADDR_H_REG);
                base_add_l = readl_relaxed(ring->tqp->io_base +
                                           HNS3_RING_RX_RING_BASEADDR_L_REG);
-               dev_info(&h->pdev->dev, "RX(%d) BASE ADD: 0x%08x%08x\n", i,
+               dev_info(&h->pdev->dev, "RX(%u) BASE ADD: 0x%08x%08x\n", i,
                         base_add_h, base_add_l);
 
                value = readl_relaxed(ring->tqp->io_base +
                                      HNS3_RING_RX_RING_BD_NUM_REG);
-               dev_info(&h->pdev->dev, "RX(%d) RING BD NUM: %u\n", i, value);
+               dev_info(&h->pdev->dev, "RX(%u) RING BD NUM: %u\n", i, value);
 
                value = readl_relaxed(ring->tqp->io_base +
                                      HNS3_RING_RX_RING_BD_LEN_REG);
-               dev_info(&h->pdev->dev, "RX(%d) RING BD LEN: %u\n", i, value);
+               dev_info(&h->pdev->dev, "RX(%u) RING BD LEN: %u\n", i, value);
 
                value = readl_relaxed(ring->tqp->io_base +
                                      HNS3_RING_RX_RING_TAIL_REG);
-               dev_info(&h->pdev->dev, "RX(%d) RING TAIL: %u\n", i, value);
+               dev_info(&h->pdev->dev, "RX(%u) RING TAIL: %u\n", i, value);
 
                value = readl_relaxed(ring->tqp->io_base +
                                      HNS3_RING_RX_RING_HEAD_REG);
-               dev_info(&h->pdev->dev, "RX(%d) RING HEAD: %u\n", i, value);
+               dev_info(&h->pdev->dev, "RX(%u) RING HEAD: %u\n", i, value);
 
                value = readl_relaxed(ring->tqp->io_base +
                                      HNS3_RING_RX_RING_FBDNUM_REG);
-               dev_info(&h->pdev->dev, "RX(%d) RING FBDNUM: %u\n", i, value);
+               dev_info(&h->pdev->dev, "RX(%u) RING FBDNUM: %u\n", i, value);
 
                value = readl_relaxed(ring->tqp->io_base +
                                      HNS3_RING_RX_RING_PKTNUM_RECORD_REG);
-               dev_info(&h->pdev->dev, "RX(%d) RING PKTNUM: %u\n", i, value);
+               dev_info(&h->pdev->dev, "RX(%u) RING PKTNUM: %u\n", i, value);
 
                ring = &priv->ring[i];
                base_add_h = readl_relaxed(ring->tqp->io_base +
                                           HNS3_RING_TX_RING_BASEADDR_H_REG);
                base_add_l = readl_relaxed(ring->tqp->io_base +
                                           HNS3_RING_TX_RING_BASEADDR_L_REG);
-               dev_info(&h->pdev->dev, "TX(%d) BASE ADD: 0x%08x%08x\n", i,
+               dev_info(&h->pdev->dev, "TX(%u) BASE ADD: 0x%08x%08x\n", i,
                         base_add_h, base_add_l);
 
                value = readl_relaxed(ring->tqp->io_base +
                                      HNS3_RING_TX_RING_BD_NUM_REG);
-               dev_info(&h->pdev->dev, "TX(%d) RING BD NUM: %u\n", i, value);
+               dev_info(&h->pdev->dev, "TX(%u) RING BD NUM: %u\n", i, value);
 
                value = readl_relaxed(ring->tqp->io_base +
                                      HNS3_RING_TX_RING_TC_REG);
-               dev_info(&h->pdev->dev, "TX(%d) RING TC: %u\n", i, value);
+               dev_info(&h->pdev->dev, "TX(%u) RING TC: %u\n", i, value);
 
                value = readl_relaxed(ring->tqp->io_base +
                                      HNS3_RING_TX_RING_TAIL_REG);
-               dev_info(&h->pdev->dev, "TX(%d) RING TAIL: %u\n", i, value);
+               dev_info(&h->pdev->dev, "TX(%u) RING TAIL: %u\n", i, value);
 
                value = readl_relaxed(ring->tqp->io_base +
                                      HNS3_RING_TX_RING_HEAD_REG);
-               dev_info(&h->pdev->dev, "TX(%d) RING HEAD: %u\n", i, value);
+               dev_info(&h->pdev->dev, "TX(%u) RING HEAD: %u\n", i, value);
 
                value = readl_relaxed(ring->tqp->io_base +
                                      HNS3_RING_TX_RING_FBDNUM_REG);
-               dev_info(&h->pdev->dev, "TX(%d) RING FBDNUM: %u\n", i, value);
+               dev_info(&h->pdev->dev, "TX(%u) RING FBDNUM: %u\n", i, value);
 
                value = readl_relaxed(ring->tqp->io_base +
                                      HNS3_RING_TX_RING_OFFSET_REG);
-               dev_info(&h->pdev->dev, "TX(%d) RING OFFSET: %u\n", i, value);
+               dev_info(&h->pdev->dev, "TX(%u) RING OFFSET: %u\n", i, value);
 
                value = readl_relaxed(ring->tqp->io_base +
                                      HNS3_RING_TX_RING_PKTNUM_RECORD_REG);
-               dev_info(&h->pdev->dev, "TX(%d) RING PKTNUM: %u\n\n", i,
+               dev_info(&h->pdev->dev, "TX(%u) RING PKTNUM: %u\n\n", i,
                         value);
        }
 
@@ -190,21 +190,24 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
        addr = le64_to_cpu(tx_desc->addr);
        dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index);
        dev_info(dev, "(TX)addr: %pad\n", &addr);
-       dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.vlan_tag);
-       dev_info(dev, "(TX)send_size: %u\n", tx_desc->tx.send_size);
+       dev_info(dev, "(TX)vlan_tag: %u\n", le16_to_cpu(tx_desc->tx.vlan_tag));
+       dev_info(dev, "(TX)send_size: %u\n",
+                le16_to_cpu(tx_desc->tx.send_size));
        dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso);
        dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len);
        dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len);
        dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len);
-       dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.outer_vlan_tag);
-       dev_info(dev, "(TX)tv: %u\n", tx_desc->tx.tv);
+       dev_info(dev, "(TX)vlan_tag: %u\n",
+                le16_to_cpu(tx_desc->tx.outer_vlan_tag));
+       dev_info(dev, "(TX)tv: %u\n", le16_to_cpu(tx_desc->tx.tv));
        dev_info(dev, "(TX)vlan_msec: %u\n", tx_desc->tx.ol_type_vlan_msec);
        dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len);
        dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len);
        dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len);
-       dev_info(dev, "(TX)paylen: %u\n", tx_desc->tx.paylen);
-       dev_info(dev, "(TX)vld_ra_ri: %u\n", tx_desc->tx.bdtp_fe_sc_vld_ra_ri);
-       dev_info(dev, "(TX)mss: %u\n", tx_desc->tx.mss);
+       dev_info(dev, "(TX)paylen: %u\n", le32_to_cpu(tx_desc->tx.paylen));
+       dev_info(dev, "(TX)vld_ra_ri: %u\n",
+                le16_to_cpu(tx_desc->tx.bdtp_fe_sc_vld_ra_ri));
+       dev_info(dev, "(TX)mss: %u\n", le16_to_cpu(tx_desc->tx.mss));
 
        ring  = &priv->ring[q_num + h->kinfo.num_tqps];
        value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
@@ -214,15 +217,19 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
        addr = le64_to_cpu(rx_desc->addr);
        dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
        dev_info(dev, "(RX)addr: %pad\n", &addr);
-       dev_info(dev, "(RX)l234_info: %u\n", rx_desc->rx.l234_info);
-       dev_info(dev, "(RX)pkt_len: %u\n", rx_desc->rx.pkt_len);
-       dev_info(dev, "(RX)size: %u\n", rx_desc->rx.size);
-       dev_info(dev, "(RX)rss_hash: %u\n", rx_desc->rx.rss_hash);
-       dev_info(dev, "(RX)fd_id: %u\n", rx_desc->rx.fd_id);
-       dev_info(dev, "(RX)vlan_tag: %u\n", rx_desc->rx.vlan_tag);
-       dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n", rx_desc->rx.o_dm_vlan_id_fb);
-       dev_info(dev, "(RX)ot_vlan_tag: %u\n", rx_desc->rx.ot_vlan_tag);
-       dev_info(dev, "(RX)bd_base_info: %u\n", rx_desc->rx.bd_base_info);
+       dev_info(dev, "(RX)l234_info: %u\n",
+                le32_to_cpu(rx_desc->rx.l234_info));
+       dev_info(dev, "(RX)pkt_len: %u\n", le16_to_cpu(rx_desc->rx.pkt_len));
+       dev_info(dev, "(RX)size: %u\n", le16_to_cpu(rx_desc->rx.size));
+       dev_info(dev, "(RX)rss_hash: %u\n", le32_to_cpu(rx_desc->rx.rss_hash));
+       dev_info(dev, "(RX)fd_id: %u\n", le16_to_cpu(rx_desc->rx.fd_id));
+       dev_info(dev, "(RX)vlan_tag: %u\n", le16_to_cpu(rx_desc->rx.vlan_tag));
+       dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n",
+                le16_to_cpu(rx_desc->rx.o_dm_vlan_id_fb));
+       dev_info(dev, "(RX)ot_vlan_tag: %u\n",
+                le16_to_cpu(rx_desc->rx.ot_vlan_tag));
+       dev_info(dev, "(RX)bd_base_info: %u\n",
+                le32_to_cpu(rx_desc->rx.bd_base_info));
 
        return 0;
 }
index 0fdd684..ba05368 100644 (file)
@@ -1710,8 +1710,8 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
        int ret = -EIO;
 
        netif_dbg(h, drv, netdev,
-                 "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=%u\n",
-                 vf, vlan, qos, vlan_proto);
+                 "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n",
+                 vf, vlan, qos, ntohs(vlan_proto));
 
        if (h->ae_algo->ops->set_vf_vlan_filter)
                ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
@@ -1771,7 +1771,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
 {
        struct hns3_nic_priv *priv = netdev_priv(ndev);
        struct hnae3_handle *h = hns3_get_handle(ndev);
-       struct hns3_enet_ring *tx_ring = NULL;
+       struct hns3_enet_ring *tx_ring;
        struct napi_struct *napi;
        int timeout_queue = 0;
        int hw_head, hw_tail;
@@ -1792,6 +1792,9 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
                    time_after(jiffies,
                               (trans_start + ndev->watchdog_timeo))) {
                        timeout_queue = i;
+                       netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
+                                   q->state,
+                                   jiffies_to_msecs(jiffies - trans_start));
                        break;
                }
        }
@@ -1999,7 +2002,7 @@ bool hns3_is_phys_func(struct pci_dev *pdev)
        case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
                return false;
        default:
-               dev_warn(&pdev->dev, "un-recognized pci device-id %d",
+               dev_warn(&pdev->dev, "un-recognized pci device-id %u",
                         dev_id);
        }
 
@@ -3936,14 +3939,14 @@ static void hns3_info_show(struct hns3_nic_priv *priv)
        struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
 
        dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
-       dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps);
-       dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size);
-       dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size);
-       dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len);
-       dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc);
-       dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc);
-       dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc);
-       dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu);
+       dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
+       dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
+       dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
+       dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
+       dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
+       dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
+       dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc);
+       dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu);
 }
 
 static int hns3_client_init(struct hnae3_handle *handle)
@@ -4563,7 +4566,7 @@ int hns3_set_channels(struct net_device *netdev,
        if (new_tqp_num > hns3_get_max_available_channels(h) ||
            new_tqp_num < 1) {
                dev_err(&netdev->dev,
-                       "Change tqps fail, the tqp range is from 1 to %d",
+                       "Change tqps fail, the tqp range is from 1 to %u",
                        hns3_get_max_available_channels(h));
                return -EINVAL;
        }
index 0725dc5..345633f 100644 (file)
@@ -186,7 +186,7 @@ enum hns3_nic_state {
 #define HNS3_TXD_MSS_S                         0
 #define HNS3_TXD_MSS_M                         (0x3fff << HNS3_TXD_MSS_S)
 
-#define HNS3_TX_LAST_SIZE_M                    0xffff
+#define HNS3_TX_LAST_SIZE_M                    0xffff
 
 #define HNS3_VECTOR_TX_IRQ                     BIT_ULL(0)
 #define HNS3_VECTOR_RX_IRQ                     BIT_ULL(1)
@@ -313,7 +313,7 @@ struct hns3_desc_cb {
 
        u16 reuse_flag;
 
-       /* desc type, used by the ring user to mark the type of the priv data */
+       /* desc type, used by the ring user to mark the type of the priv data */
        u16 type;
 };
 
index 50b07b9..b104d3c 100644 (file)
@@ -985,7 +985,7 @@ static int hns3_set_ringparam(struct net_device *ndev,
        }
 
        netdev_info(ndev,
-                   "Changing Tx/Rx ring depth from %d/%d to %d/%d\n",
+                   "Changing Tx/Rx ring depth from %u/%u to %u/%u\n",
                    old_tx_desc_num, old_rx_desc_num,
                    new_tx_desc_num, new_rx_desc_num);
 
@@ -1097,7 +1097,7 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
 
        if (queue >= queue_num) {
                netdev_err(netdev,
-                          "Invalid queue value %d! Queue max id=%d\n",
+                          "Invalid queue value %u! Queue max id=%u\n",
                           queue, queue_num - 1);
                return -EINVAL;
        }
@@ -1147,14 +1147,14 @@ static int hns3_check_gl_coalesce_para(struct net_device *netdev,
        rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
        if (rx_gl != cmd->rx_coalesce_usecs) {
                netdev_info(netdev,
-                           "rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+                           "rx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
                            cmd->rx_coalesce_usecs, rx_gl);
        }
 
        tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
        if (tx_gl != cmd->tx_coalesce_usecs) {
                netdev_info(netdev,
-                           "tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+                           "tx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
                            cmd->tx_coalesce_usecs, tx_gl);
        }
 
@@ -1182,7 +1182,7 @@ static int hns3_check_rl_coalesce_para(struct net_device *netdev,
        rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
        if (rl != cmd->rx_coalesce_usecs_high) {
                netdev_info(netdev,
-                           "usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n",
+                           "usecs_high(%u) rounded down to %u, because it must be multiple of 4.\n",
                            cmd->rx_coalesce_usecs_high, rl);
        }
 
@@ -1211,7 +1211,7 @@ static int hns3_check_coalesce_para(struct net_device *netdev,
        if (cmd->use_adaptive_tx_coalesce == 1 ||
            cmd->use_adaptive_rx_coalesce == 1) {
                netdev_info(netdev,
-                           "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will changed dynamically.\n",
+                           "adaptive-tx=%u and adaptive-rx=%u, tx_usecs or rx_usecs will changed dynamically.\n",
                            cmd->use_adaptive_tx_coalesce,
                            cmd->use_adaptive_rx_coalesce);
        }
index ecf58cf..940ead3 100644 (file)
@@ -145,7 +145,7 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw)
        rmb(); /* Make sure head is ready before touch any data */
 
        if (!is_valid_csq_clean_head(csq, head)) {
-               dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+               dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
                         csq->next_to_use, csq->next_to_clean);
                dev_warn(&hdev->pdev->dev,
                         "Disabling any further commands to IMP firmware\n");
@@ -314,11 +314,10 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
                } while (timeout < hw->cmq.tx_timeout);
        }
 
-       if (!complete) {
+       if (!complete)
                retval = -EBADE;
-       } else {
+       else
                retval = hclge_cmd_check_retval(hw, desc, num, ntc);
-       }
 
        /* Clean the command send queue */
        handle = hclge_cmd_csq_clean(hw);
index 919911f..af96e79 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/etherdevice.h>
 
 #define HCLGE_CMDQ_TX_TIMEOUT          30000
+#define HCLGE_DESC_DATA_LEN            6
 
 struct hclge_dev;
 struct hclge_desc {
@@ -19,7 +20,7 @@ struct hclge_desc {
        __le16 flag;
        __le16 retval;
        __le16 rsv;
-       __le32 data[6];
+       __le32 data[HCLGE_DESC_DATA_LEN];
 };
 
 struct hclge_cmq_ring {
@@ -260,6 +261,7 @@ enum hclge_opcode_type {
 
        /* NCL config command */
        HCLGE_OPC_QUERY_NCL_CONFIG      = 0x7011,
+
        /* M7 stats command */
        HCLGE_OPC_M7_STATS_BD           = 0x7012,
        HCLGE_OPC_M7_STATS_INFO         = 0x7013,
@@ -429,8 +431,10 @@ struct hclge_rx_pkt_buf_cmd {
 #define HCLGE_PF_MAC_NUM_MASK  0x3
 #define HCLGE_PF_STATE_MAIN    BIT(HCLGE_PF_STATE_MAIN_B)
 #define HCLGE_PF_STATE_DONE    BIT(HCLGE_PF_STATE_DONE_B)
+#define HCLGE_VF_RST_STATUS_CMD        4
+
 struct hclge_func_status_cmd {
-       __le32  vf_rst_state[4];
+       __le32  vf_rst_state[HCLGE_VF_RST_STATUS_CMD];
        u8 pf_state;
        u8 mac_id;
        u8 rsv1;
@@ -486,10 +490,12 @@ struct hclge_pf_res_cmd {
 #define HCLGE_CFG_UMV_TBL_SPACE_S      16
 #define HCLGE_CFG_UMV_TBL_SPACE_M      GENMASK(31, 16)
 
+#define HCLGE_CFG_CMD_CNT              4
+
 struct hclge_cfg_param_cmd {
        __le32 offset;
        __le32 rsv;
-       __le32 param[4];
+       __le32 param[HCLGE_CFG_CMD_CNT];
 };
 
 #define HCLGE_MAC_MODE         0x0
@@ -758,20 +764,27 @@ struct hclge_vlan_filter_ctrl_cmd {
        u8 rsv2[19];
 };
 
+#define HCLGE_VLAN_ID_OFFSET_STEP      160
+#define HCLGE_VLAN_BYTE_SIZE           8
+#define        HCLGE_VLAN_OFFSET_BITMAP \
+       (HCLGE_VLAN_ID_OFFSET_STEP / HCLGE_VLAN_BYTE_SIZE)
+
 struct hclge_vlan_filter_pf_cfg_cmd {
        u8 vlan_offset;
        u8 vlan_cfg;
        u8 rsv[2];
-       u8 vlan_offset_bitmap[20];
+       u8 vlan_offset_bitmap[HCLGE_VLAN_OFFSET_BITMAP];
 };
 
+#define HCLGE_MAX_VF_BYTES  16
+
 struct hclge_vlan_filter_vf_cfg_cmd {
        __le16 vlan_id;
        u8  resp_code;
        u8  rsv;
        u8  vlan_cfg;
        u8  rsv1[3];
-       u8  vf_bitmap[16];
+       u8  vf_bitmap[HCLGE_MAX_VF_BYTES];
 };
 
 #define HCLGE_SWITCH_ANTI_SPOOF_B      0U
@@ -806,6 +819,7 @@ enum hclge_mac_vlan_cfg_sel {
 #define HCLGE_CFG_NIC_ROCE_SEL_B       4
 #define HCLGE_ACCEPT_TAG2_B            5
 #define HCLGE_ACCEPT_UNTAG2_B          6
+#define HCLGE_VF_NUM_PER_BYTE          8
 
 struct hclge_vport_vtag_tx_cfg_cmd {
        u8 vport_vlan_cfg;
@@ -813,7 +827,7 @@ struct hclge_vport_vtag_tx_cfg_cmd {
        u8 rsv1[2];
        __le16 def_vlan_tag1;
        __le16 def_vlan_tag2;
-       u8 vf_bitmap[8];
+       u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE];
        u8 rsv2[8];
 };
 
@@ -825,7 +839,7 @@ struct hclge_vport_vtag_rx_cfg_cmd {
        u8 vport_vlan_cfg;
        u8 vf_offset;
        u8 rsv1[6];
-       u8 vf_bitmap[8];
+       u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE];
        u8 rsv2[8];
 };
 
@@ -864,7 +878,7 @@ struct hclge_mac_ethertype_idx_rd_cmd {
        u8      flags;
        u8      resp_code;
        __le16  vlan_tag;
-       u8      mac_addr[6];
+       u8      mac_addr[ETH_ALEN];
        __le16  index;
        __le16  ethter_type;
        __le16  egress_port;
index c063301..49ad848 100644 (file)
@@ -87,7 +87,7 @@ static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
                if (prio_tc[i] >= num_tc) {
                        dev_err(&hdev->pdev->dev,
-                               "prio_tc[%u] checking failed, %u >= num_tc(%u)\n",
+                               "prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
                                i, prio_tc[i], num_tc);
                        return -EINVAL;
                }
index 0ccc8e7..112df34 100644 (file)
@@ -145,7 +145,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
                return;
        }
 
-       buf_len  = sizeof(struct hclge_desc) * bd_num;
+       buf_len = sizeof(struct hclge_desc) * bd_num;
        desc_src = kzalloc(buf_len, GFP_KERNEL);
        if (!desc_src) {
                dev_err(&hdev->pdev->dev, "call kzalloc failed\n");
@@ -153,7 +153,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
        }
 
        desc = desc_src;
-       ret  = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
+       ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
        if (ret) {
                kfree(desc_src);
                return;
@@ -169,7 +169,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
                if (dfx_message->flag)
                        dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
                                 dfx_message->message,
-                                desc->data[i % entries_per_desc]);
+                                le32_to_cpu(desc->data[i % entries_per_desc]));
 
                dfx_message++;
        }
@@ -237,44 +237,48 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
        if (ret)
                return;
 
-       dev_info(dev, "sch_nq_cnt: 0x%x\n", desc[0].data[1]);
+       dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
 
        ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT);
        if (ret)
                return;
 
-       dev_info(dev, "sch_rq_cnt: 0x%x\n", desc[0].data[1]);
+       dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
 
        ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS);
        if (ret)
                return;
 
-       dev_info(dev, "pri_bp: 0x%x\n", desc[0].data[1]);
-       dev_info(dev, "fifo_dfx_info: 0x%x\n", desc[0].data[2]);
-       dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n", desc[0].data[3]);
-       dev_info(dev, "tx_private_waterline: 0x%x\n", desc[0].data[4]);
-       dev_info(dev, "tm_bypass_en: 0x%x\n", desc[0].data[5]);
-       dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", desc[1].data[0]);
-       dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", desc[1].data[1]);
+       dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+       dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2]));
+       dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n",
+                le32_to_cpu(desc[0].data[3]));
+       dev_info(dev, "tx_private_waterline: 0x%x\n",
+                le32_to_cpu(desc[0].data[4]));
+       dev_info(dev, "tm_bypass_en: 0x%x\n", le32_to_cpu(desc[0].data[5]));
+       dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0]));
+       dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1]));
 
        ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
                                 HCLGE_OPC_TM_INTERNAL_CNT);
        if (ret)
                return;
 
-       dev_info(dev, "SCH_NIC_NUM: 0x%x\n", desc[0].data[1]);
-       dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", desc[0].data[2]);
+       dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+       dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2]));
 
        ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
                                 HCLGE_OPC_TM_INTERNAL_STS_1);
        if (ret)
                return;
 
-       dev_info(dev, "TC_MAP_SEL: 0x%x\n", desc[0].data[1]);
-       dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", desc[0].data[2]);
-       dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", desc[0].data[3]);
-       dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[4]);
-       dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[5]);
+       dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+       dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2]));
+       dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[3]));
+       dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n",
+                le32_to_cpu(desc[0].data[4]));
+       dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n",
+                le32_to_cpu(desc[0].data[5]));
 }
 
 static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
@@ -364,7 +368,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
        pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
        dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
        dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
-                pg_shap_cfg_cmd->pg_shapping_para);
+                le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
 
        cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
        hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -375,7 +379,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
        pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
        dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
        dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
-                pg_shap_cfg_cmd->pg_shapping_para);
+                le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
 
        cmd = HCLGE_OPC_TM_PORT_SHAPPING;
        hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -385,7 +389,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
 
        port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
        dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
-                port_shap_cfg_cmd->port_shapping_para);
+                le32_to_cpu(port_shap_cfg_cmd->port_shapping_para));
 
        cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
        hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -393,7 +397,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
        if (ret)
                goto err_tm_pg_cmd_send;
 
-       dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", desc.data[0]);
+       dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n",
+                le32_to_cpu(desc.data[0]));
 
        cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
        hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -401,7 +406,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
        if (ret)
                goto err_tm_pg_cmd_send;
 
-       dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", desc.data[0]);
+       dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n",
+                le32_to_cpu(desc.data[0]));
 
        cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
        hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -409,7 +415,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
        if (ret)
                goto err_tm_pg_cmd_send;
 
-       dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]);
+       dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n",
+                le32_to_cpu(desc.data[0]));
 
        if (!hnae3_dev_dcb_supported(hdev)) {
                dev_info(&hdev->pdev->dev,
@@ -429,7 +436,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
        dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
                 bp_to_qs_map_cmd->qs_group_id);
        dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
-                bp_to_qs_map_cmd->qs_bit_map);
+                le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map));
        return;
 
 err_tm_pg_cmd_send:
@@ -471,7 +478,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
 
        qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
        dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
-                qs_to_pri_map->qs_id);
+                le16_to_cpu(qs_to_pri_map->qs_id));
        dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
                 qs_to_pri_map->priority);
        dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
@@ -484,9 +491,10 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
                goto err_tm_cmd_send;
 
        nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
-       dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id);
+       dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n",
+                le16_to_cpu(nq_to_qs_map->nq_id));
        dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
-                nq_to_qs_map->qset_id);
+                le16_to_cpu(nq_to_qs_map->qset_id));
 
        cmd = HCLGE_OPC_TM_PG_WEIGHT;
        hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -505,7 +513,8 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
                goto err_tm_cmd_send;
 
        qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
-       dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", qs_weight->qs_id);
+       dev_info(&hdev->pdev->dev, "QS qs_id: %u\n",
+                le16_to_cpu(qs_weight->qs_id));
        dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);
 
        cmd = HCLGE_OPC_TM_PRI_WEIGHT;
@@ -527,7 +536,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
        shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
        dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
        dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
-                shap_cfg_cmd->pri_shapping_para);
+                le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
 
        cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
        hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -538,7 +547,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
        shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
        dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
        dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
-                shap_cfg_cmd->pri_shapping_para);
+                le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
 
        hclge_dbg_dump_tm_pg(hdev);
 
@@ -658,7 +667,7 @@ static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
        dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
                 pause_param->pause_trans_gap);
        dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
-                pause_param->pause_trans_time);
+                le16_to_cpu(pause_param->pause_trans_time));
 }
 
 static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
@@ -712,7 +721,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
        tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
        for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
                dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
-                        tx_buf_cmd->tx_pkt_buff[i]);
+                        le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
 
        cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
        hclge_cmd_setup_basic_desc(desc, cmd, true);
@@ -724,10 +733,10 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
        rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
        for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
                dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
-                        rx_buf_cmd->buf_num[i]);
+                        le16_to_cpu(rx_buf_cmd->buf_num[i]));
 
        dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
-                rx_buf_cmd->shared_buf);
+                le16_to_cpu(rx_buf_cmd->shared_buf));
 
        cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
        hclge_cmd_setup_basic_desc(desc, cmd, true);
@@ -738,7 +747,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
        rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
        dev_info(&hdev->pdev->dev, "\n");
        dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
-                rx_com_wl->com_wl.high, rx_com_wl->com_wl.low);
+                le16_to_cpu(rx_com_wl->com_wl.high),
+                le16_to_cpu(rx_com_wl->com_wl.low));
 
        cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
        hclge_cmd_setup_basic_desc(desc, cmd, true);
@@ -749,7 +759,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
        rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
        dev_info(&hdev->pdev->dev,
                 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
-                rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low);
+                le16_to_cpu(rx_packet_cnt->com_wl.high),
+                le16_to_cpu(rx_packet_cnt->com_wl.low));
        dev_info(&hdev->pdev->dev, "\n");
 
        if (!hnae3_dev_dcb_supported(hdev)) {
@@ -769,14 +780,16 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
        for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
                dev_info(&hdev->pdev->dev,
                         "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
-                        rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
+                        le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+                        le16_to_cpu(rx_priv_wl->tc_wl[i].low));
 
        rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
        for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
                dev_info(&hdev->pdev->dev,
                         "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
                         i + HCLGE_TC_NUM_ONE_DESC,
-                        rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
+                        le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+                        le16_to_cpu(rx_priv_wl->tc_wl[i].low));
 
        cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
        hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
@@ -791,16 +804,16 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
        for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
                dev_info(&hdev->pdev->dev,
                         "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
-                        rx_com_thrd->com_thrd[i].high,
-                        rx_com_thrd->com_thrd[i].low);
+                        le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+                        le16_to_cpu(rx_com_thrd->com_thrd[i].low));
 
        rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
        for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
                dev_info(&hdev->pdev->dev,
                         "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
                         i + HCLGE_TC_NUM_ONE_DESC,
-                        rx_com_thrd->com_thrd[i].high,
-                        rx_com_thrd->com_thrd[i].low);
+                        le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+                        le16_to_cpu(rx_com_thrd->com_thrd[i].low));
        return;
 
 err_qos_cmd_send:
@@ -845,7 +858,8 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
                memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
                snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
                         "%02u   |%02x:%02x:%02x:%02x:%02x:%02x|",
-                        req0->index, req0->mac_addr[0], req0->mac_addr[1],
+                        le16_to_cpu(req0->index),
+                        req0->mac_addr[0], req0->mac_addr[1],
                         req0->mac_addr[2], req0->mac_addr[3],
                         req0->mac_addr[4], req0->mac_addr[5]);
 
@@ -929,7 +943,7 @@ static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
        }
 }
 
-static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
+void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
 {
        dev_info(&hdev->pdev->dev, "PF reset count: %u\n",
                 hdev->rst_stats.pf_rst_cnt);
@@ -945,8 +959,6 @@ static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
                 hdev->rst_stats.hw_reset_done_cnt);
        dev_info(&hdev->pdev->dev, "reset count: %u\n",
                 hdev->rst_stats.reset_cnt);
-       dev_info(&hdev->pdev->dev, "reset count: %u\n",
-                hdev->rst_stats.reset_cnt);
        dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
                 hdev->rst_stats.reset_fail_cnt);
        dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
@@ -961,6 +973,7 @@ static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
                 hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
        dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
                 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
+       dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
 }
 
 static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
index 87dece0..dc66b4e 100644 (file)
@@ -1747,7 +1747,7 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
 
        if (vf_id) {
                if (vf_id >= hdev->num_alloc_vport) {
-                       dev_err(dev, "invalid vf id(%d)\n", vf_id);
+                       dev_err(dev, "invalid vf id(%u)\n", vf_id);
                        return;
                }
 
index bf6bca2..4f8f068 100644 (file)
@@ -1398,7 +1398,7 @@ static int hclge_configure(struct hclge_dev *hdev)
 
        if ((hdev->tc_max > HNAE3_MAX_TC) ||
            (hdev->tc_max < 1)) {
-               dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
+               dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
                         hdev->tc_max);
                hdev->tc_max = 1;
        }
@@ -1658,7 +1658,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
        num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
 
        if (hdev->num_tqps < num_vport) {
-               dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
+               dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
                        hdev->num_tqps, num_vport);
                return -EINVAL;
        }
@@ -2345,7 +2345,7 @@ static int hclge_init_msi(struct hclge_dev *hdev)
        }
        if (vectors < hdev->num_msi)
                dev_warn(&hdev->pdev->dev,
-                        "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+                        "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
                         hdev->num_msi, vectors);
 
        hdev->num_msi = vectors;
@@ -2777,7 +2777,7 @@ static void hclge_update_port_capability(struct hclge_mac *mac)
        else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
                mac->module_type = HNAE3_MODULE_TYPE_TP;
 
-       if (mac->support_autoneg == true) {
+       if (mac->support_autoneg) {
                linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
                linkmode_copy(mac->advertising, mac->supported);
        } else {
@@ -3280,7 +3280,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
 
                if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
                        dev_err(&hdev->pdev->dev,
-                               "flr wait timeout: %d\n", cnt);
+                               "flr wait timeout: %u\n", cnt);
                        return -EBUSY;
                }
 
@@ -3330,7 +3330,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
                ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
-                               "set vf(%d) rst failed %d!\n",
+                               "set vf(%u) rst failed %d!\n",
                                vport->vport_id, ret);
                        return ret;
                }
@@ -3345,7 +3345,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
                ret = hclge_inform_reset_assert_to_vf(vport);
                if (ret)
                        dev_warn(&hdev->pdev->dev,
-                                "inform reset to vf(%d) failed %d!\n",
+                                "inform reset to vf(%u) failed %d!\n",
                                 vport->vport_id, ret);
        }
 
@@ -3658,7 +3658,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
                hdev->rst_stats.reset_fail_cnt++;
                set_bit(hdev->reset_type, &hdev->reset_pending);
                dev_info(&hdev->pdev->dev,
-                        "re-schedule reset task(%d)\n",
+                        "re-schedule reset task(%u)\n",
                         hdev->rst_stats.reset_fail_cnt);
                return true;
        }
@@ -3669,6 +3669,9 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
        hclge_reset_handshake(hdev, true);
 
        dev_err(&hdev->pdev->dev, "Reset fail!\n");
+
+       hclge_dbg_dump_rst_info(hdev);
+
        return false;
 }
 
@@ -3852,12 +3855,13 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
                                  HCLGE_RESET_INTERVAL))) {
                mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
                return;
-       } else if (hdev->default_reset_request)
+       } else if (hdev->default_reset_request) {
                hdev->reset_level =
                        hclge_get_reset_level(ae_dev,
                                              &hdev->default_reset_request);
-       else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
+       } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
                hdev->reset_level = HNAE3_FUNC_RESET;
+       }
 
        dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
                 hdev->reset_level);
@@ -3982,6 +3986,7 @@ static void hclge_service_task(struct work_struct *work)
        hclge_update_link_status(hdev);
        hclge_update_vport_alive(hdev);
        hclge_sync_vlan_filter(hdev);
+
        if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
                hclge_rfs_filter_expire(hdev);
                hdev->fd_arfs_expire_timer = 0;
@@ -4488,7 +4493,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
         */
        if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
                dev_err(&hdev->pdev->dev,
-                       "Configure rss tc size failed, invalid TC_SIZE = %d\n",
+                       "Configure rss tc size failed, invalid TC_SIZE = %u\n",
                        rss_size);
                return -EINVAL;
        }
@@ -4838,7 +4843,7 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
                break;
        default:
                dev_err(&hdev->pdev->dev,
-                       "Unsupported flow director mode %d\n",
+                       "Unsupported flow director mode %u\n",
                        hdev->fd_cfg.fd_mode);
                return -EOPNOTSUPP;
        }
@@ -5168,7 +5173,7 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
                                   true);
        if (ret) {
                dev_err(&hdev->pdev->dev,
-                       "fd key_y config fail, loc=%d, ret=%d\n",
+                       "fd key_y config fail, loc=%u, ret=%d\n",
                        rule->queue_id, ret);
                return ret;
        }
@@ -5177,7 +5182,7 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
                                   true);
        if (ret)
                dev_err(&hdev->pdev->dev,
-                       "fd key_x config fail, loc=%d, ret=%d\n",
+                       "fd key_x config fail, loc=%u, ret=%d\n",
                        rule->queue_id, ret);
        return ret;
 }
@@ -5426,7 +5431,7 @@ static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
                }
        } else if (!is_add) {
                dev_err(&hdev->pdev->dev,
-                       "delete fail, rule %d is inexistent\n",
+                       "delete fail, rule %u is inexistent\n",
                        location);
                return -EINVAL;
        }
@@ -5666,7 +5671,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
 
                if (vf > hdev->num_req_vfs) {
                        dev_err(&hdev->pdev->dev,
-                               "Error: vf id (%d) > max vf num (%d)\n",
+                               "Error: vf id (%u) > max vf num (%u)\n",
                                vf, hdev->num_req_vfs);
                        return -EINVAL;
                }
@@ -5676,7 +5681,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
 
                if (ring >= tqps) {
                        dev_err(&hdev->pdev->dev,
-                               "Error: queue id (%d) > max tqp num (%d)\n",
+                               "Error: queue id (%u) > max tqp num (%u)\n",
                                ring, tqps - 1);
                        return -EINVAL;
                }
@@ -5735,7 +5740,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
 
        if (!hclge_fd_rule_exist(hdev, fs->location)) {
                dev_err(&hdev->pdev->dev,
-                       "Delete fail, rule %d is inexistent\n", fs->location);
+                       "Delete fail, rule %u is inexistent\n", fs->location);
                return -ENOENT;
        }
 
@@ -5812,7 +5817,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
 
                if (ret) {
                        dev_warn(&hdev->pdev->dev,
-                                "Restore rule %d failed, remove it\n",
+                                "Restore rule %u failed, remove it\n",
                                 rule->location);
                        clear_bit(rule->location, hdev->fd_bmap);
                        hlist_del(&rule->rule_node);
@@ -6805,7 +6810,7 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
 
        if (cmdq_resp) {
                dev_err(&hdev->pdev->dev,
-                       "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
+                       "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
                        cmdq_resp);
                return -EIO;
        }
@@ -7057,7 +7062,7 @@ static int hclge_init_umv_space(struct hclge_dev *hdev)
 
        if (allocated_size < hdev->wanted_umv_size)
                dev_warn(&hdev->pdev->dev,
-                        "Alloc umv space failed, want %d, get %d\n",
+                        "Alloc umv space failed, want %u, get %u\n",
                         hdev->wanted_umv_size, allocated_size);
 
        mutex_init(&hdev->umv_mutex);
@@ -7225,7 +7230,7 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
 
        /* check if we just hit the duplicate */
        if (!ret) {
-               dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
+               dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
                         vport->vport_id, addr);
                return 0;
        }
@@ -7406,7 +7411,7 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
        mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
 
        list_for_each_entry_safe(mac_cfg, tmp, list, node) {
-               if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
+               if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
                        if (uc_flag && mac_cfg->hd_tbl_status)
                                hclge_rm_uc_addr_common(vport, mac_addr);
 
@@ -7478,7 +7483,7 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
 
        if (cmdq_resp) {
                dev_err(&hdev->pdev->dev,
-                       "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
+                       "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
                        cmdq_resp);
                return -EIO;
        }
@@ -7500,7 +7505,7 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
                break;
        default:
                dev_err(&hdev->pdev->dev,
-                       "add mac ethertype failed for undefined, code=%d.\n",
+                       "add mac ethertype failed for undefined, code=%u.\n",
                        resp_code);
                return_status = -EIO;
        }
@@ -7741,8 +7746,6 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
                                    bool is_kill, u16 vlan,
                                    __be16 proto)
 {
-#define HCLGE_MAX_VF_BYTES  16
-
        struct hclge_vport *vport = &hdev->vport[vfid];
        struct hclge_vlan_filter_vf_cfg_cmd *req0;
        struct hclge_vlan_filter_vf_cfg_cmd *req1;
@@ -7807,7 +7810,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
                }
 
                dev_err(&hdev->pdev->dev,
-                       "Add vf vlan filter fail, ret =%d.\n",
+                       "Add vf vlan filter fail, ret =%u.\n",
                        req0->resp_code);
        } else {
 #define HCLGE_VF_VLAN_DEL_NO_FOUND     1
@@ -7823,7 +7826,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
                        return 0;
 
                dev_err(&hdev->pdev->dev,
-                       "Kill vf vlan filter fail, ret =%d.\n",
+                       "Kill vf vlan filter fail, ret =%u.\n",
                        req0->resp_code);
        }
 
@@ -7842,9 +7845,10 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
 
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
 
-       vlan_offset_160 = vlan_id / 160;
-       vlan_offset_byte = (vlan_id % 160) / 8;
-       vlan_offset_byte_val = 1 << (vlan_id % 8);
+       vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
+       vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
+                          HCLGE_VLAN_BYTE_SIZE;
+       vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
 
        req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
        req->vlan_offset = vlan_offset_160;
@@ -7872,7 +7876,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
                                       proto);
        if (ret) {
                dev_err(&hdev->pdev->dev,
-                       "Set %d vport vlan filter config fail, ret =%d.\n",
+                       "Set %u vport vlan filter config fail, ret =%d.\n",
                        vport_id, ret);
                return ret;
        }
@@ -7884,7 +7888,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
 
        if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
                dev_err(&hdev->pdev->dev,
-                       "Add port vlan failed, vport %d is already in vlan %d\n",
+                       "Add port vlan failed, vport %u is already in vlan %u\n",
                        vport_id, vlan_id);
                return -EINVAL;
        }
@@ -7892,7 +7896,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
        if (is_kill &&
            !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
                dev_err(&hdev->pdev->dev,
-                       "Delete port vlan failed, vport %d is not in vlan %d\n",
+                       "Delete port vlan failed, vport %u is not in vlan %u\n",
                        vport_id, vlan_id);
                return -EINVAL;
        }
@@ -8548,6 +8552,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
        struct hclge_dev *hdev = vport->back;
        int i, max_frm_size, ret;
 
+       /* HW supprt 2 layer vlan */
        max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
        if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
            max_frm_size > HCLGE_MAC_MAX_FRAME)
@@ -8963,16 +8968,16 @@ static void hclge_info_show(struct hclge_dev *hdev)
 
        dev_info(dev, "PF info begin:\n");
 
-       dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
-       dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
-       dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
-       dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
-       dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
-       dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
-       dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
-       dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
-       dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
-       dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
+       dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
+       dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
+       dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
+       dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
+       dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
+       dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
+       dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
+       dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
+       dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
+       dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
        dev_info(dev, "This is %s PF\n",
                 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
        dev_info(dev, "DCB %s\n",
@@ -8988,10 +8993,9 @@ static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
 {
        struct hnae3_client *client = vport->nic.client;
        struct hclge_dev *hdev = ae_dev->priv;
-       int rst_cnt;
+       int rst_cnt = hdev->rst_stats.reset_cnt;
        int ret;
 
-       rst_cnt = hdev->rst_stats.reset_cnt;
        ret = client->ops->init_instance(&vport->nic);
        if (ret)
                return ret;
@@ -9091,7 +9095,6 @@ static int hclge_init_client_instance(struct hnae3_client *client,
 
                switch (client->type) {
                case HNAE3_CLIENT_KNIC:
-
                        hdev->nic_client = client;
                        vport->nic.client = client;
                        ret = hclge_init_nic_client_instance(ae_dev, vport);
@@ -9290,7 +9293,7 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
                ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
                if (ret)
                        dev_warn(&hdev->pdev->dev,
-                                "clear vf(%d) rst failed %d!\n",
+                                "clear vf(%u) rst failed %d!\n",
                                 vport->vport_id, ret);
        }
 }
@@ -9312,6 +9315,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
        hdev->reset_type = HNAE3_NONE_RESET;
        hdev->reset_level = HNAE3_FUNC_RESET;
        ae_dev->priv = hdev;
+
+       /* HW supprt 2 layer vlan */
        hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
 
        mutex_init(&hdev->vport_lock);
@@ -9909,8 +9914,8 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
        u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
        struct hclge_dev *hdev = vport->back;
        u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
-       int cur_rss_size = kinfo->rss_size;
-       int cur_tqps = kinfo->num_tqps;
+       u16 cur_rss_size = kinfo->rss_size;
+       u16 cur_tqps = kinfo->num_tqps;
        u16 tc_valid[HCLGE_MAX_TC_NUM];
        u16 roundup_size;
        u32 *rss_indir;
@@ -9964,7 +9969,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
 out:
        if (!ret)
                dev_info(&hdev->pdev->dev,
-                        "Channels changed, rss_size from %d to %d, tqps from %d to %d",
+                        "Channels changed, rss_size from %u to %u, tqps from %u to %u",
                         cur_rss_size, kinfo->rss_size,
                         cur_tqps, kinfo->rss_size * kinfo->num_tc);
 
index 9e59f0e..599f76a 100644 (file)
 
 /* Factor used to calculate offset and bitmap of VF num */
 #define HCLGE_VF_NUM_PER_CMD           64
-#define HCLGE_VF_NUM_PER_BYTE          8
 
 enum HLCGE_PORT_TYPE {
        HOST_PORT,
@@ -656,7 +655,6 @@ struct hclge_rst_stats {
        u32 hw_reset_done_cnt;  /* the number of HW reset has completed */
        u32 pf_rst_cnt;         /* the number of PF reset */
        u32 flr_rst_cnt;        /* the number of FLR */
-       u32 core_rst_cnt;       /* the number of CORE reset */
        u32 global_rst_cnt;     /* the number of GLOBAL */
        u32 imp_rst_cnt;        /* the number of IMP reset */
        u32 reset_cnt;          /* the number of reset */
@@ -1005,4 +1003,5 @@ int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
 void hclge_report_hw_error(struct hclge_dev *hdev,
                           enum hnae3_hw_error_type type);
 void hclge_inform_vf_promisc_info(struct hclge_vport *vport);
+void hclge_dbg_dump_rst_info(struct hclge_dev *hdev);
 #endif
index 97463e1..0b433eb 100644 (file)
@@ -26,7 +26,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
 
        if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
                dev_err(&hdev->pdev->dev,
-                       "PF fail to gen resp to VF len %d exceeds max len %d\n",
+                       "PF fail to gen resp to VF len %u exceeds max len %u\n",
                        resp_data_len,
                        HCLGE_MBX_MAX_RESP_DATA_SIZE);
                /* If resp_data_len is too long, set the value to max length
@@ -285,7 +285,7 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
                                                 false, HCLGE_MAC_ADDR_UC);
        } else {
                dev_err(&hdev->pdev->dev,
-                       "failed to set unicast mac addr, unknown subcode %d\n",
+                       "failed to set unicast mac addr, unknown subcode %u\n",
                        mbx_req->msg[1]);
                return -EIO;
        }
@@ -319,7 +319,7 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
                                                 false, HCLGE_MAC_ADDR_MC);
        } else {
                dev_err(&hdev->pdev->dev,
-                       "failed to set mcast mac addr, unknown subcode %d\n",
+                       "failed to set mcast mac addr, unknown subcode %u\n",
                        mbx_req->msg[1]);
                return -EIO;
        }
@@ -555,7 +555,7 @@ static void hclge_reset_vf(struct hclge_vport *vport,
        struct hclge_dev *hdev = vport->back;
        int ret;
 
-       dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!",
+       dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
                 vport->vport_id);
 
        ret = hclge_func_reset_cmd(hdev, vport->vport_id);
@@ -590,7 +590,8 @@ static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
        qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
        memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf));
 
-       return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2);
+       return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
+                                   sizeof(resp_data));
 }
 
 static int hclge_get_rss_key(struct hclge_vport *vport,
@@ -680,7 +681,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
                flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
                if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
                        dev_warn(&hdev->pdev->dev,
-                                "dropped invalid mailbox message, code = %d\n",
+                                "dropped invalid mailbox message, code = %u\n",
                                 req->msg[0]);
 
                        /* dropping/not processing this invalid message */
@@ -827,7 +828,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
                        break;
                default:
                        dev_err(&hdev->pdev->dev,
-                               "un-supported mailbox message, code = %d\n",
+                               "un-supported mailbox message, code = %u\n",
                                req->msg[0]);
                        break;
                }
index dc4dfd4..696c5ae 100644 (file)
@@ -134,7 +134,7 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
                         "no phy device is connected to mdio bus\n");
                return 0;
        } else if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
-               dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n",
+               dev_err(&hdev->pdev->dev, "phy_addr(%u) is too large.\n",
                        hdev->hw.mac.phy_addr);
                return -EINVAL;
        }
index b3c30e5..fbc39a2 100644 (file)
@@ -544,7 +544,7 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
-                               "vf%d, qs%u failed to set tx_rate:%d, ret=%d\n",
+                               "vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
                                vport->vport_id, shap_cfg_cmd->qs_id,
                                max_tx_rate, ret);
                        return ret;
@@ -575,7 +575,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
        /* Set to user value, no larger than max_rss_size. */
        if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
            kinfo->req_rss_size <= max_rss_size) {
-               dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
+               dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
                         kinfo->rss_size, kinfo->req_rss_size);
                kinfo->rss_size = kinfo->req_rss_size;
        } else if (kinfo->rss_size > max_rss_size ||
index d5d1cc5..af2245e 100644 (file)
@@ -50,7 +50,7 @@ static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
        rmb(); /* Make sure head is ready before touch any data */
 
        if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
-               dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+               dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
                         csq->next_to_use, csq->next_to_clean);
                dev_warn(&hdev->pdev->dev,
                         "Disabling any further commands to IMP firmware\n");
@@ -92,9 +92,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
        u32 reg_val;
 
        if (ring->flag == HCLGEVF_TYPE_CSQ) {
-               reg_val = (u32)ring->desc_dma_addr;
+               reg_val = lower_32_bits(ring->desc_dma_addr);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
-               reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+               reg_val = upper_32_bits(ring->desc_dma_addr);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
 
                reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
@@ -105,9 +105,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
        } else {
-               reg_val = (u32)ring->desc_dma_addr;
+               reg_val = lower_32_bits(ring->desc_dma_addr);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
-               reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+               reg_val = upper_32_bits(ring->desc_dma_addr);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
 
                reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
index 408e386..25d78a5 100644 (file)
@@ -1549,12 +1549,39 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
        return ret;
 }
 
+static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
+{
+       dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
+                hdev->rst_stats.vf_func_rst_cnt);
+       dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
+                hdev->rst_stats.flr_rst_cnt);
+       dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
+                hdev->rst_stats.vf_rst_cnt);
+       dev_info(&hdev->pdev->dev, "reset done count: %u\n",
+                hdev->rst_stats.rst_done_cnt);
+       dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
+                hdev->rst_stats.hw_rst_done_cnt);
+       dev_info(&hdev->pdev->dev, "reset count: %u\n",
+                hdev->rst_stats.rst_cnt);
+       dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
+                hdev->rst_stats.rst_fail_cnt);
+       dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
+                hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
+       dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
+                hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STAT_REG));
+       dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
+                hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
+       dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
+                hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
+       dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
+}
+
 static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
 {
        /* recover handshake status with IMP when reset fail */
        hclgevf_reset_handshake(hdev, true);
        hdev->rst_stats.rst_fail_cnt++;
-       dev_err(&hdev->pdev->dev, "failed to reset VF(%d)\n",
+       dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
                hdev->rst_stats.rst_fail_cnt);
 
        if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
@@ -1563,6 +1590,8 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
        if (hclgevf_is_reset_pending(hdev)) {
                set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
                hclgevf_reset_task_schedule(hdev);
+       } else {
+               hclgevf_dump_rst_info(hdev);
        }
 }
 
@@ -1784,6 +1813,8 @@ static void hclgevf_service_timer(struct timer_list *t)
 
 static void hclgevf_reset_service_task(struct work_struct *work)
 {
+#define        HCLGEVF_MAX_RESET_ATTEMPTS_CNT  3
+
        struct hclgevf_dev *hdev =
                container_of(work, struct hclgevf_dev, rst_service_task);
        int ret;
@@ -1836,7 +1867,7 @@ static void hclgevf_reset_service_task(struct work_struct *work)
                 * We cannot do much for 2. but to check first we can try reset
                 * our PCIe + stack and see if it alleviates the problem.
                 */
-               if (hdev->reset_attempts > 3) {
+               if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
                        /* prepare for full reset of stack + pcie interface */
                        set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
 
@@ -2139,7 +2170,6 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
                ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
                if (ret)
                        return ret;
-
        }
 
        /* Initialize RSS indirect table */
@@ -2308,7 +2338,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
        }
        if (vectors < hdev->num_msi)
                dev_warn(&hdev->pdev->dev,
-                        "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+                        "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
                         hdev->num_msi, vectors);
 
        hdev->num_msi = vectors;
@@ -2384,12 +2414,12 @@ static void hclgevf_info_show(struct hclgevf_dev *hdev)
 
        dev_info(dev, "VF info begin:\n");
 
-       dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
-       dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
-       dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
-       dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
-       dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
-       dev_info(dev, "PF media type of this VF: %d\n",
+       dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
+       dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
+       dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
+       dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
+       dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
+       dev_info(dev, "PF media type of this VF: %u\n",
                 hdev->hw.mac.media_type);
 
        dev_info(dev, "VF info end.\n");
index 72bacf8..7cbd715 100644 (file)
@@ -33,7 +33,7 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
 
        if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
                dev_err(&hdev->pdev->dev,
-                       "VF mbx response len(=%d) exceeds maximum(=%d)\n",
+                       "VF mbx response len(=%u) exceeds maximum(=%u)\n",
                        resp_len,
                        HCLGE_MBX_MAX_RESP_DATA_SIZE);
                return -EINVAL;
@@ -49,7 +49,7 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
 
        if (i >= HCLGEVF_MAX_TRY_TIMES) {
                dev_err(&hdev->pdev->dev,
-                       "VF could not get mbx(%d,%d) resp(=%d) from PF in %d tries\n",
+                       "VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
                        code0, code1, hdev->mbx_resp.received_resp, i);
                return -EIO;
        }
@@ -68,10 +68,10 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
 
        if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
                dev_err(&hdev->pdev->dev,
-                       "VF could not match resp code(code0=%d,code1=%d), %d\n",
+                       "VF could not match resp code(code0=%u,code1=%u), %d\n",
                        code0, code1, mbx_resp->resp_status);
                dev_err(&hdev->pdev->dev,
-                       "VF could not match resp r_code(r_code0=%d,r_code1=%d)\n",
+                       "VF could not match resp r_code(r_code0=%u,r_code1=%u)\n",
                        r_code0, r_code1);
                return -EIO;
        }
@@ -168,7 +168,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
                flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
                if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
                        dev_warn(&hdev->pdev->dev,
-                                "dropped invalid mailbox message, code = %d\n",
+                                "dropped invalid mailbox message, code = %u\n",
                                 req->msg[0]);
 
                        /* dropping/not processing this invalid message */
@@ -187,7 +187,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
                case HCLGE_MBX_PF_VF_RESP:
                        if (resp->received_resp)
                                dev_warn(&hdev->pdev->dev,
-                                        "VF mbx resp flag not clear(%d)\n",
+                                        "VF mbx resp flag not clear(%u)\n",
                                         req->msg[1]);
                        resp->received_resp = true;
 
@@ -219,7 +219,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
                        if (atomic_read(&hdev->arq.count) >=
                            HCLGE_MBX_MAX_ARQ_MSG_NUM) {
                                dev_warn(&hdev->pdev->dev,
-                                        "Async Q full, dropping msg(%d)\n",
+                                        "Async Q full, dropping msg(%u)\n",
                                         req->msg[1]);
                                break;
                        }
@@ -236,7 +236,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
                        break;
                default:
                        dev_err(&hdev->pdev->dev,
-                               "VF received unsupported(%d) mbx msg from PF\n",
+                               "VF received unsupported(%u) mbx msg from PF\n",
                                req->msg[0]);
                        break;
                }
@@ -327,7 +327,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
                        break;
                default:
                        dev_err(&hdev->pdev->dev,
-                               "fetched unsupported(%d) message from arq\n",
+                               "fetched unsupported(%u) message from arq\n",
                                msg_q[0]);
                        break;
                }
diff --git a/drivers/net/ethernet/hp/Kconfig b/drivers/net/ethernet/hp/Kconfig
deleted file mode 100644 (file)
index fb395cf..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# HP network device configuration
-#
-
-config NET_VENDOR_HP
-       bool "HP devices"
-       default y
-       depends on ISA || EISA || PCI
-       ---help---
-         If you have a network (Ethernet) card belonging to this class, say Y.
-
-         Note that the answer to this question doesn't directly affect the
-         kernel: saying N will just cause the configurator to skip all
-         the questions about HP cards. If you say Y, you will be asked for
-         your specific card in the following questions.
-
-if NET_VENDOR_HP
-
-config HP100
-       tristate "HP 10/100VG PCLAN (ISA, EISA, PCI) support"
-       depends on (ISA || EISA || PCI)
-       ---help---
-         If you have a network (Ethernet) card of this type, say Y here.
-
-         To compile this driver as a module, choose M here. The module
-         will be called hp100.
-
-endif # NET_VENDOR_HP
diff --git a/drivers/net/ethernet/hp/Makefile b/drivers/net/ethernet/hp/Makefile
deleted file mode 100644 (file)
index 5ed723b..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the HP network device drivers.
-#
-
-obj-$(CONFIG_HP100) += hp100.o
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
deleted file mode 100644 (file)
index 6ec78f5..0000000
+++ /dev/null
@@ -1,3037 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
-** hp100.c
-** HP CASCADE Architecture Driver for 100VG-AnyLan Network Adapters
-**
-** $Id: hp100.c,v 1.58 2001/09/24 18:03:01 perex Exp perex $
-**
-** Based on the HP100 driver written by Jaroslav Kysela <perex@jcu.cz>
-** Extended for new busmaster capable chipsets by
-** Siegfried "Frieder" Loeffler (dg1sek) <floeff@mathematik.uni-stuttgart.de>
-**
-** Maintained by: Jaroslav Kysela <perex@perex.cz>
-**
-** This driver has only been tested with
-** -- HP J2585B 10/100 Mbit/s PCI Busmaster
-** -- HP J2585A 10/100 Mbit/s PCI
-** -- HP J2970A 10 Mbit/s PCI Combo 10base-T/BNC
-** -- HP J2973A 10 Mbit/s PCI 10base-T
-** -- HP J2573  10/100 ISA
-** -- Compex ReadyLink ENET100-VG4  10/100 Mbit/s PCI / EISA
-** -- Compex FreedomLine 100/VG  10/100 Mbit/s ISA / EISA / PCI
-**
-** but it should also work with the other CASCADE based adapters.
-**
-** TODO:
-**       -  J2573 seems to hang sometimes when in shared memory mode.
-**       -  Mode for Priority TX
-**       -  Check PCI registers, performance might be improved?
-**       -  To reduce interrupt load in busmaster, one could switch off
-**          the interrupts that are used to refill the queues whenever the
-**          queues are filled up to more than a certain threshold.
-**       -  some updates for EISA version of card
-**
-**
-**
-** 1.57c -> 1.58
-**   - used indent to change coding-style
-**   - added KTI DP-200 EISA ID
-**   - ioremap is also used for low (<1MB) memory (multi-architecture support)
-**
-** 1.57b -> 1.57c - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
-**   - release resources on failure in init_module
-**
-** 1.57 -> 1.57b - Jean II
-**   - fix spinlocks, SMP is now working !
-**
-** 1.56 -> 1.57
-**   - updates for new PCI interface for 2.1 kernels
-**
-** 1.55 -> 1.56
-**   - removed printk in misc. interrupt and update statistics to allow
-**     monitoring of card status
-**   - timing changes in xmit routines, relogin to 100VG hub added when
-**     driver does reset
-**   - included fix for Compex FreedomLine PCI adapter
-**
-** 1.54 -> 1.55
-**   - fixed bad initialization in init_module
-**   - added Compex FreedomLine adapter
-**   - some fixes in card initialization
-**
-** 1.53 -> 1.54
-**   - added hardware multicast filter support (doesn't work)
-**   - little changes in hp100_sense_lan routine
-**     - added support for Coax and AUI (J2970)
-**   - fix for multiple cards and hp100_mode parameter (insmod)
-**   - fix for shared IRQ
-**
-** 1.52 -> 1.53
-**   - fixed bug in multicast support
-**
-*/
-
-#define HP100_DEFAULT_PRIORITY_TX 0
-
-#undef HP100_DEBUG
-#undef HP100_DEBUG_B           /* Trace  */
-#undef HP100_DEBUG_BM          /* Debug busmaster code (PDL stuff) */
-
-#undef HP100_DEBUG_TRAINING    /* Debug login-to-hub procedure */
-#undef HP100_DEBUG_TX
-#undef HP100_DEBUG_IRQ
-#undef HP100_DEBUG_RX
-
-#undef HP100_MULTICAST_FILTER  /* Need to be debugged... */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/eisa.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/spinlock.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-
-#include <asm/io.h>
-
-#include "hp100.h"
-
-/*
- *  defines
- */
-
-#define HP100_BUS_ISA     0
-#define HP100_BUS_EISA    1
-#define HP100_BUS_PCI     2
-
-#define HP100_REGION_SIZE      0x20    /* for ioports */
-#define HP100_SIG_LEN          8       /* same as EISA_SIG_LEN */
-
-#define HP100_MAX_PACKET_SIZE  (1536+4)
-#define HP100_MIN_PACKET_SIZE  60
-
-#ifndef HP100_DEFAULT_RX_RATIO
-/* default - 75% onboard memory on the card are used for RX packets */
-#define HP100_DEFAULT_RX_RATIO 75
-#endif
-
-#ifndef HP100_DEFAULT_PRIORITY_TX
-/* default - don't enable transmit outgoing packets as priority */
-#define HP100_DEFAULT_PRIORITY_TX 0
-#endif
-
-/*
- *  structures
- */
-
-struct hp100_private {
-       spinlock_t lock;
-       char id[HP100_SIG_LEN];
-       u_short chip;
-       u_short soft_model;
-       u_int memory_size;
-       u_int virt_memory_size;
-       u_short rx_ratio;       /* 1 - 99 */
-       u_short priority_tx;    /* != 0 - priority tx */
-       u_short mode;           /* PIO, Shared Mem or Busmaster */
-       u_char bus;
-       struct pci_dev *pci_dev;
-       short mem_mapped;       /* memory mapped access */
-       void __iomem *mem_ptr_virt;     /* virtual memory mapped area, maybe NULL */
-       unsigned long mem_ptr_phys;     /* physical memory mapped area */
-       short lan_type;         /* 10Mb/s, 100Mb/s or -1 (error) */
-       int hub_status;         /* was login to hub successful? */
-       u_char mac1_mode;
-       u_char mac2_mode;
-       u_char hash_bytes[8];
-
-       /* Rings for busmaster mode: */
-       hp100_ring_t *rxrhead;  /* Head (oldest) index into rxring */
-       hp100_ring_t *rxrtail;  /* Tail (newest) index into rxring */
-       hp100_ring_t *txrhead;  /* Head (oldest) index into txring */
-       hp100_ring_t *txrtail;  /* Tail (newest) index into txring */
-
-       hp100_ring_t rxring[MAX_RX_PDL];
-       hp100_ring_t txring[MAX_TX_PDL];
-
-       u_int *page_vaddr_algn; /* Aligned virtual address of allocated page */
-       u_long whatever_offset; /* Offset to bus/phys/dma address */
-       int rxrcommit;          /* # Rx PDLs committed to adapter */
-       int txrcommit;          /* # Tx PDLs committed to adapter */
-};
-
-/*
- *  variables
- */
-#ifdef CONFIG_ISA
-static const char *hp100_isa_tbl[] = {
-       "HWPF150", /* HP J2573 rev A */
-       "HWP1950", /* HP J2573 */
-};
-#endif
-
-static const struct eisa_device_id hp100_eisa_tbl[] = {
-       { "HWPF180" }, /* HP J2577 rev A */
-       { "HWP1920" }, /* HP 27248B */
-       { "HWP1940" }, /* HP J2577 */
-       { "HWP1990" }, /* HP J2577 */
-       { "CPX0301" }, /* ReadyLink ENET100-VG4 */
-       { "CPX0401" }, /* FreedomLine 100/VG */
-       { "" }         /* Mandatory final entry ! */
-};
-MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
-
-static const struct pci_device_id hp100_pci_tbl[] = {
-       {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
-       {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
-       {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2970A, PCI_ANY_ID, PCI_ANY_ID,},
-       {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2973A, PCI_ANY_ID, PCI_ANY_ID,},
-       {PCI_VENDOR_ID_COMPEX, PCI_DEVICE_ID_COMPEX_ENET100VG4, PCI_ANY_ID, PCI_ANY_ID,},
-       {PCI_VENDOR_ID_COMPEX2, PCI_DEVICE_ID_COMPEX2_100VG, PCI_ANY_ID, PCI_ANY_ID,},
-/*     {PCI_VENDOR_ID_KTI, PCI_DEVICE_ID_KTI_DP200, PCI_ANY_ID, PCI_ANY_ID }, */
-       {}                      /* Terminating entry */
-};
-MODULE_DEVICE_TABLE(pci, hp100_pci_tbl);
-
-static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO;
-static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX;
-static int hp100_mode = 1;
-
-module_param(hp100_rx_ratio, int, 0);
-module_param(hp100_priority_tx, int, 0);
-module_param(hp100_mode, int, 0);
-
-/*
- *  prototypes
- */
-
-static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus,
-                       struct pci_dev *pci_dev);
-
-
-static int hp100_open(struct net_device *dev);
-static int hp100_close(struct net_device *dev);
-static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
-                                   struct net_device *dev);
-static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
-                                      struct net_device *dev);
-static void hp100_rx(struct net_device *dev);
-static struct net_device_stats *hp100_get_stats(struct net_device *dev);
-static void hp100_misc_interrupt(struct net_device *dev);
-static void hp100_update_stats(struct net_device *dev);
-static void hp100_clear_stats(struct hp100_private *lp, int ioaddr);
-static void hp100_set_multicast_list(struct net_device *dev);
-static irqreturn_t hp100_interrupt(int irq, void *dev_id);
-static void hp100_start_interface(struct net_device *dev);
-static void hp100_stop_interface(struct net_device *dev);
-static void hp100_load_eeprom(struct net_device *dev, u_short ioaddr);
-static int hp100_sense_lan(struct net_device *dev);
-static int hp100_login_to_vg_hub(struct net_device *dev,
-                                u_short force_relogin);
-static int hp100_down_vg_link(struct net_device *dev);
-static void hp100_cascade_reset(struct net_device *dev, u_short enable);
-static void hp100_BM_shutdown(struct net_device *dev);
-static void hp100_mmuinit(struct net_device *dev);
-static void hp100_init_pdls(struct net_device *dev);
-static int hp100_init_rxpdl(struct net_device *dev,
-                           register hp100_ring_t * ringptr,
-                           register u_int * pdlptr);
-static int hp100_init_txpdl(struct net_device *dev,
-                           register hp100_ring_t * ringptr,
-                           register u_int * pdlptr);
-static void hp100_rxfill(struct net_device *dev);
-static void hp100_hwinit(struct net_device *dev);
-static void hp100_clean_txring(struct net_device *dev);
-#ifdef HP100_DEBUG
-static void hp100_RegisterDump(struct net_device *dev);
-#endif
-
-/* Conversion to new PCI API :
- * Convert an address in a kernel buffer to a bus/phys/dma address.
- * This work *only* for memory fragments part of lp->page_vaddr,
- * because it was properly DMA allocated via pci_alloc_consistent(),
- * so we just need to "retrieve" the original mapping to bus/phys/dma
- * address - Jean II */
-static inline dma_addr_t virt_to_whatever(struct net_device *dev, u32 * ptr)
-{
-       struct hp100_private *lp = netdev_priv(dev);
-       return ((u_long) ptr) + lp->whatever_offset;
-}
-
-static inline u_int pdl_map_data(struct hp100_private *lp, void *data)
-{
-       return pci_map_single(lp->pci_dev, data,
-                             MAX_ETHER_SIZE, PCI_DMA_FROMDEVICE);
-}
-
-/* TODO: This function should not really be needed in a good design... */
-static void wait(void)
-{
-       mdelay(1);
-}
-
-/*
- *  probe functions
- *  These functions should - if possible - avoid doing write operations
- *  since this could cause problems when the card is not installed.
- */
-
-/*
- * Read board id and convert to string.
- * Effectively same code as decode_eisa_sig
- */
-static const char *hp100_read_id(int ioaddr)
-{
-       int i;
-       static char str[HP100_SIG_LEN];
-       unsigned char sig[4], sum;
-        unsigned short rev;
-
-       hp100_page(ID_MAC_ADDR);
-       sum = 0;
-       for (i = 0; i < 4; i++) {
-               sig[i] = hp100_inb(BOARD_ID + i);
-               sum += sig[i];
-       }
-
-       sum += hp100_inb(BOARD_ID + i);
-       if (sum != 0xff)
-               return NULL;    /* bad checksum */
-
-        str[0] = ((sig[0] >> 2) & 0x1f) + ('A' - 1);
-        str[1] = (((sig[0] & 3) << 3) | (sig[1] >> 5)) + ('A' - 1);
-        str[2] = (sig[1] & 0x1f) + ('A' - 1);
-        rev = (sig[2] << 8) | sig[3];
-        sprintf(str + 3, "%04X", rev);
-
-       return str;
-}
-
-#ifdef CONFIG_ISA
-static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr)
-{
-       const char *sig;
-       int i;
-
-       if (!request_region(ioaddr, HP100_REGION_SIZE, "hp100"))
-               goto err;
-
-       if (hp100_inw(HW_ID) != HP100_HW_ID_CASCADE) {
-               release_region(ioaddr, HP100_REGION_SIZE);
-               goto err;
-       }
-
-       sig = hp100_read_id(ioaddr);
-       release_region(ioaddr, HP100_REGION_SIZE);
-
-       if (sig == NULL)
-               goto err;
-
-       for (i = 0; i < ARRAY_SIZE(hp100_isa_tbl); i++) {
-               if (!strcmp(hp100_isa_tbl[i], sig))
-                       break;
-
-       }
-
-       if (i < ARRAY_SIZE(hp100_isa_tbl))
-               return hp100_probe1(dev, ioaddr, HP100_BUS_ISA, NULL);
- err:
-       return -ENODEV;
-
-}
-/*
- * Probe for ISA board.
- * EISA and PCI are handled by device infrastructure.
- */
-
-static int  __init hp100_isa_probe(struct net_device *dev, int addr)
-{
-       int err = -ENODEV;
-
-       /* Probe for a specific ISA address */
-       if (addr > 0xff && addr < 0x400)
-               err = hp100_isa_probe1(dev, addr);
-
-       else if (addr != 0)
-               err = -ENXIO;
-
-       else {
-               /* Probe all ISA possible port regions */
-               for (addr = 0x100; addr < 0x400; addr += 0x20) {
-                       err = hp100_isa_probe1(dev, addr);
-                       if (!err)
-                               break;
-               }
-       }
-       return err;
-}
-#endif /* CONFIG_ISA */
-
-#if !defined(MODULE) && defined(CONFIG_ISA)
-struct net_device * __init hp100_probe(int unit)
-{
-       struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
-       int err;
-
-       if (!dev)
-               return ERR_PTR(-ENODEV);
-
-#ifdef HP100_DEBUG_B
-       hp100_outw(0x4200, TRACE);
-       printk("hp100: %s: probe\n", dev->name);
-#endif
-
-       if (unit >= 0) {
-               sprintf(dev->name, "eth%d", unit);
-               netdev_boot_setup_check(dev);
-       }
-
-       err = hp100_isa_probe(dev, dev->base_addr);
-       if (err)
-               goto out;
-
-       return dev;
- out:
-       free_netdev(dev);
-       return ERR_PTR(err);
-}
-#endif /* !MODULE && CONFIG_ISA */
-
-static const struct net_device_ops hp100_bm_netdev_ops = {
-       .ndo_open               = hp100_open,
-       .ndo_stop               = hp100_close,
-       .ndo_start_xmit         = hp100_start_xmit_bm,
-       .ndo_get_stats          = hp100_get_stats,
-       .ndo_set_rx_mode        = hp100_set_multicast_list,
-       .ndo_set_mac_address    = eth_mac_addr,
-       .ndo_validate_addr      = eth_validate_addr,
-};
-
-static const struct net_device_ops hp100_netdev_ops = {
-       .ndo_open               = hp100_open,
-       .ndo_stop               = hp100_close,
-       .ndo_start_xmit         = hp100_start_xmit,
-       .ndo_get_stats          = hp100_get_stats,
-       .ndo_set_rx_mode        = hp100_set_multicast_list,
-       .ndo_set_mac_address    = eth_mac_addr,
-       .ndo_validate_addr      = eth_validate_addr,
-};
-
-static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus,
-                       struct pci_dev *pci_dev)
-{
-       int i;
-       int err = -ENODEV;
-       const char *eid;
-       u_int chip;
-       u_char uc;
-       u_int memory_size = 0, virt_memory_size = 0;
-       u_short local_mode, lsw;
-       short mem_mapped;
-       unsigned long mem_ptr_phys;
-       void __iomem *mem_ptr_virt;
-       struct hp100_private *lp;
-
-#ifdef HP100_DEBUG_B
-       hp100_outw(0x4201, TRACE);
-       printk("hp100: %s: probe1\n", dev->name);
-#endif
-
-       /* memory region for programmed i/o */
-       if (!request_region(ioaddr, HP100_REGION_SIZE, "hp100"))
-               goto out1;
-
-       if (hp100_inw(HW_ID) != HP100_HW_ID_CASCADE)
-               goto out2;
-
-       chip = hp100_inw(PAGING) & HP100_CHIPID_MASK;
-#ifdef HP100_DEBUG
-       if (chip == HP100_CHIPID_SHASTA)
-               printk("hp100: %s: Shasta Chip detected. (This is a pre 802.12 chip)\n", dev->name);
-       else if (chip == HP100_CHIPID_RAINIER)
-               printk("hp100: %s: Rainier Chip detected. (This is a pre 802.12 chip)\n", dev->name);
-       else if (chip == HP100_CHIPID_LASSEN)
-               printk("hp100: %s: Lassen Chip detected.\n", dev->name);
-       else
-               printk("hp100: %s: Warning: Unknown CASCADE chip (id=0x%.4x).\n", dev->name, chip);
-#endif
-
-       dev->base_addr = ioaddr;
-
-       eid = hp100_read_id(ioaddr);
-       if (eid == NULL) {      /* bad checksum? */
-               printk(KERN_WARNING "%s: bad ID checksum at base port 0x%x\n",
-                      __func__, ioaddr);
-               goto out2;
-       }
-
-       hp100_page(ID_MAC_ADDR);
-       for (i = uc = 0; i < 7; i++)
-               uc += hp100_inb(LAN_ADDR + i);
-       if (uc != 0xff) {
-               printk(KERN_WARNING
-                      "%s: bad lan address checksum at port 0x%x)\n",
-                      __func__, ioaddr);
-               err = -EIO;
-               goto out2;
-       }
-
-       /* Make sure, that all registers are correctly updated... */
-
-       hp100_load_eeprom(dev, ioaddr);
-       wait();
-
-       /*
-        * Determine driver operation mode
-        *
-        * Use the variable "hp100_mode" upon insmod or as kernel parameter to
-        * force driver modes:
-        * hp100_mode=1 -> default, use busmaster mode if configured.
-        * hp100_mode=2 -> enable shared memory mode
-        * hp100_mode=3 -> force use of i/o mapped mode.
-        * hp100_mode=4 -> same as 1, but re-set the enable bit on the card.
-        */
-
-       /*
-        * LSW values:
-        *   0x2278 -> J2585B, PnP shared memory mode
-        *   0x2270 -> J2585B, shared memory mode, 0xdc000
-        *   0xa23c -> J2585B, I/O mapped mode
-        *   0x2240 -> EISA COMPEX, BusMaster (Shasta Chip)
-        *   0x2220 -> EISA HP, I/O (Shasta Chip)
-        *   0x2260 -> EISA HP, BusMaster (Shasta Chip)
-        */
-
-#if 0
-       local_mode = 0x2270;
-       hp100_outw(0xfefe, OPTION_LSW);
-       hp100_outw(local_mode | HP100_SET_LB | HP100_SET_HB, OPTION_LSW);
-#endif
-
-       /* hp100_mode value maybe used in future by another card */
-       local_mode = hp100_mode;
-       if (local_mode < 1 || local_mode > 4)
-               local_mode = 1; /* default */
-#ifdef HP100_DEBUG
-       printk("hp100: %s: original LSW = 0x%x\n", dev->name,
-              hp100_inw(OPTION_LSW));
-#endif
-
-       if (local_mode == 3) {
-               hp100_outw(HP100_MEM_EN | HP100_RESET_LB, OPTION_LSW);
-               hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
-               hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_RESET_HB, OPTION_LSW);
-               printk("hp100: IO mapped mode forced.\n");
-       } else if (local_mode == 2) {
-               hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW);
-               hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
-               hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_RESET_HB, OPTION_LSW);
-               printk("hp100: Shared memory mode requested.\n");
-       } else if (local_mode == 4) {
-               if (chip == HP100_CHIPID_LASSEN) {
-                       hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_SET_HB, OPTION_LSW);
-                       hp100_outw(HP100_IO_EN | HP100_MEM_EN | HP100_RESET_LB, OPTION_LSW);
-                       printk("hp100: Busmaster mode requested.\n");
-               }
-               local_mode = 1;
-       }
-
-       if (local_mode == 1) {  /* default behaviour */
-               lsw = hp100_inw(OPTION_LSW);
-
-               if ((lsw & HP100_IO_EN) && (~lsw & HP100_MEM_EN) &&
-                   (~lsw & (HP100_BM_WRITE | HP100_BM_READ))) {
-#ifdef HP100_DEBUG
-                       printk("hp100: %s: IO_EN bit is set on card.\n", dev->name);
-#endif
-                       local_mode = 3;
-               } else if (chip == HP100_CHIPID_LASSEN &&
-                          (lsw & (HP100_BM_WRITE | HP100_BM_READ)) == (HP100_BM_WRITE | HP100_BM_READ)) {
-                       /* Conversion to new PCI API :
-                        * I don't have the doc, but I assume that the card
-                        * can map the full 32bit address space.
-                        * Also, we can have EISA Busmaster cards (not tested),
-                        * so beware !!! - Jean II */
-                       if((bus == HP100_BUS_PCI) &&
-                          (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)))) {
-                               /* Gracefully fallback to shared memory */
-                               goto busmasterfail;
-                       }
-                       printk("hp100: Busmaster mode enabled.\n");
-                       hp100_outw(HP100_MEM_EN | HP100_IO_EN | HP100_RESET_LB, OPTION_LSW);
-               } else {
-               busmasterfail:
-#ifdef HP100_DEBUG
-                       printk("hp100: %s: Card not configured for BM or BM not supported with this card.\n", dev->name);
-                       printk("hp100: %s: Trying shared memory mode.\n", dev->name);
-#endif
-                       /* In this case, try shared memory mode */
-                       local_mode = 2;
-                       hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW);
-                       /* hp100_outw(HP100_IO_EN|HP100_RESET_LB, OPTION_LSW); */
-               }
-       }
-#ifdef HP100_DEBUG
-       printk("hp100: %s: new LSW = 0x%x\n", dev->name, hp100_inw(OPTION_LSW));
-#endif
-
-       /* Check for shared memory on the card, eventually remap it */
-       hp100_page(HW_MAP);
-       mem_mapped = ((hp100_inw(OPTION_LSW) & (HP100_MEM_EN)) != 0);
-       mem_ptr_phys = 0UL;
-       mem_ptr_virt = NULL;
-       memory_size = (8192 << ((hp100_inb(SRAM) >> 5) & 0x07));
-       virt_memory_size = 0;
-
-       /* For memory mapped or busmaster mode, we want the memory address */
-       if (mem_mapped || (local_mode == 1)) {
-               mem_ptr_phys = (hp100_inw(MEM_MAP_LSW) | (hp100_inw(MEM_MAP_MSW) << 16));
-               mem_ptr_phys &= ~0x1fff;        /* 8k alignment */
-
-               if (bus == HP100_BUS_ISA && (mem_ptr_phys & ~0xfffff) != 0) {
-                       printk("hp100: Can only use programmed i/o mode.\n");
-                       mem_ptr_phys = 0;
-                       mem_mapped = 0;
-                       local_mode = 3; /* Use programmed i/o */
-               }
-
-               /* We do not need access to shared memory in busmaster mode */
-               /* However in slave mode we need to remap high (>1GB) card memory  */
-               if (local_mode != 1) {  /* = not busmaster */
-                       /* We try with smaller memory sizes, if ioremap fails */
-                       for (virt_memory_size = memory_size; virt_memory_size > 16383; virt_memory_size >>= 1) {
-                               if ((mem_ptr_virt = ioremap((u_long) mem_ptr_phys, virt_memory_size)) == NULL) {
-#ifdef HP100_DEBUG
-                                       printk("hp100: %s: ioremap for 0x%x bytes high PCI memory at 0x%lx failed\n", dev->name, virt_memory_size, mem_ptr_phys);
-#endif
-                               } else {
-#ifdef HP100_DEBUG
-                                       printk("hp100: %s: remapped 0x%x bytes high PCI memory at 0x%lx to %p.\n", dev->name, virt_memory_size, mem_ptr_phys, mem_ptr_virt);
-#endif
-                                       break;
-                               }
-                       }
-
-                       if (mem_ptr_virt == NULL) {     /* all ioremap tries failed */
-                               printk("hp100: Failed to ioremap the PCI card memory. Will have to use i/o mapped mode.\n");
-                               local_mode = 3;
-                               virt_memory_size = 0;
-                       }
-               }
-       }
-
-       if (local_mode == 3) {  /* io mapped forced */
-               mem_mapped = 0;
-               mem_ptr_phys = 0;
-               mem_ptr_virt = NULL;
-               printk("hp100: Using (slow) programmed i/o mode.\n");
-       }
-
-       /* Initialise the "private" data structure for this card. */
-       lp = netdev_priv(dev);
-
-       spin_lock_init(&lp->lock);
-       strlcpy(lp->id, eid, HP100_SIG_LEN);
-       lp->chip = chip;
-       lp->mode = local_mode;
-       lp->bus = bus;
-       lp->pci_dev = pci_dev;
-       lp->priority_tx = hp100_priority_tx;
-       lp->rx_ratio = hp100_rx_ratio;
-       lp->mem_ptr_phys = mem_ptr_phys;
-       lp->mem_ptr_virt = mem_ptr_virt;
-       hp100_page(ID_MAC_ADDR);
-       lp->soft_model = hp100_inb(SOFT_MODEL);
-       lp->mac1_mode = HP100_MAC1MODE3;
-       lp->mac2_mode = HP100_MAC2MODE3;
-       memset(&lp->hash_bytes, 0x00, 8);
-
-       dev->base_addr = ioaddr;
-
-       lp->memory_size = memory_size;
-       lp->virt_memory_size = virt_memory_size;
-       lp->rx_ratio = hp100_rx_ratio;  /* can be conf'd with insmod */
-
-       if (lp->mode == 1)      /* busmaster */
-               dev->netdev_ops = &hp100_bm_netdev_ops;
-       else
-               dev->netdev_ops = &hp100_netdev_ops;
-
-       /* Ask the card for which IRQ line it is configured */
-       if (bus == HP100_BUS_PCI) {
-               dev->irq = pci_dev->irq;
-       } else {
-               hp100_page(HW_MAP);
-               dev->irq = hp100_inb(IRQ_CHANNEL) & HP100_IRQMASK;
-               if (dev->irq == 2)
-                       dev->irq = 9;
-       }
-
-       if (lp->mode == 1)      /* busmaster */
-               dev->dma = 4;
-
-       /* Ask the card for its MAC address and store it for later use. */
-       hp100_page(ID_MAC_ADDR);
-       for (i = uc = 0; i < 6; i++)
-               dev->dev_addr[i] = hp100_inb(LAN_ADDR + i);
-
-       /* Reset statistics (counters) */
-       hp100_clear_stats(lp, ioaddr);
-
-       /* If busmaster mode is wanted, a dma-capable memory area is needed for
-        * the rx and tx PDLs
-        * PCI cards can access the whole PC memory. Therefore GFP_DMA is not
-        * needed for the allocation of the memory area.
-        */
-
-       /* TODO: We do not need this with old cards, where PDLs are stored
-        * in the cards shared memory area. But currently, busmaster has been
-        * implemented/tested only with the lassen chip anyway... */
-       if (lp->mode == 1) {    /* busmaster */
-               dma_addr_t page_baddr;
-               /* Get physically continuous memory for TX & RX PDLs    */
-               /* Conversion to new PCI API :
-                * Pages are always aligned and zeroed, no need to it ourself.
-                * Doc says should be OK for EISA bus as well - Jean II */
-               lp->page_vaddr_algn = pci_alloc_consistent(lp->pci_dev, MAX_RINGSIZE, &page_baddr);
-               if (!lp->page_vaddr_algn) {
-                       err = -ENOMEM;
-                       goto out_mem_ptr;
-               }
-               lp->whatever_offset = ((u_long) page_baddr) - ((u_long) lp->page_vaddr_algn);
-
-#ifdef HP100_DEBUG_BM
-               printk("hp100: %s: Reserved DMA memory from 0x%x to 0x%x\n", dev->name, (u_int) lp->page_vaddr_algn, (u_int) lp->page_vaddr_algn + MAX_RINGSIZE);
-#endif
-               lp->rxrcommit = lp->txrcommit = 0;
-               lp->rxrhead = lp->rxrtail = &(lp->rxring[0]);
-               lp->txrhead = lp->txrtail = &(lp->txring[0]);
-       }
-
-       /* Initialise the card. */
-       /* (I'm not really sure if it's a good idea to do this during probing, but
-        * like this it's assured that the lan connection type can be sensed
-        * correctly)
-        */
-       hp100_hwinit(dev);
-
-       /* Try to find out which kind of LAN the card is connected to. */
-       lp->lan_type = hp100_sense_lan(dev);
-
-       /* Print out a message what about what we think we have probed. */
-       printk("hp100: at 0x%x, IRQ %d, ", ioaddr, dev->irq);
-       switch (bus) {
-       case HP100_BUS_EISA:
-               printk("EISA");
-               break;
-       case HP100_BUS_PCI:
-               printk("PCI");
-               break;
-       default:
-               printk("ISA");
-               break;
-       }
-       printk(" bus, %dk SRAM (rx/tx %d%%).\n", lp->memory_size >> 10, lp->rx_ratio);
-
-       if (lp->mode == 2) {    /* memory mapped */
-               printk("hp100: Memory area at 0x%lx-0x%lx", mem_ptr_phys,
-                               (mem_ptr_phys + (mem_ptr_phys > 0x100000 ? (u_long) lp->memory_size : 16 * 1024)) - 1);
-               if (mem_ptr_virt)
-                       printk(" (virtual base %p)", mem_ptr_virt);
-               printk(".\n");
-
-               /* Set for info when doing ifconfig */
-               dev->mem_start = mem_ptr_phys;
-               dev->mem_end = mem_ptr_phys + lp->memory_size;
-       }
-
-       printk("hp100: ");
-       if (lp->lan_type != HP100_LAN_ERR)
-               printk("Adapter is attached to ");
-       switch (lp->lan_type) {
-       case HP100_LAN_100:
-               printk("100Mb/s Voice Grade AnyLAN network.\n");
-               break;
-       case HP100_LAN_10:
-               printk("10Mb/s network (10baseT).\n");
-               break;
-       case HP100_LAN_COAX:
-               printk("10Mb/s network (coax).\n");
-               break;
-       default:
-               printk("Warning! Link down.\n");
-       }
-
-       err = register_netdev(dev);
-       if (err)
-               goto out3;
-
-       return 0;
-out3:
-       if (local_mode == 1)
-               pci_free_consistent(lp->pci_dev, MAX_RINGSIZE + 0x0f,
-                                   lp->page_vaddr_algn,
-                                   virt_to_whatever(dev, lp->page_vaddr_algn));
-out_mem_ptr:
-       if (mem_ptr_virt)
-               iounmap(mem_ptr_virt);
-out2:
-       release_region(ioaddr, HP100_REGION_SIZE);
-out1:
-       return err;
-}
-
-/* This procedure puts the card into a stable init state */
-static void hp100_hwinit(struct net_device *dev)
-{
-       int ioaddr = dev->base_addr;
-       struct hp100_private *lp = netdev_priv(dev);
-
-#ifdef HP100_DEBUG_B
-       hp100_outw(0x4202, TRACE);
-       printk("hp100: %s: hwinit\n", dev->name);
-#endif
-
-       /* Initialise the card. -------------------------------------------- */
-
-       /* Clear all pending Ints and disable Ints */
-       hp100_page(PERFORMANCE);
-       hp100_outw(0xfefe, IRQ_MASK);   /* mask off all ints */
-       hp100_outw(0xffff, IRQ_STATUS); /* clear all pending ints */
-
-       hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);
-       hp100_outw(HP100_TRI_INT | HP100_SET_HB, OPTION_LSW);
-
-       if (lp->mode == 1) {
-               hp100_BM_shutdown(dev); /* disables BM, puts cascade in reset */
-               wait();
-       } else {
-               hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);
-               hp100_cascade_reset(dev, 1);
-               hp100_page(MAC_CTRL);
-               hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1);
-       }
-
-       /* Initiate EEPROM reload */
-       hp100_load_eeprom(dev, 0);
-
-       wait();
-
-       /* Go into reset again. */
-       hp100_cascade_reset(dev, 1);
-
-       /* Set Option Registers to a safe state  */
-       hp100_outw(HP100_DEBUG_EN |
-                  HP100_RX_HDR |
-                  HP100_EE_EN |
-                  HP100_BM_WRITE |
-                  HP100_BM_READ | HP100_RESET_HB |
-                  HP100_FAKE_INT |
-                  HP100_INT_EN |
-                  HP100_MEM_EN |
-                  HP100_IO_EN | HP100_RESET_LB, OPTION_LSW);
-
-       hp100_outw(HP100_TRI_INT |
-                  HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW);
-
-       hp100_outb(HP100_PRIORITY_TX |
-                  HP100_ADV_NXT_PKT |
-                  HP100_TX_CMD | HP100_RESET_LB, OPTION_MSW);
-
-       /* TODO: Configure MMU for Ram Test. */
-       /* TODO: Ram Test. */
-
-       /* Re-check if adapter is still at same i/o location      */
-       /* (If the base i/o in eeprom has been changed but the    */
-       /* registers had not been changed, a reload of the eeprom */
-       /* would move the adapter to the address stored in eeprom */
-
-       /* TODO: Code to implement. */
-
-       /* Until here it was code from HWdiscover procedure. */
-       /* Next comes code from mmuinit procedure of SCO BM driver which is
-        * called from HWconfigure in the SCO driver.  */
-
-       /* Initialise MMU, eventually switch on Busmaster Mode, initialise
-        * multicast filter...
-        */
-       hp100_mmuinit(dev);
-
-       /* We don't turn the interrupts on here - this is done by start_interface. */
-       wait();                 /* TODO: Do we really need this? */
-
-       /* Enable Hardware (e.g. unreset) */
-       hp100_cascade_reset(dev, 0);
-
-       /* ------- initialisation complete ----------- */
-
-       /* Finally try to log in the Hub if there may be a VG connection. */
-       if ((lp->lan_type == HP100_LAN_100) || (lp->lan_type == HP100_LAN_ERR))
-               hp100_login_to_vg_hub(dev, 0);  /* relogin */
-
-}
-
-
/*
 * mmuinit - Reinitialise Cascade MMU and MAC settings.
 * Note: Must already be in reset and leaves card in reset.
 *
 * Partitions card memory between Rx/Tx (and PDLs on non-ETR chips),
 * programs the MAC address and multicast hash, and leaves all IRQs
 * masked and acknowledged. Interrupts are enabled later by
 * hp100_start_interface().
 */
static void hp100_mmuinit(struct net_device *dev)
{
	int ioaddr = dev->base_addr;	/* used implicitly by the hp100_* register macros */
	struct hp100_private *lp = netdev_priv(dev);
	int i;

#ifdef HP100_DEBUG_B
	hp100_outw(0x4203, TRACE);
	printk("hp100: %s: mmuinit\n", dev->name);
#endif

#ifdef HP100_DEBUG
	/* Sanity check: the contract requires the card to be held in reset. */
	if (0 != (hp100_inw(OPTION_LSW) & HP100_HW_RST)) {
		printk("hp100: %s: Not in reset when entering mmuinit. Fix me.\n", dev->name);
		return;
	}
#endif

	/* Make sure IRQs are masked off and ack'ed. */
	hp100_page(PERFORMANCE);
	hp100_outw(0xfefe, IRQ_MASK);	/* mask off all ints */
	hp100_outw(0xffff, IRQ_STATUS);	/* ack IRQ */

	/*
	 * Enable Hardware
	 * - Clear Debug En, Rx Hdr Pipe, EE En, I/O En, Fake Int and Intr En
	 * - Set Tri-State Int, Bus Master Rd/Wr, and Mem Map Disable
	 * - Clear Priority, Advance Pkt and Xmit Cmd
	 */

	hp100_outw(HP100_DEBUG_EN |
		   HP100_RX_HDR |
		   HP100_EE_EN | HP100_RESET_HB |
		   HP100_IO_EN |
		   HP100_FAKE_INT |
		   HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);

	hp100_outw(HP100_TRI_INT | HP100_SET_HB, OPTION_LSW);

	/* lp->mode selects the access method: 1 = busmaster (DMA),
	 * 2 = memory mapped, 3 = programmed I/O. */
	if (lp->mode == 1) {	/* busmaster */
		hp100_outw(HP100_BM_WRITE |
			   HP100_BM_READ |
			   HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW);
	} else if (lp->mode == 2) {	/* memory mapped */
		hp100_outw(HP100_BM_WRITE |
			   HP100_BM_READ | HP100_RESET_HB, OPTION_LSW);
		hp100_outw(HP100_MMAP_DIS | HP100_RESET_HB, OPTION_LSW);
		hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW);
		hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
	} else if (lp->mode == 3) {	/* i/o mapped mode */
		hp100_outw(HP100_MMAP_DIS | HP100_SET_HB |
			   HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
	}

	hp100_page(HW_MAP);
	hp100_outb(0, EARLYRXCFG);	/* disable early Rx/Tx handling */
	hp100_outw(0, EARLYTXCFG);

	/*
	 * Enable Bus Master mode
	 */
	if (lp->mode == 1) {	/* busmaster */
		/* Experimental: Set some PCI configuration bits */
		hp100_page(HW_MAP);
		hp100_andb(~HP100_PDL_USE3, MODECTRL1);	/* BM engine read maximum */
		hp100_andb(~HP100_TX_DUALQ, MODECTRL1);	/* No Queue for Priority TX */

		/* PCI Bus failures should result in a Misc. Interrupt */
		hp100_orb(HP100_EN_BUS_FAIL, MODECTRL2);

		hp100_outw(HP100_BM_READ | HP100_BM_WRITE | HP100_SET_HB, OPTION_LSW);
		hp100_page(HW_MAP);
		/* Use Burst Mode and switch on PAGE_CK */
		hp100_orb(HP100_BM_BURST_RD | HP100_BM_BURST_WR, BM);
		/* Page checking only exists on the newer Rainier/Shasta parts. */
		if ((lp->chip == HP100_CHIPID_RAINIER) || (lp->chip == HP100_CHIPID_SHASTA))
			hp100_orb(HP100_BM_PAGE_CK, BM);
		hp100_orb(HP100_BM_MASTER, BM);
	} else {		/* not busmaster */

		hp100_page(HW_MAP);
		hp100_andb(~HP100_BM_MASTER, BM);
	}

	/*
	 * Divide card memory into regions for Rx, Tx and, if non-ETR chip, PDLs
	 */
	hp100_page(MMU_CFG);
	if (lp->mode == 1) {	/* only needed for Busmaster */
		int xmit_stop, recv_stop;

		if ((lp->chip == HP100_CHIPID_RAINIER) ||
		    (lp->chip == HP100_CHIPID_SHASTA)) {
			int pdl_stop;

			/*
			 * Each pdl is 508 bytes long. (63 frags * 4 bytes for address and
			 * 4 bytes for header). We will leave NUM_RXPDLS * 508 (rounded
			 * to the next higher 1k boundary) bytes for the rx-pdl's
			 * Note: For non-etr chips the transmit stop register must be
			 * programmed on a 1k boundary, i.e. bits 9:0 must be zero.
			 */
			pdl_stop = lp->memory_size;
			xmit_stop = (pdl_stop - 508 * (MAX_RX_PDL) - 16) & ~(0x03ff);
			recv_stop = (xmit_stop * (lp->rx_ratio) / 100) & ~(0x03ff);
			hp100_outw((pdl_stop >> 4) - 1, PDL_MEM_STOP);
#ifdef HP100_DEBUG_BM
			printk("hp100: %s: PDL_STOP = 0x%x\n", dev->name, pdl_stop);
#endif
		} else {
			/* ETR chip (Lassen) in busmaster mode */
			xmit_stop = (lp->memory_size) - 1;
			recv_stop = ((lp->memory_size * lp->rx_ratio) / 100) & ~(0x03ff);
		}

		/* Stop registers hold the addresses in units of 16 bytes. */
		hp100_outw(xmit_stop >> 4, TX_MEM_STOP);
		hp100_outw(recv_stop >> 4, RX_MEM_STOP);
#ifdef HP100_DEBUG_BM
		printk("hp100: %s: TX_STOP  = 0x%x\n", dev->name, xmit_stop >> 4);
		printk("hp100: %s: RX_STOP  = 0x%x\n", dev->name, recv_stop >> 4);
#endif
	} else {
		/* Slave modes (memory mapped and programmed io)  */
		hp100_outw((((lp->memory_size * lp->rx_ratio) / 100) >> 4), RX_MEM_STOP);
		hp100_outw(((lp->memory_size - 1) >> 4), TX_MEM_STOP);
#ifdef HP100_DEBUG
		printk("hp100: %s: TX_MEM_STOP: 0x%x\n", dev->name, hp100_inw(TX_MEM_STOP));
		printk("hp100: %s: RX_MEM_STOP: 0x%x\n", dev->name, hp100_inw(RX_MEM_STOP));
#endif
	}

	/* Write MAC address into page 1 */
	hp100_page(MAC_ADDRESS);
	for (i = 0; i < 6; i++)
		hp100_outb(dev->dev_addr[i], MAC_ADDR + i);

	/* Zero the multicast hash registers */
	for (i = 0; i < 8; i++)
		hp100_outb(0x0, HASH_BYTE0 + i);

	/* Set up MAC defaults */
	hp100_page(MAC_CTRL);

	/* Go to LAN Page and zero all filter bits */
	/* Zero accept error, accept multicast, accept broadcast and accept */
	/* all directed packet bits */
	hp100_andb(~(HP100_RX_EN |
		     HP100_TX_EN |
		     HP100_ACC_ERRORED |
		     HP100_ACC_MC |
		     HP100_ACC_BC | HP100_ACC_PHY), MAC_CFG_1);

	hp100_outb(0x00, MAC_CFG_2);

	/* Zero the frame format bit. This works around a training bug in the */
	/* new hubs. */
	hp100_outb(0x00, VG_LAN_CFG_2);	/* (use 802.3) */

	if (lp->priority_tx)
		hp100_outb(HP100_PRIORITY_TX | HP100_SET_LB, OPTION_MSW);
	else
		hp100_outb(HP100_PRIORITY_TX | HP100_RESET_LB, OPTION_MSW);

	hp100_outb(HP100_ADV_NXT_PKT |
		   HP100_TX_CMD | HP100_RESET_LB, OPTION_MSW);

	/* If busmaster, initialize the PDLs */
	if (lp->mode == 1)
		hp100_init_pdls(dev);

	/* Go to performance page and initialize isr and imr registers */
	hp100_page(PERFORMANCE);
	hp100_outw(0xfefe, IRQ_MASK);	/* mask off all ints */
	hp100_outw(0xffff, IRQ_STATUS);	/* ack IRQ */
}
-
-/*
- *  open/close functions
- */
-
/*
 * hp100_open - bring the interface up (.ndo_open).
 *
 * Grabs the IRQ (shared on PCI/EISA buses), senses the LAN type,
 * reinitialises the hardware and starts the interface.
 * Returns 0 on success or -EAGAIN if the IRQ cannot be obtained.
 */
static int hp100_open(struct net_device *dev)
{
	struct hp100_private *lp = netdev_priv(dev);
#ifdef HP100_DEBUG_B
	int ioaddr = dev->base_addr;	/* needed by the hp100_outw() trace macro */
#endif

#ifdef HP100_DEBUG_B
	hp100_outw(0x4204, TRACE);
	printk("hp100: %s: open\n", dev->name);
#endif

	/* New: if bus is PCI or EISA, interrupts might be shared interrupts */
	if (request_irq(dev->irq, hp100_interrupt,
			lp->bus == HP100_BUS_PCI || lp->bus ==
			HP100_BUS_EISA ? IRQF_SHARED : 0,
			dev->name, dev)) {
		printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}

	netif_trans_update(dev); /* prevent tx timeout */
	netif_start_queue(dev);

	lp->lan_type = hp100_sense_lan(dev);
	/* Default MAC modes; adjusted later by the interface start code. */
	lp->mac1_mode = HP100_MAC1MODE3;
	lp->mac2_mode = HP100_MAC2MODE3;
	memset(&lp->hash_bytes, 0x00, 8);	/* clear multicast hash filter */

	/* Full stop/init/start cycle to bring the card into a known state. */
	hp100_stop_interface(dev);

	hp100_hwinit(dev);

	hp100_start_interface(dev);	/* sets mac modes, enables interrupts */

	return 0;
}
-
/* The close function is called when the interface is to be brought down */
/*
 * hp100_close - shut the interface down (.ndo_stop).
 *
 * Masks all card interrupts, stops the interface, logs out of a VG hub
 * if one was in use, and releases the IRQ. Always returns 0.
 */
static int hp100_close(struct net_device *dev)
{
	int ioaddr = dev->base_addr;	/* used by the hp100_* register macros */
	struct hp100_private *lp = netdev_priv(dev);

#ifdef HP100_DEBUG_B
	hp100_outw(0x4205, TRACE);
	printk("hp100: %s: close\n", dev->name);
#endif

	hp100_page(PERFORMANCE);
	hp100_outw(0xfefe, IRQ_MASK);	/* mask off all IRQs */

	hp100_stop_interface(dev);

	/* On 100VG, record the result of the hub logout for later retries. */
	if (lp->lan_type == HP100_LAN_100)
		lp->hub_status = hp100_login_to_vg_hub(dev, 0);

	netif_stop_queue(dev);

	free_irq(dev->irq, dev);

#ifdef HP100_DEBUG
	printk("hp100: %s: close LSW = 0x%x\n", dev->name,
	       hp100_inw(OPTION_LSW));
#endif

	return 0;
}
-
-
/*
 * Configure the PDL Rx rings and LAN
 *
 * Lays out the Rx and Tx PDLs inside the pre-allocated DMA page
 * (lp->page_vaddr_algn) and links the ring entries into circular
 * lists. Busmaster mode only (called from hp100_mmuinit when
 * lp->mode == 1).
 */
static void hp100_init_pdls(struct net_device *dev)
{
	struct hp100_private *lp = netdev_priv(dev);
	hp100_ring_t *ringptr;
	u_int *pageptr;		/* Warning : increment by 4 - Jean II */
	int i;

#ifdef HP100_DEBUG_B
	int ioaddr = dev->base_addr;
#endif

#ifdef HP100_DEBUG_B
	hp100_outw(0x4206, TRACE);
	printk("hp100: %s: init pdls\n", dev->name);
#endif

	if (!lp->page_vaddr_algn)
		printk("hp100: %s: Warning: lp->page_vaddr_algn not initialised!\n", dev->name);
	else {
		/* pageptr shall point into the DMA accessible memory region  */
		/* we use this pointer to status the upper limit of allocated */
		/* memory in the allocated page. */
		/* note: align the pointers to the pci cache line size */
		memset(lp->page_vaddr_algn, 0, MAX_RINGSIZE);	/* Zero  Rx/Tx ring page */
		pageptr = lp->page_vaddr_algn;

		lp->rxrcommit = 0;
		ringptr = lp->rxrhead = lp->rxrtail = &(lp->rxring[0]);

		/* Initialise Rx Ring */
		/* Walk backwards so each entry's .next links to the following
		 * one, with the last entry wrapping to rxring[0]. */
		for (i = MAX_RX_PDL - 1; i >= 0; i--) {
			lp->rxring[i].next = ringptr;
			ringptr = &(lp->rxring[i]);
			/* init_rxpdl returns the PDL size in u32 words */
			pageptr += hp100_init_rxpdl(dev, ringptr, pageptr);
		}

		/* Initialise Tx Ring */
		lp->txrcommit = 0;
		ringptr = lp->txrhead = lp->txrtail = &(lp->txring[0]);
		for (i = MAX_TX_PDL - 1; i >= 0; i--) {
			lp->txring[i].next = ringptr;
			ringptr = &(lp->txring[i]);
			pageptr += hp100_init_txpdl(dev, ringptr, pageptr);
		}
	}
}
-
-
/* These functions "format" the entries in the pdl structure   */
/* They return how much memory the fragments need.            */
/*
 * hp100_init_rxpdl - format one Rx PDL entry inside the DMA page.
 * @dev:     owning net device
 * @ringptr: ring entry to initialise
 * @pdlptr:  16-byte-aligned start of this PDL in the DMA page
 *
 * Returns the PDL size in u32 words, so the caller can advance its
 * page pointer to the next PDL slot.
 */
static int hp100_init_rxpdl(struct net_device *dev,
			    register hp100_ring_t * ringptr,
			    register u32 * pdlptr)
{
	/* pdlptr is starting address for this pdl */

	/* PDLs must be 16-byte aligned for the busmaster engine. */
	if (0 != (((unsigned long) pdlptr) & 0xf))
		printk("hp100: %s: Init rxpdl: Unaligned pdlptr 0x%lx.\n",
		       dev->name, (unsigned long) pdlptr);

	ringptr->pdl = pdlptr + 1;
	ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr + 1);
	ringptr->skb = NULL;	/* data buffer attached later by build_rx_pdl */

	/*
	 * Write address and length of first PDL Fragment (which is used for
	 * storing the RX-Header
	 * We use the 4 bytes _before_ the PDH in the pdl memory area to
	 * store this information. (PDH is at offset 0x04)
	 */
	/* Note that pdlptr+1 and not pdlptr is the pointer to the PDH */

	*(pdlptr + 2) = (u_int) virt_to_whatever(dev, pdlptr);	/* Address Frag 1 */
	*(pdlptr + 3) = 4;	/* Length  Frag 1 */

	return roundup(MAX_RX_FRAG * 2 + 2, 4);
}
-
-
-static int hp100_init_txpdl(struct net_device *dev,
-                           register hp100_ring_t * ringptr,
-                           register u32 * pdlptr)
-{
-       if (0 != (((unsigned long) pdlptr) & 0xf))
-               printk("hp100: %s: Init txpdl: Unaligned pdlptr 0x%lx.\n", dev->name, (unsigned long) pdlptr);
-
-       ringptr->pdl = pdlptr;  /* +1; */
-       ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr);     /* +1 */
-       ringptr->skb = NULL;
-
-       return roundup(MAX_TX_FRAG * 2 + 2, 4);
-}
-
/*
 * hp100_build_rx_pdl allocates an skb_buff of maximum size plus two bytes
 * for possible odd word alignment rounding up to next dword and set PDL
 * address for fragment#2
 * Returns: 0 if unable to allocate skb_buff
 *          1 if successful
 */
static int hp100_build_rx_pdl(hp100_ring_t * ringptr,
			      struct net_device *dev)
{
#ifdef HP100_DEBUG_B
	int ioaddr = dev->base_addr;
#endif
#ifdef HP100_DEBUG_BM
	u_int *p;
#endif

#ifdef HP100_DEBUG_B
	hp100_outw(0x4207, TRACE);
	printk("hp100: %s: build rx pdl\n", dev->name);
#endif

	/* Allocate skb buffer of maximum size */
	/* Note: This depends on the alloc_skb functions allocating more
	 * space than requested, i.e. aligning to 16bytes */

	ringptr->skb = netdev_alloc_skb(dev, roundup(MAX_ETHER_SIZE + 2, 4));

	if (NULL != ringptr->skb) {
		/*
		 * Reserve 2 bytes at the head of the buffer to land the IP header
		 * on a long word boundary (According to the Network Driver section
		 * in the Linux KHG, this should help to increase performance.)
		 */
		skb_reserve(ringptr->skb, 2);

		ringptr->skb->data = skb_put(ringptr->skb, MAX_ETHER_SIZE);

		/* ringptr->pdl points to the beginning of the PDL, i.e. the PDH */
		/* Note: 1st Fragment is used for the 4 byte packet status
		 * (receive header). Its PDL entries are set up by init_rxpdl. So
		 * here we only have to set up the PDL fragment entries for the data
		 * part. Those 4 bytes will be stored in the DMA memory region
		 * directly before the PDL.
		 */
#ifdef HP100_DEBUG_BM
		printk("hp100: %s: build_rx_pdl: PDH@0x%x, skb->data (len %d) at 0x%x\n",
				     dev->name, (u_int) ringptr->pdl,
				     roundup(MAX_ETHER_SIZE + 2, 4),
				     (unsigned int) ringptr->skb->data);
#endif

		/* Conversion to new PCI API : map skbuf data to PCI bus.
		 * Doc says it's OK for EISA as well - Jean II */
		ringptr->pdl[0] = 0x00020000;	/* Write PDH: 2 fragments */
		ringptr->pdl[3] = pdl_map_data(netdev_priv(dev),
					       ringptr->skb->data);	/* Frag 2: bus address of data */
		ringptr->pdl[4] = MAX_ETHER_SIZE;	/* Length of Data */

#ifdef HP100_DEBUG_BM
		for (p = (ringptr->pdl); p < (ringptr->pdl + 5); p++)
			printk("hp100: %s: Adr 0x%.8x = 0x%.8x\n", dev->name, (u_int) p, (u_int) * p);
#endif
		return 1;
	}
	/* else: */
	/* alloc_skb failed (no memory) -> still can receive the header
	 * fragment into PDL memory. make PDL safe by clearing msgptr and
	 * making the PDL only 1 fragment (i.e. the 4 byte packet status)
	 */
#ifdef HP100_DEBUG_BM
	printk("hp100: %s: build_rx_pdl: PDH@0x%x, No space for skb.\n", dev->name, (u_int) ringptr->pdl);
#endif

	ringptr->pdl[0] = 0x00010000;	/* PDH: Count=1 Fragment */

	return 0;
}
-
/*
 *  hp100_rxfill - attempt to fill the Rx Ring will empty skb's
 *
 * Makes assumption that skb's are always contiguous memory areas and
 * therefore PDLs contain only 2 physical fragments.
 * -  While the number of Rx PDLs with buffers is less than maximum
 *      a.  Get a maximum packet size skb
 *      b.  Put the physical address of the buffer into the PDL.
 *      c.  Output physical address of PDL to adapter.
 */
static void hp100_rxfill(struct net_device *dev)
{
	int ioaddr = dev->base_addr;	/* used by the hp100_* register macros */

	struct hp100_private *lp = netdev_priv(dev);
	hp100_ring_t *ringptr;

#ifdef HP100_DEBUG_B
	hp100_outw(0x4208, TRACE);
	printk("hp100: %s: rxfill\n", dev->name);
#endif

	/* RX_PDA lives on the performance page - must select it first. */
	hp100_page(PERFORMANCE);

	while (lp->rxrcommit < MAX_RX_PDL) {
		/*
		   ** Attempt to get a buffer and build a Rx PDL.
		 */
		ringptr = lp->rxrtail;
		if (0 == hp100_build_rx_pdl(ringptr, dev)) {
			return;	/* None available, return */
		}

		/* Hand this PDL over to the card */
		/* Note: This needs performance page selected! */
#ifdef HP100_DEBUG_BM
		printk("hp100: %s: rxfill: Hand to card: pdl #%d @0x%x phys:0x%x, buffer: 0x%x\n",
				     dev->name, lp->rxrcommit, (u_int) ringptr->pdl,
				     (u_int) ringptr->pdl_paddr, (u_int) ringptr->pdl[3]);
#endif

		hp100_outl((u32) ringptr->pdl_paddr, RX_PDA);

		/* Advance the tail past the PDL now owned by the card. */
		lp->rxrcommit += 1;
		lp->rxrtail = ringptr->next;
	}
}
-
/*
 * BM_shutdown - shutdown bus mastering and leave chip in reset state
 *
 * Quiesces all MAC/DMA activity with chip-specific busy-wait sequences
 * (Lassen/ETR vs. Shasta/Rainier) before placing the cascade MMU in
 * reset. Leaves the performance page selected on return.
 */

static void hp100_BM_shutdown(struct net_device *dev)
{
	int ioaddr = dev->base_addr;	/* used by the hp100_* register macros */
	struct hp100_private *lp = netdev_priv(dev);
	unsigned long time;

#ifdef HP100_DEBUG_B
	hp100_outw(0x4209, TRACE);
	printk("hp100: %s: bm shutdown\n", dev->name);
#endif

	hp100_page(PERFORMANCE);
	hp100_outw(0xfefe, IRQ_MASK);	/* mask off all ints */
	hp100_outw(0xffff, IRQ_STATUS);	/* Ack all ints */

	/* Ensure Interrupts are off */
	hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);

	/* Disable all MAC activity */
	hp100_page(MAC_CTRL);
	hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1);	/* stop rx/tx */

	/* If cascade MMU is not already in reset */
	if (0 != (hp100_inw(OPTION_LSW) & HP100_HW_RST)) {
		/* Wait 1.3ms (10Mb max packet time) to ensure MAC is idle so
		 * MMU pointers will not be reset out from underneath
		 */
		hp100_page(MAC_CTRL);
		for (time = 0; time < 5000; time++) {
			if ((hp100_inb(MAC_CFG_1) & (HP100_TX_IDLE | HP100_RX_IDLE)) == (HP100_TX_IDLE | HP100_RX_IDLE))
				break;
		}

		/* Shutdown algorithm depends on the generation of Cascade */
		if (lp->chip == HP100_CHIPID_LASSEN) {	/* ETR shutdown/reset */
			/* Disable Busmaster mode and wait for bit to go to zero. */
			hp100_page(HW_MAP);
			hp100_andb(~HP100_BM_MASTER, BM);
			/* 100 ms timeout */
			for (time = 0; time < 32000; time++) {
				if (0 == (hp100_inb(BM) & HP100_BM_MASTER))
					break;
			}
		} else {	/* Shasta or Rainier Shutdown/Reset */
			/* To ensure all bus master inloading activity has ceased,
			 * wait for no Rx PDAs or no Rx packets on card.
			 */
			hp100_page(PERFORMANCE);
			/* 100 ms timeout */
			for (time = 0; time < 10000; time++) {
				/* RX_PDL: PDLs not executed. */
				/* RX_PKT_CNT: RX'd packets on card. */
				if ((hp100_inb(RX_PDL) == 0) && (hp100_inb(RX_PKT_CNT) == 0))
					break;
			}

			if (time >= 10000)
				printk("hp100: %s: BM shutdown error.\n", dev->name);

			/* To ensure all bus master outloading activity has ceased,
			 * wait until the Tx PDA count goes to zero or no more Tx space
			 * available in the Tx region of the card.
			 */
			/* 100 ms timeout */
			for (time = 0; time < 10000; time++) {
				if ((0 == hp100_inb(TX_PKT_CNT)) &&
				    (0 != (hp100_inb(TX_MEM_FREE) & HP100_AUTO_COMPARE)))
					break;
			}

			/* Disable Busmaster mode */
			hp100_page(HW_MAP);
			hp100_andb(~HP100_BM_MASTER, BM);
		}	/* end of shutdown procedure for non-etr parts */

		hp100_cascade_reset(dev, 1);
	}
	hp100_page(PERFORMANCE);
	/* hp100_outw( HP100_BM_READ | HP100_BM_WRITE | HP100_RESET_HB, OPTION_LSW ); */
	/* Busmaster mode should be shut down now. */
}
-
-static int hp100_check_lan(struct net_device *dev)
-{
-       struct hp100_private *lp = netdev_priv(dev);
-
-       if (lp->lan_type < 0) { /* no LAN type detected yet? */
-               hp100_stop_interface(dev);
-               if ((lp->lan_type = hp100_sense_lan(dev)) < 0) {
-                       printk("hp100: %s: no connection found - check wire\n", dev->name);
-                       hp100_start_interface(dev);     /* 10Mb/s RX packets maybe handled */
-                       return -EIO;
-               }
-               if (lp->lan_type == HP100_LAN_100)
-                       lp->hub_status = hp100_login_to_vg_hub(dev, 0); /* relogin */
-               hp100_start_interface(dev);
-       }
-       return 0;
-}
-
/*
 *  transmit functions
 */

/* tx function for busmaster mode */
/*
 * hp100_start_xmit_bm - transmit one skb via a Tx PDL (busmaster mode).
 *
 * On ring exhaustion it attempts link recovery (hub relogin, re-sense,
 * or interface reset) and drops the packet. Always returns
 * NETDEV_TX_OK; dropped skbs are freed here.
 */
static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
				       struct net_device *dev)
{
	unsigned long flags;
	int i, ok_flag;
	int ioaddr = dev->base_addr;	/* used by the hp100_* register macros */
	struct hp100_private *lp = netdev_priv(dev);
	hp100_ring_t *ringptr;

#ifdef HP100_DEBUG_B
	hp100_outw(0x4210, TRACE);
	printk("hp100: %s: start_xmit_bm\n", dev->name);
#endif
	if (skb->len <= 0)
		goto drop;

	/* Shasta needs a minimum-length frame; pad (may free skb on failure). */
	if (lp->chip == HP100_CHIPID_SHASTA && skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* Get Tx ring tail pointer */
	if (lp->txrtail->next == lp->txrhead) {
		/* No memory. */
#ifdef HP100_DEBUG
		printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name);
#endif
		/* not waited long enough since last tx? */
		if (time_before(jiffies, dev_trans_start(dev) + HZ))
			goto drop;

		if (hp100_check_lan(dev))
			goto drop;

		if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) {
			/* we have a 100Mb/s adapter but it isn't connected to hub */
			printk("hp100: %s: login to 100Mb/s hub retry\n", dev->name);
			hp100_stop_interface(dev);
			lp->hub_status = hp100_login_to_vg_hub(dev, 0);
			hp100_start_interface(dev);
		} else {
			/* Re-sense the cable under the lock with ints off. */
			spin_lock_irqsave(&lp->lock, flags);
			hp100_ints_off();	/* Useful ? Jean II */
			i = hp100_sense_lan(dev);
			hp100_ints_on();
			spin_unlock_irqrestore(&lp->lock, flags);
			if (i == HP100_LAN_ERR)
				printk("hp100: %s: link down detected\n", dev->name);
			else if (lp->lan_type != i) {	/* cable change! */
				/* it's very hard - all network settings must be changed!!! */
				printk("hp100: %s: cable change 10Mb/s <-> 100Mb/s detected\n", dev->name);
				lp->lan_type = i;
				hp100_stop_interface(dev);
				if (lp->lan_type == HP100_LAN_100)
					lp->hub_status = hp100_login_to_vg_hub(dev, 0);
				hp100_start_interface(dev);
			} else {
				printk("hp100: %s: interface reset\n", dev->name);
				hp100_stop_interface(dev);
				if (lp->lan_type == HP100_LAN_100)
					lp->hub_status = hp100_login_to_vg_hub(dev, 0);
				hp100_start_interface(dev);
			}
		}

		goto drop;
	}

	/*
	 * we have to turn int's off before modifying this, otherwise
	 * a tx_pdl_cleanup could occur at the same time
	 */
	spin_lock_irqsave(&lp->lock, flags);
	ringptr = lp->txrtail;
	lp->txrtail = ringptr->next;

	/* Check whether packet has minimal packet size */
	ok_flag = skb->len >= HP100_MIN_PACKET_SIZE;
	i = ok_flag ? skb->len : HP100_MIN_PACKET_SIZE;

	ringptr->skb = skb;
	ringptr->pdl[0] = ((1 << 16) | i);	/* PDH: 1 Fragment & length */
	if (lp->chip == HP100_CHIPID_SHASTA) {
		/* TODO:Could someone who has the EISA card please check if this works? */
		ringptr->pdl[2] = i;
	} else {		/* Lassen */
		/* In the PDL, don't use the padded size but the real packet size: */
		ringptr->pdl[2] = skb->len;	/* 1st Frag: Length of frag */
	}
	/* Conversion to new PCI API : map skbuf data to PCI bus.
	 * Doc says it's OK for EISA as well - Jean II */
	ringptr->pdl[1] = ((u32) pci_map_single(lp->pci_dev, skb->data, ringptr->pdl[2], PCI_DMA_TODEVICE));	/* 1st Frag: Adr. of data */

	/* Hand this PDL to the card. */
	hp100_outl(ringptr->pdl_paddr, TX_PDA_L);	/* Low Prio. Queue */

	lp->txrcommit++;

	/* Stats are counted at submit; the skb is freed in clean_txring. */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;

drop:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
-
-
/* clean_txring checks if packets have been sent by the card by reading
 * the TX_PDL register from the performance page and comparing it to the
 * number of committed packets. It then frees the skb's of the packets that
 * obviously have been sent to the network.
 *
 * Needs the PERFORMANCE page selected.
 * Caller must also hold lp->lock: the ring head/commit counters are
 * shared with the transmit path. (NOTE(review): inferred from the
 * locking in start_xmit_bm - confirm at call sites.)
 */
static void hp100_clean_txring(struct net_device *dev)
{
	struct hp100_private *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;	/* used by the hp100_* register macros */
	int donecount;

#ifdef HP100_DEBUG_B
	hp100_outw(0x4211, TRACE);
	printk("hp100: %s: clean txring\n", dev->name);
#endif

	/* How many PDLs have been transmitted? */
	donecount = (lp->txrcommit) - hp100_inb(TX_PDL);

#ifdef HP100_DEBUG
	if (donecount > MAX_TX_PDL)
		printk("hp100: %s: Warning: More PDLs transmitted than committed to card???\n", dev->name);
#endif

	/* Unmap, free and advance past each completed PDL. */
	for (; 0 != donecount; donecount--) {
#ifdef HP100_DEBUG_BM
		printk("hp100: %s: Free skb: data @0x%.8x txrcommit=0x%x TXPDL=0x%x, done=0x%x\n",
				dev->name, (u_int) lp->txrhead->skb->data,
				lp->txrcommit, hp100_inb(TX_PDL), donecount);
#endif
		/* Conversion to new PCI API : NOP */
		pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE);
		dev_consume_skb_any(lp->txrhead->skb);
		lp->txrhead->skb = NULL;
		lp->txrhead = lp->txrhead->next;
		lp->txrcommit--;
	}
}
-
-/* tx function for slave modes */
-static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
-                                   struct net_device *dev)
-{
-       unsigned long flags;
-       int i, ok_flag;
-       int ioaddr = dev->base_addr;
-       u_short val;
-       struct hp100_private *lp = netdev_priv(dev);
-
-#ifdef HP100_DEBUG_B
-       hp100_outw(0x4212, TRACE);
-       printk("hp100: %s: start_xmit\n", dev->name);
-#endif
-       if (skb->len <= 0)
-               goto drop;
-
-       if (hp100_check_lan(dev))
-               goto drop;
-
-       /* If there is not enough free memory on the card... */
-       i = hp100_inl(TX_MEM_FREE) & 0x7fffffff;
-       if (!(((i / 2) - 539) > (skb->len + 16) && (hp100_inb(TX_PKT_CNT) < 255))) {
-#ifdef HP100_DEBUG
-               printk("hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i);
-#endif
-               /* not waited long enough since last failed tx try? */
-               if (time_before(jiffies, dev_trans_start(dev) + HZ)) {
-#ifdef HP100_DEBUG
-                       printk("hp100: %s: trans_start timing problem\n",
-                              dev->name);
-#endif
-                       goto drop;
-               }
-               if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) {
-                       /* we have a 100Mb/s adapter but it isn't connected to hub */
-                       printk("hp100: %s: login to 100Mb/s hub retry\n", dev->name);
-                       hp100_stop_interface(dev);
-                       lp->hub_status = hp100_login_to_vg_hub(dev, 0);
-                       hp100_start_interface(dev);
-               } else {
-                       spin_lock_irqsave(&lp->lock, flags);
-                       hp100_ints_off();       /* Useful ? Jean II */
-                       i = hp100_sense_lan(dev);
-                       hp100_ints_on();
-                       spin_unlock_irqrestore(&lp->lock, flags);
-                       if (i == HP100_LAN_ERR)
-                               printk("hp100: %s: link down detected\n", dev->name);
-                       else if (lp->lan_type != i) {   /* cable change! */
-                               /* it's very hard - all network setting must be changed!!! */
-                               printk("hp100: %s: cable change 10Mb/s <-> 100Mb/s detected\n", dev->name);
-                               lp->lan_type = i;
-                               hp100_stop_interface(dev);
-                               if (lp->lan_type == HP100_LAN_100)
-                                       lp->hub_status = hp100_login_to_vg_hub(dev, 0);
-                               hp100_start_interface(dev);
-                       } else {
-                               printk("hp100: %s: interface reset\n", dev->name);
-                               hp100_stop_interface(dev);
-                               if (lp->lan_type == HP100_LAN_100)
-                                       lp->hub_status = hp100_login_to_vg_hub(dev, 0);
-                               hp100_start_interface(dev);
-                               mdelay(1);
-                       }
-               }
-               goto drop;
-       }
-
-       for (i = 0; i < 6000 && (hp100_inb(OPTION_MSW) & HP100_TX_CMD); i++) {
-#ifdef HP100_DEBUG_TX
-               printk("hp100: %s: start_xmit: busy\n", dev->name);
-#endif
-       }
-
-       spin_lock_irqsave(&lp->lock, flags);
-       hp100_ints_off();
-       val = hp100_inw(IRQ_STATUS);
-       /* Ack / clear the interrupt TX_COMPLETE interrupt - this interrupt is set
-        * when the current packet being transmitted on the wire is completed. */
-       hp100_outw(HP100_TX_COMPLETE, IRQ_STATUS);
-#ifdef HP100_DEBUG_TX
-       printk("hp100: %s: start_xmit: irq_status=0x%.4x, irqmask=0x%.4x, len=%d\n",
-                       dev->name, val, hp100_inw(IRQ_MASK), (int) skb->len);
-#endif
-
-       ok_flag = skb->len >= HP100_MIN_PACKET_SIZE;
-       i = ok_flag ? skb->len : HP100_MIN_PACKET_SIZE;
-
-       hp100_outw(i, DATA32);  /* tell card the total packet length */
-       hp100_outw(i, FRAGMENT_LEN);    /* and first/only fragment length    */
-
-       if (lp->mode == 2) {    /* memory mapped */
-               /* Note: The J2585B needs alignment to 32bits here!  */
-               memcpy_toio(lp->mem_ptr_virt, skb->data, (skb->len + 3) & ~3);
-               if (!ok_flag)
-                       memset_io(lp->mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb->len);
-       } else {                /* programmed i/o */
-               outsl(ioaddr + HP100_REG_DATA32, skb->data,
-                     (skb->len + 3) >> 2);
-               if (!ok_flag)
-                       for (i = (skb->len + 3) & ~3; i < HP100_MIN_PACKET_SIZE; i += 4)
-                               hp100_outl(0, DATA32);
-       }
-
-       hp100_outb(HP100_TX_CMD | HP100_SET_LB, OPTION_MSW);    /* send packet */
-
-       dev->stats.tx_packets++;
-       dev->stats.tx_bytes += skb->len;
-       hp100_ints_on();
-       spin_unlock_irqrestore(&lp->lock, flags);
-
-       dev_consume_skb_any(skb);
-
-#ifdef HP100_DEBUG_TX
-       printk("hp100: %s: start_xmit: end\n", dev->name);
-#endif
-
-       return NETDEV_TX_OK;
-
-drop:
-       dev_kfree_skb(skb);
-       return NETDEV_TX_OK;
-
-}
-
-
-/*
- * Receive Function (Non-Busmaster mode)
- * Called when an "Receive Packet" interrupt occurs, i.e. the receive
- * packet counter is non-zero.
- * For non-busmaster, this function does the whole work of transferring
- * the packet to the host memory and then up to higher layers via skb
- * and netif_rx.
- */
-
-static void hp100_rx(struct net_device *dev)
-{
-       int packets, pkt_len;
-       int ioaddr = dev->base_addr;
-       struct hp100_private *lp = netdev_priv(dev);
-       u_int header;
-       struct sk_buff *skb;
-
-#ifdef DEBUG_B
-       hp100_outw(0x4213, TRACE);
-       printk("hp100: %s: rx\n", dev->name);
-#endif
-
-       /* First get indication of received lan packet */
-       /* RX_PKT_CND indicates the number of packets which have been fully */
-       /* received onto the card but have not been fully transferred of the card */
-       packets = hp100_inb(RX_PKT_CNT);
-#ifdef HP100_DEBUG_RX
-       if (packets > 1)
-               printk("hp100: %s: rx: waiting packets = %d\n", dev->name, packets);
-#endif
-
-       while (packets-- > 0) {
-               /* If ADV_NXT_PKT is still set, we have to wait until the card has */
-               /* really advanced to the next packet. */
-               for (pkt_len = 0; pkt_len < 6000 && (hp100_inb(OPTION_MSW) & HP100_ADV_NXT_PKT); pkt_len++) {
-#ifdef HP100_DEBUG_RX
-                       printk ("hp100: %s: rx: busy, remaining packets = %d\n", dev->name, packets);
-#endif
-               }
-
-               /* First we get the header, which contains information about the */
-               /* actual length of the received packet. */
-               if (lp->mode == 2) {    /* memory mapped mode */
-                       header = readl(lp->mem_ptr_virt);
-               } else          /* programmed i/o */
-                       header = hp100_inl(DATA32);
-
-               pkt_len = ((header & HP100_PKT_LEN_MASK) + 3) & ~3;
-
-#ifdef HP100_DEBUG_RX
-               printk("hp100: %s: rx: new packet - length=%d, errors=0x%x, dest=0x%x\n",
-                                    dev->name, header & HP100_PKT_LEN_MASK,
-                                    (header >> 16) & 0xfff8, (header >> 16) & 7);
-#endif
-
-               /* Now we allocate the skb and transfer the data into it. */
-               skb = netdev_alloc_skb(dev, pkt_len + 2);
-               if (skb == NULL) {      /* Not enough memory->drop packet */
-#ifdef HP100_DEBUG
-                       printk("hp100: %s: rx: couldn't allocate a sk_buff of size %d\n",
-                                            dev->name, pkt_len);
-#endif
-                       dev->stats.rx_dropped++;
-               } else {        /* skb successfully allocated */
-
-                       u_char *ptr;
-
-                       skb_reserve(skb,2);
-
-                       /* ptr to start of the sk_buff data area */
-                       skb_put(skb, pkt_len);
-                       ptr = skb->data;
-
-                       /* Now transfer the data from the card into that area */
-                       if (lp->mode == 2)
-                               memcpy_fromio(ptr, lp->mem_ptr_virt,pkt_len);
-                       else    /* io mapped */
-                               insl(ioaddr + HP100_REG_DATA32, ptr, pkt_len >> 2);
-
-                       skb->protocol = eth_type_trans(skb, dev);
-
-#ifdef HP100_DEBUG_RX
-                       printk("hp100: %s: rx: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
-                                       dev->name, ptr[0], ptr[1], ptr[2], ptr[3],
-                                       ptr[4], ptr[5], ptr[6], ptr[7], ptr[8],
-                                       ptr[9], ptr[10], ptr[11]);
-#endif
-                       netif_rx(skb);
-                       dev->stats.rx_packets++;
-                       dev->stats.rx_bytes += pkt_len;
-               }
-
-               /* Indicate the card that we have got the packet */
-               hp100_outb(HP100_ADV_NXT_PKT | HP100_SET_LB, OPTION_MSW);
-
-               switch (header & 0x00070000) {
-               case (HP100_MULTI_ADDR_HASH << 16):
-               case (HP100_MULTI_ADDR_NO_HASH << 16):
-                       dev->stats.multicast++;
-                       break;
-               }
-       }                       /* end of while(there are packets) loop */
-#ifdef HP100_DEBUG_RX
-       printk("hp100_rx: %s: end\n", dev->name);
-#endif
-}
-
/*
 * Receive Function for Busmaster Mode
 *
 * Drains every PDL (packet descriptor list) that the card's busmaster
 * engine has completed: unmaps the DMA buffer, hands the skb up the
 * stack (or drops it on error), then builds a fresh PDL at the ring
 * tail so the committed count (lp->rxrcommit) stays constant.
 */
static void hp100_rx_bm(struct net_device *dev)
{
	int ioaddr = dev->base_addr;	/* used implicitly by the hp100_* I/O macros */
	struct hp100_private *lp = netdev_priv(dev);
	hp100_ring_t *ptr;
	u_int header;
	int pkt_len;

#ifdef HP100_DEBUG_B
	hp100_outw(0x4214, TRACE);
	printk("hp100: %s: rx_bm\n", dev->name);
#endif

#ifdef HP100_DEBUG
	/* Debug-only sanity checks: we must have committed PDLs, and the card
	 * can never report more completed packets than were committed. */
	if (0 == lp->rxrcommit) {
		printk("hp100: %s: rx_bm called although no PDLs were committed to adapter?\n", dev->name);
		return;
	} else
		/* RX_PKT_CNT states how many PDLs are currently formatted and available to
		 * the cards BM engine */
	if ((hp100_inw(RX_PKT_CNT) & 0x00ff) >= lp->rxrcommit) {
		printk("hp100: %s: More packets received than committed? RX_PKT_CNT=0x%x, commit=0x%x\n",
				     dev->name, hp100_inw(RX_PKT_CNT) & 0x00ff,
				     lp->rxrcommit);
		return;
	}
#endif

	/* RX_PDL counts descriptors still owned by the card; anything below
	 * the committed count has been filled and now belongs to us. */
	while ((lp->rxrcommit > hp100_inb(RX_PDL))) {
		/*
		 * The packet was received into the pdl pointed to by lp->rxrhead (
		 * the oldest pdl in the ring
		 */

		/* First we get the header, which contains information about the */
		/* actual length of the received packet. */

		ptr = lp->rxrhead;

		/* The status/length header word written by the card sits
		 * immediately before the PDL itself. */
		header = *(ptr->pdl - 1);
		pkt_len = (header & HP100_PKT_LEN_MASK);

		/* Conversion to new PCI API : NOP */
		pci_unmap_single(lp->pci_dev, (dma_addr_t) ptr->pdl[3], MAX_ETHER_SIZE, PCI_DMA_FROMDEVICE);

#ifdef HP100_DEBUG_BM
		printk("hp100: %s: rx_bm: header@0x%x=0x%x length=%d, errors=0x%x, dest=0x%x\n",
				dev->name, (u_int) (ptr->pdl - 1), (u_int) header,
				pkt_len, (header >> 16) & 0xfff8, (header >> 16) & 7);
		printk("hp100: %s: RX_PDL_COUNT:0x%x TX_PDL_COUNT:0x%x, RX_PKT_CNT=0x%x PDH=0x%x, Data@0x%x len=0x%x\n",
				dev->name, hp100_inb(RX_PDL), hp100_inb(TX_PDL),
				hp100_inb(RX_PKT_CNT), (u_int) * (ptr->pdl),
				(u_int) * (ptr->pdl + 3), (u_int) * (ptr->pdl + 4));
#endif

		/* Only lengths within valid Ethernet frame bounds are passed up. */
		if ((pkt_len >= MIN_ETHER_SIZE) &&
		    (pkt_len <= MAX_ETHER_SIZE)) {
			if (ptr->skb == NULL) {
				printk("hp100: %s: rx_bm: skb null\n", dev->name);
				/* can happen if we only allocated room for the pdh due to memory shortage. */
				dev->stats.rx_dropped++;
			} else {
				skb_trim(ptr->skb, pkt_len);	/* Shorten it */
				ptr->skb->protocol =
				    eth_type_trans(ptr->skb, dev);

				netif_rx(ptr->skb);	/* Up and away... */

				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}

			/* Bits 16..18 classify the destination address; both
			 * multicast encodings count as multicast traffic. */
			switch (header & 0x00070000) {
			case (HP100_MULTI_ADDR_HASH << 16):
			case (HP100_MULTI_ADDR_NO_HASH << 16):
				dev->stats.multicast++;
				break;
			}
		} else {
#ifdef HP100_DEBUG
			printk("hp100: %s: rx_bm: Received bad packet (length=%d)\n", dev->name, pkt_len);
#endif
			if (ptr->skb != NULL)
				dev_kfree_skb_any(ptr->skb);
			dev->stats.rx_errors++;
		}

		/* Advance the head past the descriptor we just consumed. */
		lp->rxrhead = lp->rxrhead->next;

		/* Allocate a new rx PDL (so lp->rxrcommit stays the same) */
		if (0 == hp100_build_rx_pdl(lp->rxrtail, dev)) {
			/* No space for skb, header can still be received. */
#ifdef HP100_DEBUG
			printk("hp100: %s: rx_bm: No space for new PDL.\n", dev->name);
#endif
			return;
		} else {	/* successfully allocated new PDL - put it in ringlist at tail. */
			hp100_outl((u32) lp->rxrtail->pdl_paddr, RX_PDA);
			lp->rxrtail = lp->rxrtail->next;
		}

	}
}
-
/*
 *  statistics
 */

/* net_device_ops .ndo_get_stats hook: refresh dev->stats from the
 * on-card counters and return a pointer to them. */
static struct net_device_stats *hp100_get_stats(struct net_device *dev)
{
	unsigned long flags;
	int ioaddr = dev->base_addr;	/* used implicitly by the hp100_* I/O macros */
	struct hp100_private *lp = netdev_priv(dev);

#ifdef HP100_DEBUG_B
	hp100_outw(0x4215, TRACE);
#endif

	/* The hardware counters are clear-on-read (see hp100_update_stats),
	 * so serialise against the interrupt handler, which also reads them. */
	spin_lock_irqsave(&lp->lock, flags);
	hp100_ints_off();	/* Useful ? Jean II */
	hp100_update_stats(dev);
	hp100_ints_on();
	spin_unlock_irqrestore(&lp->lock, flags);
	return &(dev->stats);
}
-
/* Fold the adapter's on-card error counters into dev->stats.
 * The counters clear when read, so each value is read exactly once and
 * added to the running totals.  Callers (hp100_get_stats and the IRQ
 * handler) hold lp->lock around this, since it switches register pages. */
static void hp100_update_stats(struct net_device *dev)
{
	int ioaddr = dev->base_addr;	/* used implicitly by the hp100_* I/O macros */
	u_short val;

#ifdef HP100_DEBUG_B
	hp100_outw(0x4216, TRACE);
	printk("hp100: %s: update-stats\n", dev->name);
#endif

	/* Note: Statistics counters clear when read. */
	hp100_page(MAC_CTRL);
	val = hp100_inw(DROPPED) & 0x0fff;	/* frames dropped by the card */
	dev->stats.rx_errors += val;
	dev->stats.rx_over_errors += val;
	val = hp100_inb(CRC);	/* receive CRC errors */
	dev->stats.rx_errors += val;
	dev->stats.rx_crc_errors += val;
	val = hp100_inb(ABORT);	/* aborted transmissions */
	dev->stats.tx_errors += val;
	dev->stats.tx_aborted_errors += val;
	hp100_page(PERFORMANCE);	/* restore the default register page */
}
-
-static void hp100_misc_interrupt(struct net_device *dev)
-{
-#ifdef HP100_DEBUG_B
-       int ioaddr = dev->base_addr;
-#endif
-
-#ifdef HP100_DEBUG_B
-       int ioaddr = dev->base_addr;
-       hp100_outw(0x4216, TRACE);
-       printk("hp100: %s: misc_interrupt\n", dev->name);
-#endif
-
-       /* Note: Statistics counters clear when read. */
-       dev->stats.rx_errors++;
-       dev->stats.tx_errors++;
-}
-
-static void hp100_clear_stats(struct hp100_private *lp, int ioaddr)
-{
-       unsigned long flags;
-
-#ifdef HP100_DEBUG_B
-       hp100_outw(0x4217, TRACE);
-       printk("hp100: %s: clear_stats\n", dev->name);
-#endif
-
-       spin_lock_irqsave(&lp->lock, flags);
-       hp100_page(MAC_CTRL);   /* get all statistics bytes */
-       hp100_inw(DROPPED);
-       hp100_inb(CRC);
-       hp100_inb(ABORT);
-       hp100_page(PERFORMANCE);
-       spin_unlock_irqrestore(&lp->lock, flags);
-}
-
-
/*
 *  multicast setup
 */

/*
 *  Set or clear the multicast filter for this adapter.
 */

/* net_device_ops .ndo_set_rx_mode hook: pick MAC modes and hash bytes
 * from dev->flags / the multicast list, and only touch the hardware
 * (and relogin to a 100VG hub) when something actually changed. */
static void hp100_set_multicast_list(struct net_device *dev)
{
	unsigned long flags;
	int ioaddr = dev->base_addr;	/* used implicitly by the hp100_* I/O macros */
	struct hp100_private *lp = netdev_priv(dev);

#ifdef HP100_DEBUG_B
	hp100_outw(0x4218, TRACE);
	printk("hp100: %s: set_mc_list\n", dev->name);
#endif

	spin_lock_irqsave(&lp->lock, flags);
	hp100_ints_off();
	hp100_page(MAC_CTRL);
	hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1);	/* stop rx/tx */

	/* Decide the MAC filtering mode: promiscuous > multicast > normal. */
	if (dev->flags & IFF_PROMISC) {
		lp->mac2_mode = HP100_MAC2MODE6;	/* promiscuous mode = get all good */
		lp->mac1_mode = HP100_MAC1MODE6;	/* packets on the net */
		memset(&lp->hash_bytes, 0xff, 8);
	} else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) {
		lp->mac2_mode = HP100_MAC2MODE5;	/* multicast mode = get packets for */
		lp->mac1_mode = HP100_MAC1MODE5;	/* me, broadcasts and all multicasts */
#ifdef HP100_MULTICAST_FILTER	/* doesn't work!!! */
		if (dev->flags & IFF_ALLMULTI) {
			/* set hash filter to receive all multicast packets */
			memset(&lp->hash_bytes, 0xff, 8);
		} else {
			int i, idx;
			u_char *addrs;
			struct netdev_hw_addr *ha;

			memset(&lp->hash_bytes, 0x00, 8);
#ifdef HP100_DEBUG
			printk("hp100: %s: computing hash filter - mc_count = %i\n",
			       dev->name, netdev_mc_count(dev));
#endif
			netdev_for_each_mc_addr(ha, dev) {
				addrs = ha->addr;
#ifdef HP100_DEBUG
				printk("hp100: %s: multicast = %pM, ",
					     dev->name, addrs);
#endif
				for (i = idx = 0; i < 6; i++) {
					idx ^= *addrs++ & 0x3f;
					/* NOTE(review): this printk is NOT inside an
					 * HP100_DEBUG guard like its neighbours — it
					 * looks like a debug leftover, but the whole
					 * branch is compiled out by default. */
					printk(":%02x:", idx);
				}
#ifdef HP100_DEBUG
				printk("idx = %i\n", idx);
#endif
				lp->hash_bytes[idx >> 3] |= (1 << (idx & 7));
			}
		}
#else
		/* Filter "doesn't work" (see above), so accept all multicasts. */
		memset(&lp->hash_bytes, 0xff, 8);
#endif
	} else {
		lp->mac2_mode = HP100_MAC2MODE3;	/* normal mode = get packets for me */
		lp->mac1_mode = HP100_MAC1MODE3;	/* and broadcasts */
		memset(&lp->hash_bytes, 0x00, 8);
	}

	/* Only rewrite the MAC mode registers if the mode really changed;
	 * a 100VG hub relogin is expensive. */
	if (((hp100_inb(MAC_CFG_1) & 0x0f) != lp->mac1_mode) ||
	    (hp100_inb(MAC_CFG_2) != lp->mac2_mode)) {
		int i;

		hp100_outb(lp->mac2_mode, MAC_CFG_2);
		hp100_andb(HP100_MAC1MODEMASK, MAC_CFG_1);	/* clear mac1 mode bits */
		hp100_orb(lp->mac1_mode, MAC_CFG_1);	/* and set the new mode */

		hp100_page(MAC_ADDRESS);
		for (i = 0; i < 8; i++)
			hp100_outb(lp->hash_bytes[i], HASH_BYTE0 + i);
#ifdef HP100_DEBUG
		printk("hp100: %s: mac1 = 0x%x, mac2 = 0x%x, multicast hash = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
				     dev->name, lp->mac1_mode, lp->mac2_mode,
				     lp->hash_bytes[0], lp->hash_bytes[1],
				     lp->hash_bytes[2], lp->hash_bytes[3],
				     lp->hash_bytes[4], lp->hash_bytes[5],
				     lp->hash_bytes[6], lp->hash_bytes[7]);
#endif

		if (lp->lan_type == HP100_LAN_100) {
#ifdef HP100_DEBUG
			printk("hp100: %s: 100VG MAC settings have changed - relogin.\n", dev->name);
#endif
			lp->hub_status = hp100_login_to_vg_hub(dev, 1);	/* force a relogin to the hub */
		}
	} else {
		/* Mode unchanged; still compare the hash bytes and rewrite
		 * (and relogin) only if they differ. */
		int i;
		u_char old_hash_bytes[8];

		hp100_page(MAC_ADDRESS);
		for (i = 0; i < 8; i++)
			old_hash_bytes[i] = hp100_inb(HASH_BYTE0 + i);
		if (memcmp(old_hash_bytes, &lp->hash_bytes, 8)) {
			for (i = 0; i < 8; i++)
				hp100_outb(lp->hash_bytes[i], HASH_BYTE0 + i);
#ifdef HP100_DEBUG
			printk("hp100: %s: multicast hash = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
					dev->name, lp->hash_bytes[0],
					lp->hash_bytes[1], lp->hash_bytes[2],
					lp->hash_bytes[3], lp->hash_bytes[4],
					lp->hash_bytes[5], lp->hash_bytes[6],
					lp->hash_bytes[7]);
#endif

			if (lp->lan_type == HP100_LAN_100) {
#ifdef HP100_DEBUG
				printk("hp100: %s: 100VG MAC settings have changed - relogin.\n", dev->name);
#endif
				lp->hub_status = hp100_login_to_vg_hub(dev, 1);	/* force a relogin to the hub */
			}
		}
	}

	/* Re-enable rx/tx, which were stopped at the top of this function. */
	hp100_page(MAC_CTRL);
	hp100_orb(HP100_RX_EN | HP100_RX_IDLE |	/* enable rx */
		  HP100_TX_EN | HP100_TX_IDLE, MAC_CFG_1);	/* enable tx */

	hp100_page(PERFORMANCE);
	hp100_ints_on();
	spin_unlock_irqrestore(&lp->lock, flags);
}
-
/*
 *  hardware interrupt handling
 */

/* Top-level IRQ handler.  Reads IRQ_STATUS once, dispatches to the
 * slave-mode (lp->mode != 1) or busmaster (lp->mode == 1) rx/tx paths,
 * and acknowledges the status bits — deliberately AFTER the slave rx
 * path has run (see the comment at the hp100_outw ack below). */
static irqreturn_t hp100_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct hp100_private *lp = netdev_priv(dev);

	int ioaddr;	/* used implicitly by the hp100_* I/O macros */
	u_int val;

	if (dev == NULL)
		return IRQ_NONE;
	ioaddr = dev->base_addr;

	spin_lock(&lp->lock);

	/* hp100_ints_off() is called with the lock held; it is re-enabled
	 * after the unlock on every exit path below. */
	hp100_ints_off();

#ifdef HP100_DEBUG_B
	hp100_outw(0x4219, TRACE);
#endif

	/*  hp100_page( PERFORMANCE ); */
	val = hp100_inw(IRQ_STATUS);
#ifdef HP100_DEBUG_IRQ
	printk("hp100: %s: mode=%x,IRQ_STAT=0x%.4x,RXPKTCNT=0x%.2x RXPDL=0x%.2x TXPKTCNT=0x%.2x TXPDL=0x%.2x\n",
			     dev->name, lp->mode, (u_int) val, hp100_inb(RX_PKT_CNT),
			     hp100_inb(RX_PDL), hp100_inb(TX_PKT_CNT), hp100_inb(TX_PDL));
#endif

	if (val == 0) {		/* might be a shared interrupt */
		spin_unlock(&lp->lock);
		hp100_ints_on();
		return IRQ_NONE;
	}
	/* We're only interested in those interrupts we really enabled. */
	/* val &= hp100_inw( IRQ_MASK ); */

	/*
	 * RX_PDL_FILL_COMPL is set whenever a RX_PDL has been executed. A RX_PDL
	 * is considered executed whenever the RX_PDL data structure is no longer
	 * needed.
	 */
	if (val & HP100_RX_PDL_FILL_COMPL) {
		if (lp->mode == 1)
			hp100_rx_bm(dev);
		else {
			printk("hp100: %s: rx_pdl_fill_compl interrupt although not busmaster?\n", dev->name);
		}
	}

	/*
	 * The RX_PACKET interrupt is set, when the receive packet counter is
	 * non zero. We use this interrupt for receiving in slave mode. In
	 * busmaster mode, we use it to make sure we did not miss any rx_pdl_fill
	 * interrupts. If rx_pdl_fill_compl is not set and rx_packet is set, then
	 * we somehow have missed a rx_pdl_fill_compl interrupt.
	 */

	if (val & HP100_RX_PACKET) {	/* Receive Packet Counter is non zero */
		if (lp->mode != 1)	/* non busmaster */
			hp100_rx(dev);
		else if (!(val & HP100_RX_PDL_FILL_COMPL)) {
			/* Shouldn't happen - maybe we missed a RX_PDL_FILL Interrupt?  */
			hp100_rx_bm(dev);
		}
	}

	/*
	 * Ack. that we have noticed the interrupt and thereby allow next one.
	 * Note that this is now done after the slave rx function, since first
	 * acknowledging and then setting ADV_NXT_PKT caused an extra interrupt
	 * on the J2573.
	 */
	hp100_outw(val, IRQ_STATUS);

	/*
	 * RX_ERROR is set when a packet is dropped due to no memory resources on
	 * the card or when a RCV_ERR occurs.
	 * TX_ERROR is set when a TX_ABORT condition occurs in the MAC->exists
	 * only in the 802.3 MAC and happens when 16 collisions occur during a TX
	 */
	if (val & (HP100_TX_ERROR | HP100_RX_ERROR)) {
#ifdef HP100_DEBUG_IRQ
		printk("hp100: %s: TX/RX Error IRQ\n", dev->name);
#endif
		hp100_update_stats(dev);
		if (lp->mode == 1) {
			/* Busmaster: replenish rx descriptors and reap tx ones. */
			hp100_rxfill(dev);
			hp100_clean_txring(dev);
		}
	}

	/*
	 * RX_PDA_ZERO is set when the PDA count goes from non-zero to zero.
	 */
	if ((lp->mode == 1) && (val & (HP100_RX_PDA_ZERO)))
		hp100_rxfill(dev);

	/*
	 * HP100_TX_COMPLETE interrupt occurs when packet transmitted on wire
	 * is completed
	 */
	if ((lp->mode == 1) && (val & (HP100_TX_COMPLETE)))
		hp100_clean_txring(dev);

	/*
	 * MISC_ERROR is set when either the LAN link goes down or a detected
	 * bus error occurs.
	 */
	if (val & HP100_MISC_ERROR) {	/* New for J2585B */
#ifdef HP100_DEBUG_IRQ
		printk
		    ("hp100: %s: Misc. Error Interrupt - Check cabling.\n",
		     dev->name);
#endif
		if (lp->mode == 1) {
			hp100_clean_txring(dev);
			hp100_rxfill(dev);
		}
		hp100_misc_interrupt(dev);
	}

	spin_unlock(&lp->lock);
	hp100_ints_on();
	return IRQ_HANDLED;
}
-
/*
 *  some misc functions
 */

/* Bring the adapter out of its quiescent state: un-tristate the IRQ
 * line, enable the mode-specific data path (busmaster BM bit + rx ring,
 * or memory mapping), unmask the interrupts relevant to lp->mode, and
 * finally enable the MAC via hp100_set_multicast_list(). */
static void hp100_start_interface(struct net_device *dev)
{
	unsigned long flags;
	int ioaddr = dev->base_addr;	/* used implicitly by the hp100_* I/O macros */
	struct hp100_private *lp = netdev_priv(dev);

#ifdef HP100_DEBUG_B
	hp100_outw(0x4220, TRACE);
	printk("hp100: %s: hp100_start_interface\n", dev->name);
#endif

	spin_lock_irqsave(&lp->lock, flags);

	/* Ensure the adapter does not want to request an interrupt when */
	/* enabling the IRQ line to be active on the bus (i.e. not tri-stated) */
	hp100_page(PERFORMANCE);
	hp100_outw(0xfefe, IRQ_MASK);	/* mask off all ints */
	hp100_outw(0xffff, IRQ_STATUS);	/* ack all IRQs */
	hp100_outw(HP100_FAKE_INT | HP100_INT_EN | HP100_RESET_LB,
		   OPTION_LSW);
	/* Un Tri-state int. TODO: Check if shared interrupts can be realised? */
	hp100_outw(HP100_TRI_INT | HP100_RESET_HB, OPTION_LSW);

	if (lp->mode == 1) {
		/* Make sure BM bit is set... */
		hp100_page(HW_MAP);
		hp100_orb(HP100_BM_MASTER, BM);
		hp100_rxfill(dev);	/* pre-commit rx PDLs to the card */
	} else if (lp->mode == 2) {
		/* Enable memory mapping. Note: Don't do this when busmaster. */
		hp100_outw(HP100_MMAP_DIS | HP100_RESET_HB, OPTION_LSW);
	}

	hp100_page(PERFORMANCE);
	hp100_outw(0xfefe, IRQ_MASK);	/* mask off all ints */
	hp100_outw(0xffff, IRQ_STATUS);	/* ack IRQ */

	/* enable a few interrupts: */
	if (lp->mode == 1) {	/* busmaster mode */
		hp100_outw(HP100_RX_PDL_FILL_COMPL |
			   HP100_RX_PDA_ZERO | HP100_RX_ERROR |
			   /* HP100_RX_PACKET    | */
			   /* HP100_RX_EARLY_INT |  */ HP100_SET_HB |
			   /* HP100_TX_PDA_ZERO  |  */
			   HP100_TX_COMPLETE |
			   /* HP100_MISC_ERROR   |  */
			   HP100_TX_ERROR | HP100_SET_LB, IRQ_MASK);
	} else {
		/* Slave modes poll packets out on RX_PACKET instead. */
		hp100_outw(HP100_RX_PACKET |
			   HP100_RX_ERROR | HP100_SET_HB |
			   HP100_TX_ERROR | HP100_SET_LB, IRQ_MASK);
	}

	/* Note : before hp100_set_multicast_list(), because it will play with
	 * spinlock itself... Jean II */
	spin_unlock_irqrestore(&lp->lock, flags);

	/* Enable MAC Tx and RX, set MAC modes, ... */
	hp100_set_multicast_list(dev);
}
-
/* Quiesce the adapter: busmaster cards get a full BM shutdown; slave
 * cards have rx/tx disabled and are then polled (bounded) until the MAC
 * reports both tx and rx idle. */
static void hp100_stop_interface(struct net_device *dev)
{
	struct hp100_private *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;	/* used implicitly by the hp100_* I/O macros */
	u_int val;

#ifdef HP100_DEBUG_B
	printk("hp100: %s: hp100_stop_interface\n", dev->name);
	hp100_outw(0x4221, TRACE);
#endif

	if (lp->mode == 1)
		hp100_BM_shutdown(dev);
	else {
		/* Note: MMAP_DIS will be reenabled by start_interface */
		hp100_outw(HP100_INT_EN | HP100_RESET_LB |
			   HP100_TRI_INT | HP100_MMAP_DIS | HP100_SET_HB,
			   OPTION_LSW);
		val = hp100_inw(OPTION_LSW);

		hp100_page(MAC_CTRL);
		hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1);

		if (!(val & HP100_HW_RST))
			return;	/* If reset, imm. return ... */
		/* ... else: busy wait until idle */
		/* 'val' is reused as a plain loop counter here. */
		for (val = 0; val < 6000; val++)
			if ((hp100_inb(MAC_CFG_1) & (HP100_TX_IDLE | HP100_RX_IDLE)) == (HP100_TX_IDLE | HP100_RX_IDLE)) {
				hp100_page(PERFORMANCE);
				return;
			}
		printk("hp100: %s: hp100_stop_interface - timeout\n", dev->name);
		hp100_page(PERFORMANCE);
	}
}
-
-static void hp100_load_eeprom(struct net_device *dev, u_short probe_ioaddr)
-{
-       int i;
-       int ioaddr = probe_ioaddr > 0 ? probe_ioaddr : dev->base_addr;
-
-#ifdef HP100_DEBUG_B
-       hp100_outw(0x4222, TRACE);
-#endif
-
-       hp100_page(EEPROM_CTRL);
-       hp100_andw(~HP100_EEPROM_LOAD, EEPROM_CTRL);
-       hp100_orw(HP100_EEPROM_LOAD, EEPROM_CTRL);
-       for (i = 0; i < 10000; i++)
-               if (!(hp100_inb(OPTION_MSW) & HP100_EE_LOAD))
-                       return;
-       printk("hp100: %s: hp100_load_eeprom - timeout\n", dev->name);
-}
-
-/*  Sense connection status.
- *  return values: LAN_10  - Connected to 10Mbit/s network
- *                 LAN_100 - Connected to 100Mbit/s network
- *                 LAN_ERR - not connected or 100Mbit/s Hub down
- */
-static int hp100_sense_lan(struct net_device *dev)
-{
-       int ioaddr = dev->base_addr;
-       u_short val_VG, val_10;
-       struct hp100_private *lp = netdev_priv(dev);
-
-#ifdef HP100_DEBUG_B
-       hp100_outw(0x4223, TRACE);
-#endif
-
-       hp100_page(MAC_CTRL);
-       val_10 = hp100_inb(10_LAN_CFG_1);
-       val_VG = hp100_inb(VG_LAN_CFG_1);
-       hp100_page(PERFORMANCE);
-#ifdef HP100_DEBUG
-       printk("hp100: %s: sense_lan: val_VG = 0x%04x, val_10 = 0x%04x\n",
-              dev->name, val_VG, val_10);
-#endif
-
-       if (val_10 & HP100_LINK_BEAT_ST)        /* 10Mb connection is active */
-               return HP100_LAN_10;
-
-       if (val_10 & HP100_AUI_ST) {    /* have we BNC or AUI onboard? */
-               /*
-                * This can be overriden by dos utility, so if this has no effect,
-                * perhaps you need to download that utility from HP and set card
-                * back to "auto detect".
-                */
-               val_10 |= HP100_AUI_SEL | HP100_LOW_TH;
-               hp100_page(MAC_CTRL);
-               hp100_outb(val_10, 10_LAN_CFG_1);
-               hp100_page(PERFORMANCE);
-               return HP100_LAN_COAX;
-       }
-
-       /* Those cards don't have a 100 Mbit connector */
-       if ( !strcmp(lp->id, "HWP1920")  ||
-            (lp->pci_dev &&
-             lp->pci_dev->vendor == PCI_VENDOR_ID &&
-             (lp->pci_dev->device == PCI_DEVICE_ID_HP_J2970A ||
-              lp->pci_dev->device == PCI_DEVICE_ID_HP_J2973A)))
-               return HP100_LAN_ERR;
-
-       if (val_VG & HP100_LINK_CABLE_ST)       /* Can hear the HUBs tone. */
-               return HP100_LAN_100;
-       return HP100_LAN_ERR;
-}
-
-static int hp100_down_vg_link(struct net_device *dev)
-{
-       struct hp100_private *lp = netdev_priv(dev);
-       int ioaddr = dev->base_addr;
-       unsigned long time;
-       long savelan, newlan;
-
-#ifdef HP100_DEBUG_B
-       hp100_outw(0x4224, TRACE);
-       printk("hp100: %s: down_vg_link\n", dev->name);
-#endif
-
-       hp100_page(MAC_CTRL);
-       time = jiffies + (HZ / 4);
-       do {
-               if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST)
-                       break;
-               if (!in_interrupt())
-                       schedule_timeout_interruptible(1);
-       } while (time_after(time, jiffies));
-
-       if (time_after_eq(jiffies, time))       /* no signal->no logout */
-               return 0;
-
-       /* Drop the VG Link by clearing the link up cmd and load addr. */
-
-       hp100_andb(~(HP100_LOAD_ADDR | HP100_LINK_CMD), VG_LAN_CFG_1);
-       hp100_orb(HP100_VG_SEL, VG_LAN_CFG_1);
-
-       /* Conditionally stall for >250ms on Link-Up Status (to go down) */
-       time = jiffies + (HZ / 2);
-       do {
-               if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
-                       break;
-               if (!in_interrupt())
-                       schedule_timeout_interruptible(1);
-       } while (time_after(time, jiffies));
-
-#ifdef HP100_DEBUG
-       if (time_after_eq(jiffies, time))
-               printk("hp100: %s: down_vg_link: Link does not go down?\n", dev->name);
-#endif
-
-       /* To prevent condition where Rev 1 VG MAC and old hubs do not complete */
-       /* logout under traffic (even though all the status bits are cleared),  */
-       /* do this workaround to get the Rev 1 MAC in its idle state */
-       if (lp->chip == HP100_CHIPID_LASSEN) {
-               /* Reset VG MAC to insure it leaves the logoff state even if */
-               /* the Hub is still emitting tones */
-               hp100_andb(~HP100_VG_RESET, VG_LAN_CFG_1);
-               udelay(1500);   /* wait for >1ms */
-               hp100_orb(HP100_VG_RESET, VG_LAN_CFG_1);        /* Release Reset */
-               udelay(1500);
-       }
-
-       /* New: For lassen, switch to 10 Mbps mac briefly to clear training ACK */
-       /* to get the VG mac to full reset. This is not req.d with later chips */
-       /* Note: It will take the between 1 and 2 seconds for the VG mac to be */
-       /* selected again! This will be left to the connect hub function to */
-       /* perform if desired.  */
-       if (lp->chip == HP100_CHIPID_LASSEN) {
-               /* Have to write to 10 and 100VG control registers simultaneously */
-               savelan = newlan = hp100_inl(10_LAN_CFG_1);     /* read 10+100 LAN_CFG regs */
-               newlan &= ~(HP100_VG_SEL << 16);
-               newlan |= (HP100_DOT3_MAC) << 8;
-               hp100_andb(~HP100_AUTO_MODE, MAC_CFG_3);        /* Autosel off */
-               hp100_outl(newlan, 10_LAN_CFG_1);
-
-               /* Conditionally stall for 5sec on VG selected. */
-               time = jiffies + (HZ * 5);
-               do {
-                       if (!(hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST))
-                               break;
-                       if (!in_interrupt())
-                               schedule_timeout_interruptible(1);
-               } while (time_after(time, jiffies));
-
-               hp100_orb(HP100_AUTO_MODE, MAC_CFG_3);  /* Autosel back on */
-               hp100_outl(savelan, 10_LAN_CFG_1);
-       }
-
-       time = jiffies + (3 * HZ);      /* Timeout 3s */
-       do {
-               if ((hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) == 0)
-                       break;
-               if (!in_interrupt())
-                       schedule_timeout_interruptible(1);
-       } while (time_after(time, jiffies));
-
-       if (time_before_eq(time, jiffies)) {
-#ifdef HP100_DEBUG
-               printk("hp100: %s: down_vg_link: timeout\n", dev->name);
-#endif
-               return -EIO;
-       }
-
-       time = jiffies + (2 * HZ);      /* This seems to take a while.... */
-       do {
-               if (!in_interrupt())
-                       schedule_timeout_interruptible(1);
-       } while (time_after(time, jiffies));
-
-       return 0;
-}
-
-static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
-{
-       int ioaddr = dev->base_addr;
-       struct hp100_private *lp = netdev_priv(dev);
-       u_short val = 0;
-       unsigned long time;
-       int startst;
-
-#ifdef HP100_DEBUG_B
-       hp100_outw(0x4225, TRACE);
-       printk("hp100: %s: login_to_vg_hub\n", dev->name);
-#endif
-
-       /* Initiate a login sequence iff VG MAC is enabled and either Load Address
-        * bit is zero or the force relogin flag is set (e.g. due to MAC address or
-        * promiscuous mode change)
-        */
-       hp100_page(MAC_CTRL);
-       startst = hp100_inb(VG_LAN_CFG_1);
-       if ((force_relogin == 1) || (hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST)) {
-#ifdef HP100_DEBUG_TRAINING
-               printk("hp100: %s: Start training\n", dev->name);
-#endif
-
-               /* Ensure VG Reset bit is 1 (i.e., do not reset) */
-               hp100_orb(HP100_VG_RESET, VG_LAN_CFG_1);
-
-               /* If Lassen AND auto-select-mode AND VG tones were sensed on */
-               /* entry then temporarily put them into force 100Mbit mode */
-               if ((lp->chip == HP100_CHIPID_LASSEN) && (startst & HP100_LINK_CABLE_ST))
-                       hp100_andb(~HP100_DOT3_MAC, 10_LAN_CFG_2);
-
-               /* Drop the VG link by zeroing Link Up Command and Load Address  */
-               hp100_andb(~(HP100_LINK_CMD /* |HP100_LOAD_ADDR */ ), VG_LAN_CFG_1);
-
-#ifdef HP100_DEBUG_TRAINING
-               printk("hp100: %s: Bring down the link\n", dev->name);
-#endif
-
-               /* Wait for link to drop */
-               time = jiffies + (HZ / 10);
-               do {
-                       if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
-                               break;
-                       if (!in_interrupt())
-                               schedule_timeout_interruptible(1);
-               } while (time_after(time, jiffies));
-
-               /* Start an addressed training and optionally request promiscuous port */
-               if ((dev->flags) & IFF_PROMISC) {
-                       hp100_orb(HP100_PROM_MODE, VG_LAN_CFG_2);
-                       if (lp->chip == HP100_CHIPID_LASSEN)
-                               hp100_orw(HP100_MACRQ_PROMSC, TRAIN_REQUEST);
-               } else {
-                       hp100_andb(~HP100_PROM_MODE, VG_LAN_CFG_2);
-                       /* For ETR parts we need to reset the prom. bit in the training
-                        * register, otherwise promiscious mode won't be disabled.
-                        */
-                       if (lp->chip == HP100_CHIPID_LASSEN) {
-                               hp100_andw(~HP100_MACRQ_PROMSC, TRAIN_REQUEST);
-                       }
-               }
-
-               /* With ETR parts, frame format request bits can be set. */
-               if (lp->chip == HP100_CHIPID_LASSEN)
-                       hp100_orb(HP100_MACRQ_FRAMEFMT_EITHER, TRAIN_REQUEST);
-
-               hp100_orb(HP100_LINK_CMD | HP100_LOAD_ADDR | HP100_VG_RESET, VG_LAN_CFG_1);
-
-               /* Note: Next wait could be omitted for Hood and earlier chips under */
-               /* certain circumstances */
-               /* TODO: check if hood/earlier and skip wait. */
-
-               /* Wait for either short timeout for VG tones or long for login    */
-               /* Wait for the card hardware to signalise link cable status ok... */
-               hp100_page(MAC_CTRL);
-               time = jiffies + (1 * HZ);      /* 1 sec timeout for cable st */
-               do {
-                       if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST)
-                               break;
-                       if (!in_interrupt())
-                               schedule_timeout_interruptible(1);
-               } while (time_before(jiffies, time));
-
-               if (time_after_eq(jiffies, time)) {
-#ifdef HP100_DEBUG_TRAINING
-                       printk("hp100: %s: Link cable status not ok? Training aborted.\n", dev->name);
-#endif
-               } else {
-#ifdef HP100_DEBUG_TRAINING
-                       printk
-                           ("hp100: %s: HUB tones detected. Trying to train.\n",
-                            dev->name);
-#endif
-
-                       time = jiffies + (2 * HZ);      /* again a timeout */
-                       do {
-                               val = hp100_inb(VG_LAN_CFG_1);
-                               if ((val & (HP100_LINK_UP_ST))) {
-#ifdef HP100_DEBUG_TRAINING
-                                       printk("hp100: %s: Passed training.\n", dev->name);
-#endif
-                                       break;
-                               }
-                               if (!in_interrupt())
-                                       schedule_timeout_interruptible(1);
-                       } while (time_after(time, jiffies));
-               }
-
-               /* If LINK_UP_ST is set, then we are logged into the hub. */
-               if (time_before_eq(jiffies, time) && (val & HP100_LINK_UP_ST)) {
-#ifdef HP100_DEBUG_TRAINING
-                       printk("hp100: %s: Successfully logged into the HUB.\n", dev->name);
-                       if (lp->chip == HP100_CHIPID_LASSEN) {
-                               val = hp100_inw(TRAIN_ALLOW);
-                               printk("hp100: %s: Card supports 100VG MAC Version \"%s\" ",
-                                            dev->name, (hp100_inw(TRAIN_REQUEST) & HP100_CARD_MACVER) ? "802.12" : "Pre");
-                               printk("Driver will use MAC Version \"%s\"\n", (val & HP100_HUB_MACVER) ? "802.12" : "Pre");
-                               printk("hp100: %s: Frame format is %s.\n", dev->name, (val & HP100_MALLOW_FRAMEFMT) ? "802.5" : "802.3");
-                       }
-#endif
-               } else {
-                       /* If LINK_UP_ST is not set, login was not successful */
-                       printk("hp100: %s: Problem logging into the HUB.\n", dev->name);
-                       if (lp->chip == HP100_CHIPID_LASSEN) {
-                               /* Check allowed Register to find out why there is a problem. */
-                               val = hp100_inw(TRAIN_ALLOW);   /* won't work on non-ETR card */
-#ifdef HP100_DEBUG_TRAINING
-                               printk("hp100: %s: MAC Configuration requested: 0x%04x, HUB allowed: 0x%04x\n", dev->name, hp100_inw(TRAIN_REQUEST), val);
-#endif
-                               if (val & HP100_MALLOW_ACCDENIED)
-                                       printk("hp100: %s: HUB access denied.\n", dev->name);
-                               if (val & HP100_MALLOW_CONFIGURE)
-                                       printk("hp100: %s: MAC Configuration is incompatible with the Network.\n", dev->name);
-                               if (val & HP100_MALLOW_DUPADDR)
-                                       printk("hp100: %s: Duplicate MAC Address on the Network.\n", dev->name);
-                       }
-               }
-
-               /* If we have put the chip into forced 100 Mbit mode earlier, go back */
-               /* to auto-select mode */
-
-               if ((lp->chip == HP100_CHIPID_LASSEN) && (startst & HP100_LINK_CABLE_ST)) {
-                       hp100_page(MAC_CTRL);
-                       hp100_orb(HP100_DOT3_MAC, 10_LAN_CFG_2);
-               }
-
-               val = hp100_inb(VG_LAN_CFG_1);
-
-               /* Clear the MISC_ERROR Interrupt, which might be generated when doing the relogin */
-               hp100_page(PERFORMANCE);
-               hp100_outw(HP100_MISC_ERROR, IRQ_STATUS);
-
-               if (val & HP100_LINK_UP_ST)
-                       return 0;       /* login was ok */
-               else {
-                       printk("hp100: %s: Training failed.\n", dev->name);
-                       hp100_down_vg_link(dev);
-                       return -EIO;
-               }
-       }
-       /* no forced relogin & already link there->no training. */
-       return -EIO;
-}
-
-static void hp100_cascade_reset(struct net_device *dev, u_short enable)
-{
-       int ioaddr = dev->base_addr;
-       struct hp100_private *lp = netdev_priv(dev);
-
-#ifdef HP100_DEBUG_B
-       hp100_outw(0x4226, TRACE);
-       printk("hp100: %s: cascade_reset\n", dev->name);
-#endif
-
-       if (enable) {
-               hp100_outw(HP100_HW_RST | HP100_RESET_LB, OPTION_LSW);
-               if (lp->chip == HP100_CHIPID_LASSEN) {
-                       /* Lassen requires a PCI transmit fifo reset */
-                       hp100_page(HW_MAP);
-                       hp100_andb(~HP100_PCI_RESET, PCICTRL2);
-                       hp100_orb(HP100_PCI_RESET, PCICTRL2);
-                       /* Wait for min. 300 ns */
-                       /* we can't use jiffies here, because it may be */
-                       /* that we have disabled the timer... */
-                       udelay(400);
-                       hp100_andb(~HP100_PCI_RESET, PCICTRL2);
-                       hp100_page(PERFORMANCE);
-               }
-       } else {                /* bring out of reset */
-               hp100_outw(HP100_HW_RST | HP100_SET_LB, OPTION_LSW);
-               udelay(400);
-               hp100_page(PERFORMANCE);
-       }
-}
-
-#ifdef HP100_DEBUG
-void hp100_RegisterDump(struct net_device *dev)
-{
-       int ioaddr = dev->base_addr;
-       int Page;
-       int Register;
-
-       /* Dump common registers */
-       printk("hp100: %s: Cascade Register Dump\n", dev->name);
-       printk("hardware id #1: 0x%.2x\n", hp100_inb(HW_ID));
-       printk("hardware id #2/paging: 0x%.2x\n", hp100_inb(PAGING));
-       printk("option #1: 0x%.4x\n", hp100_inw(OPTION_LSW));
-       printk("option #2: 0x%.4x\n", hp100_inw(OPTION_MSW));
-
-       /* Dump paged registers */
-       for (Page = 0; Page < 8; Page++) {
-               /* Dump registers */
-               printk("page: 0x%.2x\n", Page);
-               outw(Page, ioaddr + 0x02);
-               for (Register = 0x8; Register < 0x22; Register += 2) {
-                       /* Display Register contents except data port */
-                       if (((Register != 0x10) && (Register != 0x12)) || (Page > 0)) {
-                               printk("0x%.2x = 0x%.4x\n", Register, inw(ioaddr + Register));
-                       }
-               }
-       }
-       hp100_page(PERFORMANCE);
-}
-#endif
-
-
-static void cleanup_dev(struct net_device *d)
-{
-       struct hp100_private *p = netdev_priv(d);
-
-       unregister_netdev(d);
-       release_region(d->base_addr, HP100_REGION_SIZE);
-
-       if (p->mode == 1)       /* busmaster */
-               pci_free_consistent(p->pci_dev, MAX_RINGSIZE + 0x0f,
-                                   p->page_vaddr_algn,
-                                   virt_to_whatever(d, p->page_vaddr_algn));
-       if (p->mem_ptr_virt)
-               iounmap(p->mem_ptr_virt);
-
-       free_netdev(d);
-}
-
-static int hp100_eisa_probe(struct device *gendev)
-{
-       struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
-       struct eisa_device *edev = to_eisa_device(gendev);
-       int err;
-
-       if (!dev)
-               return -ENOMEM;
-
-       SET_NETDEV_DEV(dev, &edev->dev);
-
-       err = hp100_probe1(dev, edev->base_addr + 0xC38, HP100_BUS_EISA, NULL);
-       if (err)
-               goto out1;
-
-#ifdef HP100_DEBUG
-       printk("hp100: %s: EISA adapter found at 0x%x\n", dev->name,
-              dev->base_addr);
-#endif
-       dev_set_drvdata(gendev, dev);
-       return 0;
- out1:
-       free_netdev(dev);
-       return err;
-}
-
-static int hp100_eisa_remove(struct device *gendev)
-{
-       struct net_device *dev = dev_get_drvdata(gendev);
-       cleanup_dev(dev);
-       return 0;
-}
-
-static struct eisa_driver hp100_eisa_driver = {
-        .id_table = hp100_eisa_tbl,
-        .driver   = {
-                .name    = "hp100",
-                .probe   = hp100_eisa_probe,
-               .remove  = hp100_eisa_remove,
-        }
-};
-
-static int hp100_pci_probe(struct pci_dev *pdev,
-                          const struct pci_device_id *ent)
-{
-       struct net_device *dev;
-       int ioaddr;
-       u_short pci_command;
-       int err;
-
-       if (pci_enable_device(pdev))
-               return -ENODEV;
-
-       dev = alloc_etherdev(sizeof(struct hp100_private));
-       if (!dev) {
-               err = -ENOMEM;
-               goto out0;
-       }
-
-       SET_NETDEV_DEV(dev, &pdev->dev);
-
-       pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
-       if (!(pci_command & PCI_COMMAND_IO)) {
-#ifdef HP100_DEBUG
-               printk("hp100: %s: PCI I/O Bit has not been set. Setting...\n", dev->name);
-#endif
-               pci_command |= PCI_COMMAND_IO;
-               pci_write_config_word(pdev, PCI_COMMAND, pci_command);
-       }
-
-       if (!(pci_command & PCI_COMMAND_MASTER)) {
-#ifdef HP100_DEBUG
-               printk("hp100: %s: PCI Master Bit has not been set. Setting...\n", dev->name);
-#endif
-               pci_command |= PCI_COMMAND_MASTER;
-               pci_write_config_word(pdev, PCI_COMMAND, pci_command);
-       }
-
-       ioaddr = pci_resource_start(pdev, 0);
-       err = hp100_probe1(dev, ioaddr, HP100_BUS_PCI, pdev);
-       if (err)
-               goto out1;
-
-#ifdef HP100_DEBUG
-       printk("hp100: %s: PCI adapter found at 0x%x\n", dev->name, ioaddr);
-#endif
-       pci_set_drvdata(pdev, dev);
-       return 0;
- out1:
-       free_netdev(dev);
- out0:
-       pci_disable_device(pdev);
-       return err;
-}
-
-static void hp100_pci_remove(struct pci_dev *pdev)
-{
-       struct net_device *dev = pci_get_drvdata(pdev);
-
-       cleanup_dev(dev);
-       pci_disable_device(pdev);
-}
-
-
-static struct pci_driver hp100_pci_driver = {
-       .name           = "hp100",
-       .id_table       = hp100_pci_tbl,
-       .probe          = hp100_pci_probe,
-       .remove         = hp100_pci_remove,
-};
-
-/*
- *  module section
- */
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, "
-              "Siegfried \"Frieder\" Loeffler (dg1sek) <floeff@mathematik.uni-stuttgart.de>");
-MODULE_DESCRIPTION("HP CASCADE Architecture Driver for 100VG-AnyLan Network Adapters");
-
-/*
- * Note: to register three isa devices, use:
- * option hp100 hp100_port=0,0,0
- *        to register one card at io 0x280 as eth239, use:
- * option hp100 hp100_port=0x280
- */
-#if defined(MODULE) && defined(CONFIG_ISA)
-#define HP100_DEVICES 5
-/* Parameters set by insmod */
-static int hp100_port[HP100_DEVICES] = { 0, [1 ... (HP100_DEVICES-1)] = -1 };
-module_param_hw_array(hp100_port, int, ioport, NULL, 0);
-
-/* List of devices */
-static struct net_device *hp100_devlist[HP100_DEVICES];
-
-static int __init hp100_isa_init(void)
-{
-       struct net_device *dev;
-       int i, err, cards = 0;
-
-       /* Don't autoprobe ISA bus */
-       if (hp100_port[0] == 0)
-               return -ENODEV;
-
-       /* Loop on all possible base addresses */
-       for (i = 0; i < HP100_DEVICES && hp100_port[i] != -1; ++i) {
-               dev = alloc_etherdev(sizeof(struct hp100_private));
-               if (!dev) {
-                       while (cards > 0)
-                               cleanup_dev(hp100_devlist[--cards]);
-
-                       return -ENOMEM;
-               }
-
-               err = hp100_isa_probe(dev, hp100_port[i]);
-               if (!err)
-                       hp100_devlist[cards++] = dev;
-               else
-                       free_netdev(dev);
-       }
-
-       return cards > 0 ? 0 : -ENODEV;
-}
-
-static void hp100_isa_cleanup(void)
-{
-       int i;
-
-       for (i = 0; i < HP100_DEVICES; i++) {
-               struct net_device *dev = hp100_devlist[i];
-               if (dev)
-                       cleanup_dev(dev);
-       }
-}
-#else
-#define hp100_isa_init()       (0)
-#define hp100_isa_cleanup()    do { } while(0)
-#endif
-
-static int __init hp100_module_init(void)
-{
-       int err;
-
-       err = hp100_isa_init();
-       if (err && err != -ENODEV)
-               goto out;
-       err = eisa_driver_register(&hp100_eisa_driver);
-       if (err && err != -ENODEV)
-               goto out2;
-       err = pci_register_driver(&hp100_pci_driver);
-       if (err && err != -ENODEV)
-               goto out3;
- out:
-       return err;
- out3:
-       eisa_driver_unregister (&hp100_eisa_driver);
- out2:
-       hp100_isa_cleanup();
-       goto out;
-}
-
-
-static void __exit hp100_module_exit(void)
-{
-       hp100_isa_cleanup();
-       eisa_driver_unregister (&hp100_eisa_driver);
-       pci_unregister_driver (&hp100_pci_driver);
-}
-
-module_init(hp100_module_init)
-module_exit(hp100_module_exit)
diff --git a/drivers/net/ethernet/hp/hp100.h b/drivers/net/ethernet/hp/hp100.h
deleted file mode 100644 (file)
index 7239b94..0000000
+++ /dev/null
@@ -1,611 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * hp100.h: Hewlett Packard HP10/100VG ANY LAN ethernet driver for Linux.
- *
- * $Id: hp100.h,v 1.51 1997/04/08 14:26:42 floeff Exp floeff $
- *
- * Authors:  Jaroslav Kysela, <perex@pf.jcu.cz>
- *           Siegfried Loeffler <floeff@tunix.mathematik.uni-stuttgart.de>
- *
- * This driver is based on the 'hpfepkt' crynwr packet driver.
- */
-
-/****************************************************************************
- *  Hardware Constants
- ****************************************************************************/
-
-/*
- * Page Identifiers
- * (Swap Paging Register, PAGING, bits 3:0, Offset 0x02)
- */
-
-#define HP100_PAGE_PERFORMANCE 0x0     /* Page 0 */
-#define HP100_PAGE_MAC_ADDRESS 0x1     /* Page 1 */
-#define HP100_PAGE_HW_MAP      0x2     /* Page 2 */
-#define HP100_PAGE_EEPROM_CTRL 0x3     /* Page 3 */
-#define HP100_PAGE_MAC_CTRL    0x4     /* Page 4 */
-#define HP100_PAGE_MMU_CFG     0x5     /* Page 5 */
-#define HP100_PAGE_ID_MAC_ADDR 0x6     /* Page 6 */
-#define HP100_PAGE_MMU_POINTER 0x7     /* Page 7 */
-
-
-/* Registers that are present on all pages  */
-
-#define HP100_REG_HW_ID                0x00    /* R:  (16) Unique card ID           */
-#define HP100_REG_TRACE                0x00    /* W:  (16) Used for debug output    */
-#define HP100_REG_PAGING       0x02    /* R:  (16),15:4 Card ID             */
-                                       /* W:  (16),3:0 Switch pages         */
-#define HP100_REG_OPTION_LSW   0x04    /* RW: (16) Select card functions    */
-#define HP100_REG_OPTION_MSW   0x06    /* RW: (16) Select card functions    */
-
-/*  Page 0 - Performance  */
-
-#define HP100_REG_IRQ_STATUS   0x08    /* RW: (16) Which ints are pending   */
-#define HP100_REG_IRQ_MASK     0x0a    /* RW: (16) Select ints to allow     */
-#define HP100_REG_FRAGMENT_LEN 0x0c    /* W: (16)12:0 Current fragment len */
-/* Note: For 32 bit systems, fragment len and offset registers are available */
-/*       at offset 0x28 and 0x2c, where they can be written as 32bit values. */
-#define HP100_REG_OFFSET       0x0e    /* RW: (16)12:0 Offset to start read */
-#define HP100_REG_DATA32       0x10    /* RW: (32) I/O mode data port       */
-#define HP100_REG_DATA16       0x12    /* RW: WORDs must be read from here  */
-#define HP100_REG_TX_MEM_FREE  0x14    /* RD: (32) Amount of free Tx mem    */
-#define HP100_REG_TX_PDA_L      0x14   /* W: (32) BM: Ptr to PDL, Low Pri  */
-#define HP100_REG_TX_PDA_H      0x1c   /* W: (32) BM: Ptr to PDL, High Pri */
-#define HP100_REG_RX_PKT_CNT   0x18    /* RD: (8) Rx count of pkts on card  */
-#define HP100_REG_TX_PKT_CNT   0x19    /* RD: (8) Tx count of pkts on card  */
-#define HP100_REG_RX_PDL        0x1a   /* R: (8) BM: # rx pdl not executed */
-#define HP100_REG_TX_PDL        0x1b   /* R: (8) BM: # tx pdl not executed */
-#define HP100_REG_RX_PDA        0x18   /* W: (32) BM: Up to 31 addresses */
-                                       /*             which point to a PDL */
-#define HP100_REG_SL_EARLY      0x1c   /*    (32) Enhanced Slave Early Rx */
-#define HP100_REG_STAT_DROPPED  0x20   /* R (12) Dropped Packet Counter */
-#define HP100_REG_STAT_ERRORED  0x22   /* R (8) Errored Packet Counter */
-#define HP100_REG_STAT_ABORT    0x23   /* R (8) Abort Counter/OW Coll. Flag */
-#define HP100_REG_RX_RING       0x24   /* W (32) Slave: RX Ring Pointers */
-#define HP100_REG_32_FRAGMENT_LEN 0x28 /* W (13) Slave: Fragment Length Reg */
-#define HP100_REG_32_OFFSET     0x2c   /* W (16) Slave: Offset Register */
-
-/*  Page 1 - MAC Address/Hash Table  */
-
-#define HP100_REG_MAC_ADDR     0x08    /* RW: (8) Cards MAC address         */
-#define HP100_REG_HASH_BYTE0   0x10    /* RW: (8) Cards multicast filter    */
-
-/*  Page 2 - Hardware Mapping  */
-
-#define HP100_REG_MEM_MAP_LSW  0x08    /* RW: (16) LSW of cards mem addr    */
-#define HP100_REG_MEM_MAP_MSW  0x0a    /* RW: (16) MSW of cards mem addr    */
-#define HP100_REG_IO_MAP       0x0c    /* RW: (8) Cards I/O address         */
-#define HP100_REG_IRQ_CHANNEL  0x0d    /* RW: (8) IRQ and edge/level int    */
-#define HP100_REG_SRAM         0x0e    /* RW: (8) How much RAM on card      */
-#define HP100_REG_BM           0x0f    /* RW: (8) Controls BM functions     */
-
-/* New on Page 2 for ETR chips: */
-#define HP100_REG_MODECTRL1     0x10   /* RW: (8) Mode Control 1 */
-#define HP100_REG_MODECTRL2     0x11   /* RW: (8) Mode Control 2 */
-#define HP100_REG_PCICTRL1      0x12   /* RW: (8) PCI Cfg 1 */
-#define HP100_REG_PCICTRL2      0x13   /* RW: (8) PCI Cfg 2 */
-#define HP100_REG_PCIBUSMLAT    0x15   /* RW: (8) PCI Bus Master Latency */
-#define HP100_REG_EARLYTXCFG    0x16   /* RW: (16) Early TX Cfg/Cntrl Reg */
-#define HP100_REG_EARLYRXCFG    0x18   /* RW: (8) Early RX Cfg/Cntrl Reg */
-#define HP100_REG_ISAPNPCFG1    0x1a   /* RW: (8) ISA PnP Cfg/Cntrl Reg 1 */
-#define HP100_REG_ISAPNPCFG2    0x1b   /* RW: (8) ISA PnP Cfg/Cntrl Reg 2 */
-
-/*  Page 3 - EEPROM/Boot ROM  */
-
-#define HP100_REG_EEPROM_CTRL  0x08    /* RW: (16) Used to load EEPROM      */
-#define HP100_REG_BOOTROM_CTRL  0x0a
-
-/*  Page 4 - LAN Configuration  (MAC_CTRL) */
-
-#define HP100_REG_10_LAN_CFG_1 0x08    /* RW: (8) Set 10M XCVR functions   */
-#define HP100_REG_10_LAN_CFG_2  0x09   /* RW: (8)     10M XCVR functions   */
-#define HP100_REG_VG_LAN_CFG_1 0x0a    /* RW: (8) Set 100M XCVR functions  */
-#define HP100_REG_VG_LAN_CFG_2  0x0b   /* RW: (8) 100M LAN Training cfgregs */
-#define HP100_REG_MAC_CFG_1    0x0c    /* RW: (8) Types of pkts to accept   */
-#define HP100_REG_MAC_CFG_2    0x0d    /* RW: (8) Misc MAC functions        */
-#define HP100_REG_MAC_CFG_3     0x0e   /* RW: (8) Misc MAC functions */
-#define HP100_REG_MAC_CFG_4     0x0f   /* R:  (8) Misc MAC states */
-#define HP100_REG_DROPPED      0x10    /* R:  (16),11:0 Pkts can't fit in mem */
-#define HP100_REG_CRC          0x12    /* R:  (8) Pkts with CRC             */
-#define HP100_REG_ABORT                0x13    /* R:  (8) Aborted Tx pkts           */
-#define HP100_REG_TRAIN_REQUEST 0x14   /* RW: (16) Endnode MAC register. */
-#define HP100_REG_TRAIN_ALLOW   0x16   /* R:  (16) Hub allowed register */
-
-/*  Page 5 - MMU  */
-
-#define HP100_REG_RX_MEM_STOP  0x0c    /* RW: (16) End of Rx ring addr      */
-#define HP100_REG_TX_MEM_STOP  0x0e    /* RW: (16) End of Tx ring addr      */
-#define HP100_REG_PDL_MEM_STOP  0x10   /* Not used by 802.12 devices */
-#define HP100_REG_ECB_MEM_STOP  0x14   /* I've no idea what this is */
-
-/*  Page 6 - Card ID/Physical LAN Address  */
-
-#define HP100_REG_BOARD_ID     0x08    /* R:  (8) EISA/ISA card ID          */
-#define HP100_REG_BOARD_IO_CHCK 0x0c   /* R:  (8) Added to ID to get FFh    */
-#define HP100_REG_SOFT_MODEL   0x0d    /* R:  (8) Config program defined    */
-#define HP100_REG_LAN_ADDR     0x10    /* R:  (8) MAC addr of card          */
-#define HP100_REG_LAN_ADDR_CHCK 0x16   /* R:  (8) Added to addr to get FFh  */
-
-/*  Page 7 - MMU Current Pointers  */
-
-#define HP100_REG_PTR_RXSTART  0x08    /* R:  (16) Current begin of Rx ring */
-#define HP100_REG_PTR_RXEND    0x0a    /* R:  (16) Current end of Rx ring   */
-#define HP100_REG_PTR_TXSTART  0x0c    /* R:  (16) Current begin of Tx ring */
-#define HP100_REG_PTR_TXEND    0x0e    /* R:  (16) Current end of Rx ring   */
-#define HP100_REG_PTR_RPDLSTART 0x10
-#define HP100_REG_PTR_RPDLEND   0x12
-#define HP100_REG_PTR_RINGPTRS  0x14
-#define HP100_REG_PTR_MEMDEBUG  0x1a
-/* ------------------------------------------------------------------------ */
-
-
-/*
- * Hardware ID Register I (Always available, HW_ID, Offset 0x00)
- */
-#define HP100_HW_ID_CASCADE     0x4850 /* Identifies Cascade Chip */
-
-/*
- * Hardware ID Register 2 & Paging Register
- * (Always available, PAGING, Offset 0x02)
- * Bits 15:4 are for the Chip ID
- */
-#define HP100_CHIPID_MASK        0xFFF0
-#define HP100_CHIPID_SHASTA      0x5350        /* Not 802.12 compliant */
-                                        /* EISA BM/SL, MCA16/32 SL, ISA SL */
-#define HP100_CHIPID_RAINIER     0x5360        /* Not 802.12 compliant EISA BM, */
-                                        /* PCI SL, MCA16/32 SL, ISA SL */
-#define HP100_CHIPID_LASSEN      0x5370        /* 802.12 compliant PCI BM, PCI SL */
-                                        /* LRF supported */
-
-/*
- *  Option Registers I and II
- * (Always available, OPTION_LSW, Offset 0x04-0x05)
- */
-#define HP100_DEBUG_EN         0x8000  /* 0:Dis., 1:Enable Debug Dump Ptr. */
-#define HP100_RX_HDR           0x4000  /* 0:Dis., 1:Enable putting pkt into */
-                                       /*   system mem. before Rx interrupt */
-#define HP100_MMAP_DIS         0x2000  /* 0:Enable, 1:Disable mem.mapping. */
-                                       /*   MMAP_DIS must be 0 and MEM_EN */
-                                       /*   must be 1 for memory-mapped */
-                                       /*   mode to be enabled */
-#define HP100_EE_EN            0x1000  /* 0:Disable,1:Enable EEPROM writing */
-#define HP100_BM_WRITE         0x0800  /* 0:Slave, 1:Bus Master for Tx data */
-#define HP100_BM_READ          0x0400  /* 0:Slave, 1:Bus Master for Rx data */
-#define HP100_TRI_INT          0x0200  /* 0:Don't, 1:Do tri-state the int */
-#define HP100_MEM_EN           0x0040  /* Config program set this to */
-                                       /*   0:Disable, 1:Enable mem map. */
-                                       /*   See MMAP_DIS. */
-#define HP100_IO_EN            0x0020  /* 1:Enable I/O transfers */
-#define HP100_BOOT_EN          0x0010  /* 1:Enable boot ROM access */
-#define HP100_FAKE_INT         0x0008  /* 1:int */
-#define HP100_INT_EN           0x0004  /* 1:Enable ints from card */
-#define HP100_HW_RST           0x0002  /* 0:Reset, 1:Out of reset */
-                                       /* NIC reset on 0 to 1 transition */
-
-/*
- *  Option Register III
- * (Always available, OPTION_MSW, Offset 0x06)
- */
-#define HP100_PRIORITY_TX      0x0080  /* 1:Do all Tx pkts as priority */
-#define HP100_EE_LOAD          0x0040  /* 1:EEPROM loading, 0 when done */
-#define HP100_ADV_NXT_PKT      0x0004  /* 1:Advance to next pkt in Rx queue */
-                                       /*   h/w will set to 0 when done */
-#define HP100_TX_CMD           0x0002  /* 1:Tell h/w download done, h/w */
-                                       /*   will set to 0 when done */
-
-/*
- * Interrupt Status Registers I and II
- * (Page PERFORMANCE, IRQ_STATUS, Offset 0x08-0x09)
- * Note: With old chips, these Registers will clear when 1 is written to them
- *       with new chips this depends on setting of CLR_ISMODE
- */
-#define HP100_RX_EARLY_INT      0x2000
-#define HP100_RX_PDA_ZERO       0x1000
-#define HP100_RX_PDL_FILL_COMPL 0x0800
-#define HP100_RX_PACKET                0x0400  /* 0:No, 1:Yes pkt has been Rx */
-#define HP100_RX_ERROR         0x0200  /* 0:No, 1:Yes Rx pkt had error */
-#define HP100_TX_PDA_ZERO       0x0020 /* 1 when PDA count goes to zero */
-#define HP100_TX_SPACE_AVAIL   0x0010  /* 0:<8192, 1:>=8192 Tx free bytes */
-#define HP100_TX_COMPLETE      0x0008  /* 0:No, 1:Yes a Tx has completed */
-#define HP100_MISC_ERROR        0x0004 /* 0:No, 1:Lan Link down or bus error */
-#define HP100_TX_ERROR         0x0002  /* 0:No, 1:Yes Tx pkt had error */
-
-/*
- * Xmit Memory Free Count
- * (Page PERFORMANCE, TX_MEM_FREE, Offset 0x14) (Read only, 32bit)
- */
-#define HP100_AUTO_COMPARE     0x80000000      /* Tx Space avail & pkts<255 */
-#define HP100_FREE_SPACE       0x7fffffe0      /* Tx free memory */
-
-/*
- *  IRQ Channel
- * (Page HW_MAP, IRQ_CHANNEL, Offset 0x0d)
- */
-#define HP100_ZERO_WAIT_EN     0x80    /* 0:No, 1:Yes asserts NOWS signal */
-#define HP100_IRQ_SCRAMBLE      0x40
-#define HP100_BOND_HP           0x20
-#define HP100_LEVEL_IRQ                0x10    /* 0:Edge, 1:Level type interrupts. */
-                                       /* (Only valid on EISA cards) */
-#define HP100_IRQMASK          0x0F    /* Isolate the IRQ bits */
-
-/*
- * SRAM Parameters
- * (Page HW_MAP, SRAM, Offset 0x0e)
- */
-#define HP100_RAM_SIZE_MASK    0xe0    /* AND to get SRAM size index */
-#define HP100_RAM_SIZE_SHIFT   0x05    /* Shift count(put index in lwr bits) */
-
-/*
- * Bus Master Register
- * (Page HW_MAP, BM, Offset 0x0f)
- */
-#define HP100_BM_BURST_RD       0x01   /* EISA only: 1=Use burst trans. fm system */
-                                       /* memory to chip (tx) */
-#define HP100_BM_BURST_WR       0x02   /* EISA only: 1=Use burst trans. fm system */
-                                       /* memory to chip (rx) */
-#define HP100_BM_MASTER                0x04    /* 0:Slave, 1:BM mode */
-#define HP100_BM_PAGE_CK        0x08   /* This bit should be set whenever in */
-                                       /* an EISA system */
-#define HP100_BM_PCI_8CLK       0x40   /* ... cycles 8 clocks apart */
-
-
-/*
- * Mode Control Register I
- * (Page HW_MAP, MODECTRL1, Offset0x10)
- */
-#define HP100_TX_DUALQ          0x10
-   /* If set and BM -> dual tx pda queues */
-#define HP100_ISR_CLRMODE       0x02   /* If set ISR will clear all pending */
-                                      /* interrupts on read (etr only?) */
-#define HP100_EE_NOLOAD         0x04   /* Status whether res will be loaded */
-                                      /* from the eeprom */
-#define HP100_TX_CNT_FLG        0x08   /* Controls Early TX Reg Cnt Field */
-#define HP100_PDL_USE3          0x10   /* If set BM engine will read only */
-                                      /* first three data elements of a PDL */
-                                      /* on the first access. */
-#define HP100_BUSTYPE_MASK      0xe0   /* Three bit bus type info */
-
-/*
- * Mode Control Register II
- * (Page HW_MAP, MODECTRL2, Offset0x11)
- */
-#define HP100_EE_MASK           0x0f   /* Tell EEPROM circuit not to load */
-                                      /* certain resources */
-#define HP100_DIS_CANCEL        0x20   /* For tx dualq mode operation */
-#define HP100_EN_PDL_WB         0x40   /* 1: Status of PDL completion may be */
-                                      /* written back to system mem */
-#define HP100_EN_BUS_FAIL       0x80   /* Enables bus-fail portion of misc */
-                                      /* interrupt */
-
-/*
- * PCI Configuration and Control Register I
- * (Page HW_MAP, PCICTRL1, Offset 0x12)
- */
-#define HP100_LO_MEM            0x01   /* 1: Mapped Mem requested below 1MB */
-#define HP100_NO_MEM            0x02   /* 1: Disables Req for sysmem to PCI */
-                                      /* bios */
-#define HP100_USE_ISA           0x04   /* 1: isa type decodes will occur */
-                                      /* simultaneously with PCI decodes */
-#define HP100_IRQ_HI_MASK       0xf0   /* pgmed by pci bios */
-#define HP100_PCI_IRQ_HI_MASK   0x78   /* Isolate 4 bits for PCI IRQ  */
-
-/*
- * PCI Configuration and Control Register II
- * (Page HW_MAP, PCICTRL2, Offset 0x13)
- */
-#define HP100_RD_LINE_PDL       0x01   /* 1: PCI command Memory Read Line en */
-#define HP100_RD_TX_DATA_MASK   0x06   /* choose PCI memread cmds for TX */
-#define HP100_MWI               0x08   /* 1: en. PCI memory write invalidate */
-#define HP100_ARB_MODE          0x10   /* Select PCI arbitor type */
-#define HP100_STOP_EN           0x20   /* Enables PCI state machine to issue */
-                                      /* pci stop if cascade not ready */
-#define HP100_IGNORE_PAR        0x40   /* 1: PCI state machine ignores parity */
-#define HP100_PCI_RESET         0x80   /* 0->1: Reset PCI block */
-
-/*
- * Early TX Configuration and Control Register
- * (Page HW_MAP, EARLYTXCFG, Offset 0x16)
- */
-#define HP100_EN_EARLY_TX       0x8000 /* 1=Enable Early TX */
-#define HP100_EN_ADAPTIVE       0x4000 /* 1=Enable adaptive mode */
-#define HP100_EN_TX_UR_IRQ      0x2000 /* reserved, must be 0 */
-#define HP100_EN_LOW_TX         0x1000 /* reserved, must be 0 */
-#define HP100_ET_CNT_MASK       0x0fff /* bits 11..0: ET counters */
-
-/*
- * Early RX Configuration and Control Register
- * (Page HW_MAP, EARLYRXCFG, Offset 0x18)
- */
-#define HP100_EN_EARLY_RX       0x80   /* 1=Enable Early RX */
-#define HP100_EN_LOW_RX         0x40   /* reserved, must be 0 */
-#define HP100_RX_TRIP_MASK      0x1f   /* bits 4..0: threshold at which the
-                                        * early rx circuit will start the
-                                        * dma of received packet into system
-                                        * memory for BM */
-
-/*
- *  Serial Devices Control Register
- * (Page EEPROM_CTRL, EEPROM_CTRL, Offset 0x08)
- */
-#define HP100_EEPROM_LOAD      0x0001  /* 0->1 loads EEPROM into registers. */
-                                       /* When it goes back to 0, load is   */
-                                       /* complete. This should take ~600us. */
-
-/*
- * 10MB LAN Control and Configuration Register I
- * (Page MAC_CTRL, 10_LAN_CFG_1, Offset 0x08)
- */
-#define HP100_MAC10_SEL                0xc0    /* Get bits to indicate MAC */
-#define HP100_AUI_SEL          0x20    /* Status of AUI selection */
-#define HP100_LOW_TH           0x10    /* 0:No, 1:Yes allow better cabling */
-#define HP100_LINK_BEAT_DIS    0x08    /* 0:Enable, 1:Disable link beat */
-#define HP100_LINK_BEAT_ST     0x04    /* 0:No, 1:Yes link beat being Rx */
-#define HP100_R_ROL_ST         0x02    /* 0:No, 1:Yes Rx twisted pair has */
-                                       /*             been reversed */
-#define HP100_AUI_ST           0x01    /* 0:No, 1:Yes use AUI on TP card */
-
-/*
- * 10 MB LAN Control and Configuration Register II
- * (Page MAC_CTRL, 10_LAN_CFG_2, Offset 0x09)
- */
-#define HP100_SQU_ST           0x01    /* 0:No, 1:Yes collision signal sent */
-                                       /*       after Tx.Only used for AUI. */
-#define HP100_FULLDUP           0x02   /* 1: LXT901 XCVR fullduplx enabled */
-#define HP100_DOT3_MAC          0x04   /* 1: DOT 3 Mac sel. unless Autosel */
-
-/*
- * MAC Selection, use with MAC10_SEL bits
- */
-#define HP100_AUTO_SEL_10      0x0     /* Auto select */
-#define HP100_XCVR_LXT901_10   0x1     /* LXT901 10BaseT transceiver */
-#define HP100_XCVR_7213                0x2     /* 7213 transceiver */
-#define HP100_XCVR_82503       0x3     /* 82503 transceiver */
-
-/*
- *  100MB LAN Training Register
- * (Page MAC_CTRL, VG_LAN_CFG_2, Offset 0x0b) (old, pre 802.12)
- */
-#define HP100_FRAME_FORMAT     0x08    /* 0:802.3, 1:802.5 frames */
-#define HP100_BRIDGE           0x04    /* 0:No, 1:Yes tell hub i am a bridge */
-#define HP100_PROM_MODE                0x02    /* 0:No, 1:Yes tell hub card is */
-                                       /*         promiscuous */
-#define HP100_REPEATER         0x01    /* 0:No, 1:Yes tell hub MAC wants to */
-                                       /*         be a cascaded repeater */
-
-/*
- * 100MB LAN Control and Configuration Register
- * (Page MAC_CTRL, VG_LAN_CFG_1, Offset 0x0a)
- */
-#define HP100_VG_SEL           0x80    /* 0:No, 1:Yes use 100 Mbit MAC */
-#define HP100_LINK_UP_ST       0x40    /* 0:No, 1:Yes endnode logged in */
-#define HP100_LINK_CABLE_ST    0x20    /* 0:No, 1:Yes cable can hear tones */
-                                       /*         from  hub */
-#define HP100_LOAD_ADDR                0x10    /* 0->1 card addr will be sent  */
-                                       /* 100ms later the link status  */
-                                       /* bits are valid */
-#define HP100_LINK_CMD         0x08    /* 0->1 link will attempt to log in. */
-                                       /* 100ms later the link status */
-                                       /* bits are valid */
-#define HP100_TRN_DONE          0x04   /* NEW ETR-Chips only: Will be reset */
-                                       /* after LinkUp Cmd is given and set */
-                                       /* when training has completed. */
-#define HP100_LINK_GOOD_ST     0x02    /* 0:No, 1:Yes cable passed training */
-#define HP100_VG_RESET         0x01    /* 0:Yes, 1:No reset the 100VG MAC */
-
-
-/*
- *  MAC Configuration Register I
- * (Page MAC_CTRL, MAC_CFG_1, Offset 0x0c)
- */
-#define HP100_RX_IDLE          0x80    /* 0:Yes, 1:No currently receiving pkts */
-#define HP100_TX_IDLE          0x40    /* 0:Yes, 1:No currently Txing pkts */
-#define HP100_RX_EN            0x20    /* 1: allow receiving of pkts */
-#define HP100_TX_EN            0x10    /* 1: allow transmitting of pkts */
-#define HP100_ACC_ERRORED      0x08    /* 0:No, 1:Yes allow Rx of errored pkts */
-#define HP100_ACC_MC           0x04    /* 0:No, 1:Yes allow Rx of multicast pkts */
-#define HP100_ACC_BC           0x02    /* 0:No, 1:Yes allow Rx of broadcast pkts */
-#define HP100_ACC_PHY          0x01    /* 0:No, 1:Yes allow Rx of ALL phys. pkts */
-#define HP100_MAC1MODEMASK     0xf0    /* Hide ACC bits */
-#define HP100_MAC1MODE1                0x00    /* Receive nothing, must also disable RX */
-#define HP100_MAC1MODE2                0x00
-#define HP100_MAC1MODE3                HP100_MAC1MODE2 | HP100_ACC_BC
-#define HP100_MAC1MODE4                HP100_MAC1MODE3 | HP100_ACC_MC
-#define HP100_MAC1MODE5                HP100_MAC1MODE4 /* set mc hash to all ones also */
-#define HP100_MAC1MODE6                HP100_MAC1MODE5 | HP100_ACC_PHY /* Promiscuous */
-/* Note MODE6 will receive all GOOD packets on the LAN. This really needs
-   a mode 7 defined to be LAN Analyzer mode, which will receive errored and
-   runt packets, and keep the CRC bytes. */
-#define HP100_MAC1MODE7                HP100_MAC1MODE6 | HP100_ACC_ERRORED
-
-/*
- *  MAC Configuration Register II
- * (Page MAC_CTRL, MAC_CFG_2, Offset 0x0d)
- */
-#define HP100_TR_MODE          0x80    /* 0:No, 1:Yes support Token Ring formats */
-#define HP100_TX_SAME          0x40    /* 0:No, 1:Yes Tx same packet continuous */
-#define HP100_LBK_XCVR         0x20    /* 0:No, 1:Yes loopback through MAC & */
-                                       /*   transceiver */
-#define HP100_LBK_MAC          0x10    /* 0:No, 1:Yes loopback through MAC */
-#define HP100_CRC_I            0x08    /* 0:No, 1:Yes inhibit CRC on Tx packets */
-#define HP100_ACCNA             0x04   /* 1: For 802.5: Accept only token ring
-                                        * group addr that maches NA mask */
-#define HP100_KEEP_CRC         0x02    /* 0:No, 1:Yes keep CRC on Rx packets. */
-                                       /*   The length will reflect this. */
-#define HP100_ACCFA             0x01   /* 1: For 802.5: Accept only functional
-                                        * addrs that match FA mask (page1) */
-#define HP100_MAC2MODEMASK     0x02
-#define HP100_MAC2MODE1                0x00
-#define HP100_MAC2MODE2                0x00
-#define HP100_MAC2MODE3                0x00
-#define HP100_MAC2MODE4                0x00
-#define HP100_MAC2MODE5                0x00
-#define HP100_MAC2MODE6                0x00
-#define HP100_MAC2MODE7                KEEP_CRC
-
-/*
- * MAC Configuration Register III
- * (Page MAC_CTRL, MAC_CFG_3, Offset 0x0e)
- */
-#define HP100_PACKET_PACE       0x03   /* Packet Pacing:
-                                        * 00: No packet pacing
-                                        * 01: 8 to 16 uS delay
-                                        * 10: 16 to 32 uS delay
-                                        * 11: 32 to 64 uS delay
-                                        */
-#define HP100_LRF_EN            0x04   /* 1: External LAN Rcv Filter and
-                                        * TCP/IP Checksumming enabled. */
-#define HP100_AUTO_MODE         0x10   /* 1: AutoSelect between 10/100 */
-
-/*
- * MAC Configuration Register IV
- * (Page MAC_CTRL, MAC_CFG_4, Offset 0x0f)
- */
-#define HP100_MAC_SEL_ST        0x01   /* (R): Status of external VGSEL
-                                        * Signal, 1=100VG, 0=10Mbit sel. */
-#define HP100_LINK_FAIL_ST      0x02   /* (R): Status of Link Fail portion
-                                        * of the Misc. Interrupt */
-
-/*
- *  100 MB LAN Training Request/Allowed Registers
- * (Page MAC_CTRL, TRAIN_REQUEST and TRAIN_ALLOW, Offset 0x14-0x16)(ETR parts only)
- */
-#define HP100_MACRQ_REPEATER         0x0001    /* 1: MAC tells HUB it wants to be
-                                                *    a cascaded repeater
-                                                * 0: ... wants to be a DTE */
-#define HP100_MACRQ_PROMSC           0x0006    /* 2 bits: Promiscious mode
-                                                * 00: Rcv only unicast packets
-                                                *     specifically addr to this
-                                                *     endnode
-                                                * 10: Rcv all pckts fwded by
-                                                *     the local repeater */
-#define HP100_MACRQ_FRAMEFMT_EITHER  0x0018    /* 11: either format allowed */
-#define HP100_MACRQ_FRAMEFMT_802_3   0x0000    /* 00: 802.3 is requested */
-#define HP100_MACRQ_FRAMEFMT_802_5   0x0010    /* 10: 802.5 format is requested */
-#define HP100_CARD_MACVER            0xe000    /* R: 3 bit Cards 100VG MAC version */
-#define HP100_MALLOW_REPEATER        0x0001    /* If reset, requested access as an
-                                                * end node is allowed */
-#define HP100_MALLOW_PROMSC          0x0004    /* 2 bits: Promiscious mode
-                                                * 00: Rcv only unicast packets
-                                                *     specifically addr to this
-                                                *     endnode
-                                                * 10: Rcv all pckts fwded by
-                                                *     the local repeater */
-#define HP100_MALLOW_FRAMEFMT        0x00e0    /* 2 bits: Frame Format
-                                                * 00: 802.3 format will be used
-                                                * 10: 802.5 format will be used */
-#define HP100_MALLOW_ACCDENIED       0x0400    /* N bit */
-#define HP100_MALLOW_CONFIGURE       0x0f00    /* C bit */
-#define HP100_MALLOW_DUPADDR         0x1000    /* D bit */
-#define HP100_HUB_MACVER             0xe000    /* R: 3 bit 802.12 MAC/RMAC training */
-                                            /*    protocol of repeater */
-
-/* ****************************************************************************** */
-
-/*
- *  Set/Reset bits
- */
-#define HP100_SET_HB           0x0100  /* 0:Set fields to 0 whose mask is 1 */
-#define HP100_SET_LB           0x0001  /* HB sets upper byte, LB sets lower byte */
-#define HP100_RESET_HB         0x0000  /* For readability when resetting bits */
-#define HP100_RESET_LB         0x0000  /* For readability when resetting bits */
-
-/*
- *  Misc. Constants
- */
-#define HP100_LAN_100          100     /* lan_type value for VG */
-#define HP100_LAN_10           10      /* lan_type value for 10BaseT */
-#define HP100_LAN_COAX         9       /* lan_type value for Coax */
-#define HP100_LAN_ERR          (-1)    /* lan_type value for link down */
-
-/*
- * Bus Master Data Structures  ----------------------------------------------
- */
-
-#define MAX_RX_PDL              30     /* Card limit = 31 */
-#define MAX_RX_FRAG             2      /* Don't need more... */
-#define MAX_TX_PDL              29
-#define MAX_TX_FRAG             2      /* Limit = 31 */
-
-/* Define total PDL area size in bytes (should be 4096) */
-/* This is the size of kernel (dma) memory that will be allocated. */
-#define MAX_RINGSIZE ((MAX_RX_FRAG*8+4+4)*MAX_RX_PDL+(MAX_TX_FRAG*8+4+4)*MAX_TX_PDL)+16
-
-/* Ethernet Packet Sizes */
-#define MIN_ETHER_SIZE          60
-#define MAX_ETHER_SIZE          1514   /* Needed for preallocation of */
-                                       /* skb buffer when busmastering */
-
-/* Tx or Rx Ring Entry */
-typedef struct hp100_ring {
-       u_int *pdl;             /* Address of PDLs PDH, dword before
-                                * this address is used for rx hdr */
-       u_int pdl_paddr;        /* Physical address of PDL */
-       struct sk_buff *skb;
-       struct hp100_ring *next;
-} hp100_ring_t;
-
-
-
-/* Mask for Header Descriptor */
-#define HP100_PKT_LEN_MASK     0x1FFF  /* AND with RxLength to get length */
-
-
-/* Receive Packet Status.  Note, the error bits are only valid if ACC_ERRORED
-   bit in the MAC Configuration Register 1 is set. */
-#define HP100_RX_PRI           0x8000  /* 0:No, 1:Yes packet is priority */
-#define HP100_SDF_ERR          0x4000  /* 0:No, 1:Yes start of frame error */
-#define HP100_SKEW_ERR         0x2000  /* 0:No, 1:Yes skew out of range */
-#define HP100_BAD_SYMBOL_ERR   0x1000  /* 0:No, 1:Yes invalid symbol received */
-#define HP100_RCV_IPM_ERR      0x0800  /* 0:No, 1:Yes pkt had an invalid packet */
-                                       /*   marker */
-#define HP100_SYMBOL_BAL_ERR   0x0400  /* 0:No, 1:Yes symbol balance error */
-#define HP100_VG_ALN_ERR       0x0200  /* 0:No, 1:Yes non-octet received */
-#define HP100_TRUNC_ERR                0x0100  /* 0:No, 1:Yes the packet was truncated */
-#define HP100_RUNT_ERR         0x0040  /* 0:No, 1:Yes pkt length < Min Pkt */
-                                       /*   Length Reg. */
-#define HP100_ALN_ERR          0x0010  /* 0:No, 1:Yes align error. */
-#define HP100_CRC_ERR          0x0008  /* 0:No, 1:Yes CRC occurred. */
-
-/* The last three bits indicate the type of destination address */
-
-#define HP100_MULTI_ADDR_HASH  0x0006  /* 110: Addr multicast, matched hash */
-#define HP100_BROADCAST_ADDR   0x0003  /* x11: Addr broadcast */
-#define HP100_MULTI_ADDR_NO_HASH 0x0002        /* 010: Addr multicast, didn't match hash */
-#define HP100_PHYS_ADDR_MATCH  0x0001  /* x01: Addr was physical and mine */
-#define HP100_PHYS_ADDR_NO_MATCH 0x0000        /* x00: Addr was physical but not mine */
-
-/*
- *  macros
- */
-
-#define hp100_inb( reg ) \
-        inb( ioaddr + HP100_REG_##reg )
-#define hp100_inw( reg ) \
-       inw( ioaddr + HP100_REG_##reg )
-#define hp100_inl( reg ) \
-       inl( ioaddr + HP100_REG_##reg )
-#define hp100_outb( data, reg ) \
-       outb( data, ioaddr + HP100_REG_##reg )
-#define hp100_outw( data, reg ) \
-       outw( data, ioaddr + HP100_REG_##reg )
-#define hp100_outl( data, reg ) \
-       outl( data, ioaddr + HP100_REG_##reg )
-#define hp100_orb( data, reg ) \
-       outb( inb( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg )
-#define hp100_orw( data, reg ) \
-       outw( inw( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg )
-#define hp100_andb( data, reg ) \
-       outb( inb( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg )
-#define hp100_andw( data, reg ) \
-       outw( inw( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg )
-
-#define hp100_page( page ) \
-       outw( HP100_PAGE_##page, ioaddr + HP100_REG_PAGING )
-#define hp100_ints_off() \
-       outw( HP100_INT_EN | HP100_RESET_LB, ioaddr + HP100_REG_OPTION_LSW )
-#define hp100_ints_on() \
-       outw( HP100_INT_EN | HP100_SET_LB, ioaddr + HP100_REG_OPTION_LSW )
-#define hp100_mem_map_enable() \
-       outw( HP100_MMAP_DIS | HP100_RESET_HB, ioaddr + HP100_REG_OPTION_LSW )
-#define hp100_mem_map_disable() \
-       outw( HP100_MMAP_DIS | HP100_SET_HB, ioaddr + HP100_REG_OPTION_LSW )
index 71d3d88..be56e63 100644 (file)
@@ -607,6 +607,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
        for (i = 0; i < adapter->num_rx_queues; i++)
                rxdr[i].count = rxdr->count;
 
+       err = 0;
        if (netif_running(adapter->netdev)) {
                /* Try to get new resources before deleting old */
                err = e1000_setup_all_rx_resources(adapter);
@@ -627,14 +628,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
                adapter->rx_ring = rxdr;
                adapter->tx_ring = txdr;
                err = e1000_up(adapter);
-               if (err)
-                       goto err_setup;
        }
        kfree(tx_old);
        kfree(rx_old);
 
        clear_bit(__E1000_RESETTING, &adapter->flags);
-       return 0;
+       return err;
+
 err_setup_tx:
        e1000_free_all_rx_resources(adapter);
 err_setup_rx:
@@ -646,7 +646,6 @@ err_alloc_rx:
 err_alloc_tx:
        if (netif_running(adapter->netdev))
                e1000_up(adapter);
-err_setup:
        clear_bit(__E1000_RESETTING, &adapter->flags);
        return err;
 }
index de8c581..adce7e3 100644 (file)
@@ -894,8 +894,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
        case e1000_pch2lan:
        case e1000_pch_lpt:
        case e1000_pch_spt:
-               /* fall through */
        case e1000_pch_cnp:
+               /* fall through */
+       case e1000_pch_tgp:
                mask |= BIT(18);
                break;
        default:
@@ -1559,6 +1560,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
        switch (hw->mac.type) {
        case e1000_pch_spt:
        case e1000_pch_cnp:
+       case e1000_pch_tgp:
                fext_nvm11 = er32(FEXTNVM11);
                fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
                ew32(FEXTNVM11, fext_nvm11);
index eff75bd..f556163 100644 (file)
@@ -86,6 +86,17 @@ struct e1000_hw;
 #define E1000_DEV_ID_PCH_ICP_I219_V8           0x15E0
 #define E1000_DEV_ID_PCH_ICP_I219_LM9          0x15E1
 #define E1000_DEV_ID_PCH_ICP_I219_V9           0x15E2
+#define E1000_DEV_ID_PCH_CMP_I219_LM10         0x0D4E
+#define E1000_DEV_ID_PCH_CMP_I219_V10          0x0D4F
+#define E1000_DEV_ID_PCH_CMP_I219_LM11         0x0D4C
+#define E1000_DEV_ID_PCH_CMP_I219_V11          0x0D4D
+#define E1000_DEV_ID_PCH_CMP_I219_LM12         0x0D53
+#define E1000_DEV_ID_PCH_CMP_I219_V12          0x0D55
+#define E1000_DEV_ID_PCH_TGP_I219_LM13         0x15FB
+#define E1000_DEV_ID_PCH_TGP_I219_V13          0x15FC
+#define E1000_DEV_ID_PCH_TGP_I219_LM14         0x15F9
+#define E1000_DEV_ID_PCH_TGP_I219_V14          0x15FA
+#define E1000_DEV_ID_PCH_TGP_I219_LM15         0x15F4
 
 #define E1000_REVISION_4       4
 
@@ -109,6 +120,7 @@ enum e1000_mac_type {
        e1000_pch_lpt,
        e1000_pch_spt,
        e1000_pch_cnp,
+       e1000_pch_tgp,
 };
 
 enum e1000_media_type {
index a1fab77..b4135c5 100644 (file)
@@ -316,6 +316,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
        case e1000_pch_lpt:
        case e1000_pch_spt:
        case e1000_pch_cnp:
+       case e1000_pch_tgp:
                if (e1000_phy_is_accessible_pchlan(hw))
                        break;
 
@@ -458,6 +459,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
                case e1000_pch_lpt:
                case e1000_pch_spt:
                case e1000_pch_cnp:
+               case e1000_pch_tgp:
                        /* In case the PHY needs to be in mdio slow mode,
                         * set slow mode and try to get the PHY id again.
                         */
@@ -700,6 +702,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
        case e1000_pch_lpt:
        case e1000_pch_spt:
        case e1000_pch_cnp:
+       case e1000_pch_tgp:
        case e1000_pchlan:
                /* check management mode */
                mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -1638,6 +1641,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
        case e1000_pch_lpt:
        case e1000_pch_spt:
        case e1000_pch_cnp:
+       case e1000_pch_tgp:
                rc = e1000_init_phy_params_pchlan(hw);
                break;
        default:
@@ -2090,6 +2094,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
        case e1000_pch_lpt:
        case e1000_pch_spt:
        case e1000_pch_cnp:
+       case e1000_pch_tgp:
                sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
                break;
        default:
@@ -3127,6 +3132,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
        switch (hw->mac.type) {
        case e1000_pch_spt:
        case e1000_pch_cnp:
+       case e1000_pch_tgp:
                bank1_offset = nvm->flash_bank_size;
                act_offset = E1000_ICH_NVM_SIG_WORD;
 
@@ -4070,6 +4076,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
        case e1000_pch_lpt:
        case e1000_pch_spt:
        case e1000_pch_cnp:
+       case e1000_pch_tgp:
                word = NVM_COMPAT;
                valid_csum_mask = NVM_COMPAT_VALID_CSUM;
                break;
index 42f57ab..032b886 100644 (file)
@@ -3538,6 +3538,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
                adapter->cc.shift = shift;
                break;
        case e1000_pch_cnp:
+       case e1000_pch_tgp:
                if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
                        /* Stable 24MHz frequency */
                        incperiod = INCPERIOD_24MHZ;
@@ -4049,6 +4050,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
        case e1000_pch_lpt:
        case e1000_pch_spt:
        case e1000_pch_cnp:
+               /* fall-through */
+       case e1000_pch_tgp:
                fc->refresh_time = 0xFFFF;
                fc->pause_time = 0xFFFF;
 
@@ -4715,12 +4718,12 @@ int e1000e_close(struct net_device *netdev)
 
        pm_runtime_get_sync(&pdev->dev);
 
-       if (!test_bit(__E1000_DOWN, &adapter->state)) {
+       if (netif_device_present(netdev)) {
                e1000e_down(adapter, true);
                e1000_free_irq(adapter);
 
                /* Link status message must follow this format */
-               pr_info("%s NIC Link is Down\n", adapter->netdev->name);
+               pr_info("%s NIC Link is Down\n", netdev->name);
        }
 
        napi_disable(&adapter->napi);
@@ -6294,6 +6297,7 @@ fl_out:
        pm_runtime_put_sync(netdev->dev.parent);
 }
 
+#ifdef CONFIG_PM_SLEEP
 /* S0ix implementation */
 static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
 {
@@ -6461,15 +6465,20 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
        mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS;
        ew32(CTRL_EXT, mac_data);
 }
+#endif /* CONFIG_PM_SLEEP */
 
 static int e1000e_pm_freeze(struct device *dev)
 {
        struct net_device *netdev = dev_get_drvdata(dev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
+       bool present;
+
+       rtnl_lock();
 
+       present = netif_device_present(netdev);
        netif_device_detach(netdev);
 
-       if (netif_running(netdev)) {
+       if (present && netif_running(netdev)) {
                int count = E1000_CHECK_RESET_COUNT;
 
                while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
@@ -6481,6 +6490,8 @@ static int e1000e_pm_freeze(struct device *dev)
                e1000e_down(adapter, false);
                e1000_free_irq(adapter);
        }
+       rtnl_unlock();
+
        e1000e_reset_interrupt_capability(adapter);
 
        /* Allow time for pending master requests to run */
@@ -6728,6 +6739,30 @@ static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state)
        __e1000e_disable_aspm(pdev, state, 1);
 }
 
+static int e1000e_pm_thaw(struct device *dev)
+{
+       struct net_device *netdev = dev_get_drvdata(dev);
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+       int rc = 0;
+
+       e1000e_set_interrupt_capability(adapter);
+
+       rtnl_lock();
+       if (netif_running(netdev)) {
+               rc = e1000_request_irq(adapter);
+               if (rc)
+                       goto err_irq;
+
+               e1000e_up(adapter);
+       }
+
+       netif_device_attach(netdev);
+err_irq:
+       rtnl_unlock();
+
+       return rc;
+}
+
 #ifdef CONFIG_PM
 static int __e1000_resume(struct pci_dev *pdev)
 {
@@ -6795,26 +6830,6 @@ static int __e1000_resume(struct pci_dev *pdev)
 }
 
 #ifdef CONFIG_PM_SLEEP
-static int e1000e_pm_thaw(struct device *dev)
-{
-       struct net_device *netdev = dev_get_drvdata(dev);
-       struct e1000_adapter *adapter = netdev_priv(netdev);
-
-       e1000e_set_interrupt_capability(adapter);
-       if (netif_running(netdev)) {
-               u32 err = e1000_request_irq(adapter);
-
-               if (err)
-                       return err;
-
-               e1000e_up(adapter);
-       }
-
-       netif_device_attach(netdev);
-
-       return 0;
-}
-
 static int e1000e_pm_suspend(struct device *dev)
 {
        struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
@@ -7000,16 +7015,11 @@ static void e1000_netpoll(struct net_device *netdev)
 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
 {
-       struct net_device *netdev = pci_get_drvdata(pdev);
-       struct e1000_adapter *adapter = netdev_priv(netdev);
-
-       netif_device_detach(netdev);
+       e1000e_pm_freeze(&pdev->dev);
 
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;
 
-       if (netif_running(netdev))
-               e1000e_down(adapter, true);
        pci_disable_device(pdev);
 
        /* Request a slot slot reset. */
@@ -7075,10 +7085,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
 
        e1000_init_manageability_pt(adapter);
 
-       if (netif_running(netdev))
-               e1000e_up(adapter);
-
-       netif_device_attach(netdev);
+       e1000e_pm_thaw(&pdev->dev);
 
        /* If the controller has AMT, do not set DRV_LOAD until the interface
         * is up.  For all other cases, let the f/w know that the h/w is now
@@ -7589,15 +7596,13 @@ static void e1000_remove(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       bool down = test_bit(__E1000_DOWN, &adapter->state);
 
        e1000e_ptp_remove(adapter);
 
        /* The timers may be rescheduled, so explicitly disable them
         * from being rescheduled.
         */
-       if (!down)
-               set_bit(__E1000_DOWN, &adapter->state);
+       set_bit(__E1000_DOWN, &adapter->state);
        del_timer_sync(&adapter->phy_info_timer);
 
        cancel_work_sync(&adapter->reset_task);
@@ -7617,9 +7622,6 @@ static void e1000_remove(struct pci_dev *pdev)
                }
        }
 
-       /* Don't lie to e1000_close() down the road. */
-       if (!down)
-               clear_bit(__E1000_DOWN, &adapter->state);
        unregister_netdev(netdev);
 
        if (pci_dev_run_wake(pdev))
@@ -7749,6 +7751,17 @@ static const struct pci_device_id e1000_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V8), board_pch_cnp },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM9), board_pch_cnp },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V9), board_pch_cnp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM10), board_pch_cnp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V10), board_pch_cnp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM11), board_pch_cnp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_cnp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_cnp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
 
        { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
 };
index 1a4c65d..eaa5a0f 100644 (file)
@@ -295,6 +295,8 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
        case e1000_pch_lpt:
        case e1000_pch_spt:
        case e1000_pch_cnp:
+               /* fall-through */
+       case e1000_pch_tgp:
                if ((hw->mac.type < e1000_pch_lpt) ||
                    (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
                        adapter->ptp_clock_info.max_adj = 24000000 - 1;
index 2af9f63..cb63673 100644 (file)
@@ -1118,6 +1118,7 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
                                            const u8 *macaddr);
 int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
+int i40e_count_filters(struct i40e_vsi *vsi);
 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
 #ifdef CONFIG_I40E_DCB
index 72c0488..9f0a4e9 100644 (file)
@@ -507,6 +507,59 @@ shutdown_arq_out:
        return ret_code;
 }
 
+/**
+ *  i40e_set_hw_flags - set HW flags
+ *  @hw: pointer to the hardware structure
+ **/
+static void i40e_set_hw_flags(struct i40e_hw *hw)
+{
+       struct i40e_adminq_info *aq = &hw->aq;
+
+       hw->flags = 0;
+
+       switch (hw->mac.type) {
+       case I40E_MAC_XL710:
+               if (aq->api_maj_ver > 1 ||
+                   (aq->api_maj_ver == 1 &&
+                    aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
+                       hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+                       hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+                       /* The ability to RX (not drop) 802.1ad frames */
+                       hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+               }
+               break;
+       case I40E_MAC_X722:
+               hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+                            I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
+               if (aq->api_maj_ver > 1 ||
+                   (aq->api_maj_ver == 1 &&
+                    aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
+                       hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+               /* fall through */
+       default:
+               break;
+       }
+
+       /* Newer versions of firmware require lock when reading the NVM */
+       if (aq->api_maj_ver > 1 ||
+           (aq->api_maj_ver == 1 &&
+            aq->api_min_ver >= 5))
+               hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
+       if (aq->api_maj_ver > 1 ||
+           (aq->api_maj_ver == 1 &&
+            aq->api_min_ver >= 8)) {
+               hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
+               hw->flags |= I40E_HW_FLAG_DROP_MODE;
+       }
+
+       if (aq->api_maj_ver > 1 ||
+           (aq->api_maj_ver == 1 &&
+            aq->api_min_ver >= 9))
+               hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
+}
+
 /**
  *  i40e_init_adminq - main initialization routine for Admin Queue
  *  @hw: pointer to the hardware structure
@@ -571,6 +624,11 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_arq;
 
+       /* Some features were introduced in different FW API version
+        * for different MAC type.
+        */
+       i40e_set_hw_flags(hw);
+
        /* get the NVM version info */
        i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
                           &hw->nvm.version);
@@ -596,25 +654,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
                hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
        }
 
-       /* Newer versions of firmware require lock when reading the NVM */
-       if (hw->aq.api_maj_ver > 1 ||
-           (hw->aq.api_maj_ver == 1 &&
-            hw->aq.api_min_ver >= 5))
-               hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
-
        /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
        if (hw->aq.api_maj_ver > 1 ||
            (hw->aq.api_maj_ver == 1 &&
             hw->aq.api_min_ver >= 7))
                hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
 
-       if (hw->aq.api_maj_ver > 1 ||
-           (hw->aq.api_maj_ver == 1 &&
-            hw->aq.api_min_ver >= 8)) {
-               hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
-               hw->flags |= I40E_HW_FLAG_DROP_MODE;
-       }
-
        if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
                ret_code = I40E_ERR_FIRMWARE_API_VERSION;
                goto init_adminq_free_arq;
index 530613f..a23f89f 100644 (file)
@@ -2249,7 +2249,13 @@ struct i40e_aqc_phy_register_access {
 #define I40E_AQ_PHY_REG_ACCESS_EXTERNAL        1
 #define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
        u8      dev_address;
-       u8      reserved1[2];
+       u8      cmd_flags;
+#define I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE   0x01
+#define I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER      0x02
+#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT    2
+#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK     (0x3 << \
+               I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT)
+       u8      reserved1;
        __le32  reg_address;
        __le32  reg_value;
        u8      reserved2[4];
index d37c6e0..8b25a6d 100644 (file)
@@ -933,10 +933,6 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
        else
                hw->pf_id = (u8)(func_rid & 0x7);
 
-       if (hw->mac.type == I40E_MAC_X722)
-               hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
-                            I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
-
        status = i40e_init_nvm(hw);
        return status;
 }
@@ -1441,9 +1437,9 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
        u32 gpio_val = 0;
        u32 port;
 
-       if (!hw->func_caps.led[idx])
+       if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
+           !hw->func_caps.led[idx])
                return 0;
-
        gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
        port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
                I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
@@ -1462,8 +1458,15 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
 #define I40E_FILTER_ACTIVITY 0xE
 #define I40E_LINK_ACTIVITY 0xC
 #define I40E_MAC_ACTIVITY 0xD
+#define I40E_FW_LED BIT(4)
+#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
+                            I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+
 #define I40E_LED0 22
 
+#define I40E_PIN_FUNC_SDP 0x0
+#define I40E_PIN_FUNC_LED 0x1
+
 /**
  * i40e_led_get - return current on/off mode
  * @hw: pointer to the hw struct
@@ -1508,8 +1511,10 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
 {
        int i;
 
-       if (mode & 0xfffffff0)
+       if (mode & ~I40E_LED_MODE_VALID) {
                hw_dbg(hw, "invalid mode passed in %X\n", mode);
+               return;
+       }
 
        /* as per the documentation GPIO 22-29 are the LED
         * GPIO pins named LED0..LED7
@@ -1519,6 +1524,20 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
 
                if (!gpio_val)
                        continue;
+
+               if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
+                       u32 pin_func = 0;
+
+                       if (mode & I40E_FW_LED)
+                               pin_func = I40E_PIN_FUNC_SDP;
+                       else
+                               pin_func = I40E_PIN_FUNC_LED;
+
+                       gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
+                       gpio_val |= ((pin_func <<
+                                    I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
+                                    I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
+               }
                gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
                /* this & is a bit of paranoia, but serves as a range check */
                gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
@@ -2570,9 +2589,16 @@ noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
                if (status)
                        return status;
 
-               hw->phy.link_info.req_fec_info =
-                       abilities.fec_cfg_curr_mod_ext_info &
-                       (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
+               if (abilities.fec_cfg_curr_mod_ext_info &
+                   I40E_AQ_ENABLE_FEC_AUTO)
+                       hw->phy.link_info.req_fec_info =
+                               (I40E_AQ_REQUEST_FEC_KR |
+                                I40E_AQ_REQUEST_FEC_RS);
+               else
+                       hw->phy.link_info.req_fec_info =
+                               abilities.fec_cfg_curr_mod_ext_info &
+                               (I40E_AQ_REQUEST_FEC_KR |
+                                I40E_AQ_REQUEST_FEC_RS);
 
                memcpy(hw->phy.link_info.module_type, &abilities.module_type,
                       sizeof(hw->phy.link_info.module_type));
@@ -5043,7 +5069,7 @@ static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
                status =
                       i40e_aq_get_phy_register(hw,
                                                I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
-                                               I40E_PHY_COM_REG_PAGE,
+                                               I40E_PHY_COM_REG_PAGE, true,
                                                I40E_PHY_LED_PROV_REG_1,
                                                reg_val, NULL);
        } else {
@@ -5076,7 +5102,7 @@ static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
                status =
                       i40e_aq_set_phy_register(hw,
                                                I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
-                                               I40E_PHY_COM_REG_PAGE,
+                                               I40E_PHY_COM_REG_PAGE, true,
                                                I40E_PHY_LED_PROV_REG_1,
                                                reg_val, NULL);
        } else {
@@ -5115,7 +5141,7 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
                status =
                      i40e_aq_get_phy_register(hw,
                                               I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
-                                              I40E_PHY_COM_REG_PAGE,
+                                              I40E_PHY_COM_REG_PAGE, true,
                                               I40E_PHY_LED_PROV_REG_1,
                                               &reg_val_aq, NULL);
                if (status == I40E_SUCCESS)
@@ -5320,20 +5346,49 @@ do_retry:
 }
 
 /**
- * i40e_aq_set_phy_register
+ * i40e_mdio_if_number_selection - MDIO I/F number selection
+ * @hw: pointer to the hw struct
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
+ * @cmd: pointer to PHY Register command structure
+ **/
+static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
+                                         u8 mdio_num,
+                                         struct i40e_aqc_phy_register_access *cmd)
+{
+       if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
+               if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
+                       cmd->cmd_flags |=
+                               I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
+                               ((mdio_num <<
+                               I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
+                               I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
+               else
+                       i40e_debug(hw, I40E_DEBUG_PHY,
+                                  "MDIO I/F number selection not supported by current FW version.\n");
+       }
+}
+
+/**
+ * i40e_aq_set_phy_register_ext
  * @hw: pointer to the hw struct
  * @phy_select: select which phy should be accessed
  * @dev_addr: PHY device address
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
  * @reg_addr: PHY register address
  * @reg_val: new register value
  * @cmd_details: pointer to command details structure or NULL
  *
  * Write the external PHY register.
+ * NOTE: In common cases MDIO I/F number should not be changed, that's why you
+ * may use simple wrapper i40e_aq_set_phy_register.
  **/
-i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
-                                    u8 phy_select, u8 dev_addr,
-                                    u32 reg_addr, u32 reg_val,
-                                    struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+                            u8 phy_select, u8 dev_addr, bool page_change,
+                            bool set_mdio, u8 mdio_num,
+                            u32 reg_addr, u32 reg_val,
+                            struct i40e_asq_cmd_details *cmd_details)
 {
        struct i40e_aq_desc desc;
        struct i40e_aqc_phy_register_access *cmd =
@@ -5348,26 +5403,36 @@ i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
        cmd->reg_address = cpu_to_le32(reg_addr);
        cmd->reg_value = cpu_to_le32(reg_val);
 
+       i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
+
+       if (!page_change)
+               cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
+
        status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
        return status;
 }
 
 /**
- * i40e_aq_get_phy_register
+ * i40e_aq_get_phy_register_ext
  * @hw: pointer to the hw struct
  * @phy_select: select which phy should be accessed
  * @dev_addr: PHY device address
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
  * @reg_addr: PHY register address
  * @reg_val: read register value
  * @cmd_details: pointer to command details structure or NULL
  *
  * Read the external PHY register.
+ * NOTE: In common cases MDIO I/F number should not be changed, that's why you
+ * may use simple wrapper i40e_aq_get_phy_register.
  **/
-i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
-                                    u8 phy_select, u8 dev_addr,
-                                    u32 reg_addr, u32 *reg_val,
-                                    struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+                            u8 phy_select, u8 dev_addr, bool page_change,
+                            bool set_mdio, u8 mdio_num,
+                            u32 reg_addr, u32 *reg_val,
+                            struct i40e_asq_cmd_details *cmd_details)
 {
        struct i40e_aq_desc desc;
        struct i40e_aqc_phy_register_access *cmd =
@@ -5381,6 +5446,11 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
        cmd->dev_address = dev_addr;
        cmd->reg_address = cpu_to_le32(reg_addr);
 
+       i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
+
+       if (!page_change)
+               cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
+
        status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
        if (!status)
                *reg_val = le32_to_cpu(cmd->reg_value);
index 200a1cb..9de503c 100644 (file)
@@ -889,7 +889,9 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
 
                ret = i40e_read_nvm_module_data(hw,
                                                I40E_SR_EMP_SR_SETTINGS_PTR,
-                                               offset, 1,
+                                               offset,
+                                               I40E_LLDP_CURRENT_STATUS_OFFSET,
+                                               I40E_LLDP_CURRENT_STATUS_SIZE,
                                                &lldp_cfg.adminstatus);
        } else {
                ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
index 2a80c5d..ba86ad8 100644 (file)
@@ -32,6 +32,9 @@
 #define I40E_CEE_MAX_FEAT_TYPE         3
 #define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET  0x2B
 #define I40E_LLDP_CURRENT_STATUS_X722_OFFSET   0x31
+#define I40E_LLDP_CURRENT_STATUS_OFFSET                1
+#define I40E_LLDP_CURRENT_STATUS_SIZE          1
+
 /* Defines for LLDP TLV header */
 #define I40E_LLDP_TLV_LEN_SHIFT                0
 #define I40E_LLDP_TLV_LEN_MASK         (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
index bac4da0..bf15a86 100644 (file)
@@ -23,6 +23,8 @@
 #define I40E_DEV_ID_10G_BASE_T_BC      0x15FF
 #define I40E_DEV_ID_10G_B              0x104F
 #define I40E_DEV_ID_10G_SFP            0x104E
+#define I40E_IS_X710TL_DEVICE(d) \
+       ((d) == I40E_DEV_ID_10G_BASE_T_BC)
 #define I40E_DEV_ID_KX_X722            0x37CE
 #define I40E_DEV_ID_QSFP_X722          0x37CF
 #define I40E_DEV_ID_SFP_X722           0x37D0
index 41e1240..d24d873 100644 (file)
@@ -722,7 +722,14 @@ static void i40e_get_settings_link_up_fec(u8 req_fec_info,
        ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
        ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
 
-       if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
+       if ((I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) &&
+           (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info)) {
+               ethtool_link_ksettings_add_link_mode(ks, advertising,
+                                                    FEC_NONE);
+               ethtool_link_ksettings_add_link_mode(ks, advertising,
+                                                    FEC_BASER);
+               ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+       } else if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
                ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
        } else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) {
                ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -730,12 +737,6 @@ static void i40e_get_settings_link_up_fec(u8 req_fec_info,
        } else {
                ethtool_link_ksettings_add_link_mode(ks, advertising,
                                                     FEC_NONE);
-               if (I40E_AQ_SET_FEC_AUTO & req_fec_info) {
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            FEC_RS);
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            FEC_BASER);
-               }
        }
 }
 
@@ -1437,6 +1438,7 @@ static int i40e_get_fec_param(struct net_device *netdev,
        struct i40e_hw *hw = &pf->hw;
        i40e_status status = 0;
        int err = 0;
+       u8 fec_cfg;
 
        /* Get the current phy config */
        memset(&abilities, 0, sizeof(abilities));
@@ -1448,18 +1450,16 @@ static int i40e_get_fec_param(struct net_device *netdev,
        }
 
        fecparam->fec = 0;
-       if (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_AUTO)
+       fec_cfg = abilities.fec_cfg_curr_mod_ext_info;
+       if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
                fecparam->fec |= ETHTOOL_FEC_AUTO;
-       if ((abilities.fec_cfg_curr_mod_ext_info &
-            I40E_AQ_SET_FEC_REQUEST_RS) ||
-           (abilities.fec_cfg_curr_mod_ext_info &
-            I40E_AQ_SET_FEC_ABILITY_RS))
+       else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_RS |
+                I40E_AQ_SET_FEC_ABILITY_RS))
                fecparam->fec |= ETHTOOL_FEC_RS;
-       if ((abilities.fec_cfg_curr_mod_ext_info &
-            I40E_AQ_SET_FEC_REQUEST_KR) ||
-           (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_ABILITY_KR))
+       else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_KR |
+                I40E_AQ_SET_FEC_ABILITY_KR))
                fecparam->fec |= ETHTOOL_FEC_BASER;
-       if (abilities.fec_cfg_curr_mod_ext_info == 0)
+       if (fec_cfg == 0)
                fecparam->fec |= ETHTOOL_FEC_OFF;
 
        if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
@@ -5112,7 +5112,7 @@ static int i40e_get_module_info(struct net_device *netdev,
        case I40E_MODULE_TYPE_SFP:
                status = i40e_aq_get_phy_register(hw,
                                I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
-                               I40E_I2C_EEPROM_DEV_ADDR,
+                               I40E_I2C_EEPROM_DEV_ADDR, true,
                                I40E_MODULE_SFF_8472_COMP,
                                &sff8472_comp, NULL);
                if (status)
@@ -5120,7 +5120,7 @@ static int i40e_get_module_info(struct net_device *netdev,
 
                status = i40e_aq_get_phy_register(hw,
                                I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
-                               I40E_I2C_EEPROM_DEV_ADDR,
+                               I40E_I2C_EEPROM_DEV_ADDR, true,
                                I40E_MODULE_SFF_8472_SWAP,
                                &sff8472_swap, NULL);
                if (status)
@@ -5152,7 +5152,7 @@ static int i40e_get_module_info(struct net_device *netdev,
                /* Read from memory page 0. */
                status = i40e_aq_get_phy_register(hw,
                                I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
-                               0,
+                               0, true,
                                I40E_MODULE_REVISION_ADDR,
                                &sff8636_rev, NULL);
                if (status)
@@ -5223,7 +5223,7 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
 
                status = i40e_aq_get_phy_register(hw,
                                I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
-                               addr, offset, &value, NULL);
+                               true, addr, offset, &value, NULL);
                if (status)
                        return -EIO;
                data[i] = value;
@@ -5242,6 +5242,7 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
 }
 
 static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
+       .get_drvinfo            = i40e_get_drvinfo,
        .set_eeprom             = i40e_set_eeprom,
        .get_eeprom_len         = i40e_get_eeprom_len,
        .get_eeprom             = i40e_get_eeprom,
index 6031223..b3d7edb 100644 (file)
@@ -1109,6 +1109,25 @@ void i40e_update_stats(struct i40e_vsi *vsi)
        i40e_update_vsi_stats(vsi);
 }
 
+/**
+ * i40e_count_filters - counts VSI mac filters
+ * @vsi: the VSI to be searched
+ *
+ * Returns count of mac filters
+ **/
+int i40e_count_filters(struct i40e_vsi *vsi)
+{
+       struct i40e_mac_filter *f;
+       struct hlist_node *h;
+       int bkt;
+       int cnt = 0;
+
+       hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+               ++cnt;
+
+       return cnt;
+}
+
 /**
  * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
  * @vsi: the VSI to be searched
@@ -3534,14 +3553,14 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
                q_vector->rx.target_itr =
                        ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
                wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
-                    q_vector->rx.target_itr);
+                    q_vector->rx.target_itr >> 1);
                q_vector->rx.current_itr = q_vector->rx.target_itr;
 
                q_vector->tx.next_update = jiffies + 1;
                q_vector->tx.target_itr =
                        ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
                wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
-                    q_vector->tx.target_itr);
+                    q_vector->tx.target_itr >> 1);
                q_vector->tx.current_itr = q_vector->tx.target_itr;
 
                wr32(hw, I40E_PFINT_RATEN(vector - 1),
@@ -3646,11 +3665,11 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
        /* set the ITR configuration */
        q_vector->rx.next_update = jiffies + 1;
        q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
-       wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr);
+       wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
        q_vector->rx.current_itr = q_vector->rx.target_itr;
        q_vector->tx.next_update = jiffies + 1;
        q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
-       wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr);
+       wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
        q_vector->tx.current_itr = q_vector->tx.target_itr;
 
        i40e_enable_misc_int_causes(pf);
@@ -7168,6 +7187,7 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
                ch->num_queue_pairs = qcnt;
                if (!i40e_setup_channel(pf, vsi, ch)) {
                        ret = -EINVAL;
+                       kfree(ch);
                        goto err_free;
                }
                ch->parent_vsi = vsi;
@@ -11396,7 +11416,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
 
        /* associate no queues to the misc vector */
        wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
-       wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
+       wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
 
        i40e_flush(hw);
 
@@ -12911,6 +12931,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
                          NETIF_F_GSO_IPXIP6            |
                          NETIF_F_GSO_UDP_TUNNEL        |
                          NETIF_F_GSO_UDP_TUNNEL_CSUM   |
+                         NETIF_F_GSO_UDP_L4            |
                          NETIF_F_SCTP_CRC              |
                          NETIF_F_RXHASH                |
                          NETIF_F_RXCSUM                |
index e4d8d20..7164f4a 100644 (file)
@@ -323,20 +323,24 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
 
 /**
  * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
- * @hw: pointer to the HW structure
+ * @hw: Pointer to the HW structure
  * @module_ptr: Pointer to module in words with respect to NVM beginning
- * @offset: offset in words from module start
+ * @module_offset: Offset in words from module start
+ * @data_offset: Offset in words from reading data area start
  * @words_data_size: Words to read from NVM
  * @data_ptr: Pointer to memory location where resulting buffer will be stored
  **/
-i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
-                                     u8 module_ptr, u16 offset,
-                                     u16 words_data_size,
-                                     u16 *data_ptr)
+enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
+                                               u8 module_ptr,
+                                               u16 module_offset,
+                                               u16 data_offset,
+                                               u16 words_data_size,
+                                               u16 *data_ptr)
 {
        i40e_status status;
+       u16 specific_ptr = 0;
        u16 ptr_value = 0;
-       u32 flat_offset;
+       u32 offset = 0;
 
        if (module_ptr != 0) {
                status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
@@ -352,36 +356,35 @@ i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
 
        /* Pointer not initialized */
        if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
-           ptr_value == I40E_NVM_INVALID_VAL)
+           ptr_value == I40E_NVM_INVALID_VAL) {
+               i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
                return I40E_ERR_BAD_PTR;
+       }
 
        /* Check whether the module is in SR mapped area or outside */
        if (ptr_value & I40E_PTR_TYPE) {
                /* Pointer points outside of the Shared RAM mapped area */
-               ptr_value &= ~I40E_PTR_TYPE;
+               i40e_debug(hw, I40E_DEBUG_ALL,
+                          "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n");
 
-               /* PtrValue in 4kB units, need to convert to words */
-               ptr_value /= 2;
-               flat_offset = ((u32)ptr_value * 0x1000) + (u32)offset;
-               status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
-               if (!status) {
-                       status = i40e_aq_read_nvm(hw, 0, 2 * flat_offset,
-                                                 2 * words_data_size,
-                                                 data_ptr, true, NULL);
-                       i40e_release_nvm(hw);
-                       if (status) {
-                               i40e_debug(hw, I40E_DEBUG_ALL,
-                                          "Reading nvm aq failed.Error code: %d.\n",
-                                          status);
-                               return I40E_ERR_NVM;
-                       }
-               } else {
-                       return I40E_ERR_NVM;
-               }
+               return I40E_ERR_PARAM;
        } else {
                /* Read from the Shadow RAM */
-               status = i40e_read_nvm_buffer(hw, ptr_value + offset,
-                                             &words_data_size, data_ptr);
+
+               status = i40e_read_nvm_word(hw, ptr_value + module_offset,
+                                           &specific_ptr);
+               if (status) {
+                       i40e_debug(hw, I40E_DEBUG_ALL,
+                                  "Reading nvm word failed.Error code: %d.\n",
+                                  status);
+                       return I40E_ERR_NVM;
+               }
+
+               offset = ptr_value + module_offset + specific_ptr +
+                       data_offset;
+
+               status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
+                                             data_ptr);
                if (status) {
                        i40e_debug(hw, I40E_DEBUG_ALL,
                                   "Reading nvm buffer failed.Error code: %d.\n",
index 5250441..bbb478f 100644 (file)
@@ -315,10 +315,12 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
 void i40e_release_nvm(struct i40e_hw *hw);
 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
                                         u16 *data);
-i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
-                                     u8 module_ptr, u16 offset,
-                                     u16 words_data_size,
-                                     u16 *data_ptr);
+enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
+                                               u8 module_ptr,
+                                               u16 module_offset,
+                                               u16 data_offset,
+                                               u16 words_data_size,
+                                               u16 *data_ptr);
 i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
                                 u16 *words, u16 *data);
 i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
@@ -409,14 +411,24 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
                                u32 reg_addr, u32 reg_val,
                                struct i40e_asq_cmd_details *cmd_details);
 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
-i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
-                                    u8 phy_select, u8 dev_addr,
-                                    u32 reg_addr, u32 reg_val,
-                                    struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
-                                    u8 phy_select, u8 dev_addr,
-                                    u32 reg_addr, u32 *reg_val,
-                                    struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+                            u8 phy_select, u8 dev_addr, bool page_change,
+                            bool set_mdio, u8 mdio_num,
+                            u32 reg_addr, u32 reg_val,
+                            struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+                            u8 phy_select, u8 dev_addr, bool page_change,
+                            bool set_mdio, u8 mdio_num,
+                            u32 reg_addr, u32 *reg_val,
+                            struct i40e_asq_cmd_details *cmd_details);
+
+/* Convenience wrappers for most common use case */
+#define i40e_aq_set_phy_register(hw, ps, da, pc, ra, rv, cd)           \
+       i40e_aq_set_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
+#define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd)           \
+       i40e_aq_get_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
 
 i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
                                            u16 reg, u8 phy_addr, u16 *value);
index e3f29dc..b849603 100644 (file)
@@ -2960,10 +2960,16 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
 
        /* remove payload length from inner checksum */
        paylen = skb->len - l4_offset;
-       csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
 
-       /* compute length of segmentation header */
-       *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+       if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+               csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen));
+               /* compute length of segmentation header */
+               *hdr_len = sizeof(*l4.udp) + l4_offset;
+       } else {
+               csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
+               /* compute length of segmentation header */
+               *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+       }
 
        /* pull values out of skb_shinfo */
        gso_size = skb_shinfo(skb)->gso_size;
index b43ec94..6ea2867 100644 (file)
@@ -624,6 +624,7 @@ struct i40e_hw {
 #define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
 #define I40E_HW_FLAG_FW_LLDP_STOPPABLE      BIT_ULL(4)
 #define I40E_HW_FLAG_FW_LLDP_PERSISTENT     BIT_ULL(5)
+#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6)
 #define I40E_HW_FLAG_DROP_MODE              BIT_ULL(7)
        u64 flags;
 
index 3d24408..a271066 100644 (file)
@@ -955,7 +955,6 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
                i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
                vf->lan_vsi_idx = 0;
                vf->lan_vsi_id = 0;
-               vf->num_mac = 0;
        }
 
        /* do the accounting and remove additional ADq VSI's */
@@ -2548,20 +2547,12 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
                                           struct virtchnl_ether_addr_list *al)
 {
        struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
+       int mac2add_cnt = 0;
        int i;
 
-       /* If this VF is not privileged, then we can't add more than a limited
-        * number of addresses. Check to make sure that the additions do not
-        * push us over the limit.
-        */
-       if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
-           (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) {
-               dev_err(&pf->pdev->dev,
-                       "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
-               return -EPERM;
-       }
-
        for (i = 0; i < al->num_elements; i++) {
+               struct i40e_mac_filter *f;
                u8 *addr = al->list[i].addr;
 
                if (is_broadcast_ether_addr(addr) ||
@@ -2585,8 +2576,24 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
                                "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
                        return -EPERM;
                }
+
+               /*count filters that really will be added*/
+               f = i40e_find_mac(vsi, addr);
+               if (!f)
+                       ++mac2add_cnt;
        }
 
+       /* If this VF is not privileged, then we can't add more than a limited
+        * number of addresses. Check to make sure that the additions do not
+        * push us over the limit.
+        */
+       if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
+           (i40e_count_filters(vsi) + mac2add_cnt) >
+                   I40E_VC_MAX_MAC_ADDR_PER_VF) {
+               dev_err(&pf->pdev->dev,
+                       "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+               return -EPERM;
+       }
        return 0;
 }
 
@@ -2640,8 +2647,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
                                ret = I40E_ERR_PARAM;
                                spin_unlock_bh(&vsi->mac_filter_hash_lock);
                                goto error_param;
-                       } else {
-                               vf->num_mac++;
                        }
                }
        }
@@ -2689,16 +2694,6 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
                        ret = I40E_ERR_INVALID_MAC_ADDR;
                        goto error_param;
                }
-
-               if (vf->pf_set_mac &&
-                   ether_addr_equal(al->list[i].addr,
-                                    vf->default_lan_addr.addr)) {
-                       dev_err(&pf->pdev->dev,
-                               "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n",
-                               vf->default_lan_addr.addr, vf->vf_id);
-                       ret = I40E_ERR_PARAM;
-                       goto error_param;
-               }
        }
        vsi = pf->vsi[vf->lan_vsi_idx];
 
@@ -2709,8 +2704,6 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
                        ret = I40E_ERR_INVALID_MAC_ADDR;
                        spin_unlock_bh(&vsi->mac_filter_hash_lock);
                        goto error_param;
-               } else {
-                       vf->num_mac--;
                }
 
        spin_unlock_bh(&vsi->mac_filter_hash_lock);
index 7164b9b..1ce0624 100644 (file)
@@ -101,7 +101,6 @@ struct i40e_vf {
        bool link_up;           /* only valid if VF link is forced */
        bool queues_enabled;    /* true if the VF queues are enabled */
        bool spoofchk;
-       u16 num_mac;
        u16 num_vlan;
 
        /* ADq related variables */
index b1c3227..a05dfec 100644 (file)
@@ -157,11 +157,6 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
                err = i40e_queue_pair_enable(vsi, qid);
                if (err)
                        return err;
-
-               /* Kick start the NAPI context so that receiving will start */
-               err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
-               if (err)
-                       return err;
        }
 
        return 0;
index 3ec2ce0..8a6ef35 100644 (file)
@@ -466,7 +466,7 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
                        ? igb_setup_copper_link_82575
                        : igb_setup_serdes_link_82575;
 
-       if (mac->type == e1000_82580) {
+       if (mac->type == e1000_82580 || mac->type == e1000_i350) {
                switch (hw->device_id) {
                /* feature not supported on these id's */
                case E1000_DEV_ID_DH89XXCC_SGMII:
index 6ad775b..63ec253 100644 (file)
@@ -127,6 +127,7 @@ struct e1000_adv_tx_context_desc {
 };
 
 #define E1000_ADVTXD_MACLEN_SHIFT    9  /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000  /* L4 Packet TYPE of UDP */
 #define E1000_ADVTXD_TUCMD_IPV4    0x00000400  /* IP Packet Type: 1=IPv4 */
 #define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800  /* L4 Packet TYPE of TCP */
 #define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */
index 105b062..48a40e4 100644 (file)
@@ -753,7 +753,8 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg)
                struct net_device *netdev = igb->netdev;
                hw->hw_addr = NULL;
                netdev_err(netdev, "PCIe link lost\n");
-               WARN(1, "igb: Failed to read reg 0x%x!\n", reg);
+               WARN(pci_device_is_present(igb->pdev),
+                    "igb: Failed to read reg 0x%x!\n", reg);
        }
 
        return value;
@@ -2064,7 +2065,8 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
        if ((hw->phy.media_type == e1000_media_type_copper) &&
            (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
                swap_now = true;
-       } else if (!(connsw & E1000_CONNSW_SERDESD)) {
+       } else if ((hw->phy.media_type != e1000_media_type_copper) &&
+                  !(connsw & E1000_CONNSW_SERDESD)) {
                /* copper signal takes time to appear */
                if (adapter->copper_tries < 4) {
                        adapter->copper_tries++;
@@ -2370,7 +2372,7 @@ void igb_reset(struct igb_adapter *adapter)
                adapter->ei.get_invariants(hw);
                adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
        }
-       if ((mac->type == e1000_82575) &&
+       if ((mac->type == e1000_82575 || mac->type == e1000_i350) &&
            (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
                igb_enable_mas(adapter);
        }
@@ -2516,6 +2518,7 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
        if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
                return features & ~(NETIF_F_HW_CSUM |
                                    NETIF_F_SCTP_CRC |
+                                   NETIF_F_GSO_UDP_L4 |
                                    NETIF_F_HW_VLAN_CTAG_TX |
                                    NETIF_F_TSO |
                                    NETIF_F_TSO6);
@@ -2524,6 +2527,7 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
        if (unlikely(network_hdr_len >  IGB_MAX_NETWORK_HDR_LEN))
                return features & ~(NETIF_F_HW_CSUM |
                                    NETIF_F_SCTP_CRC |
+                                   NETIF_F_GSO_UDP_L4 |
                                    NETIF_F_TSO |
                                    NETIF_F_TSO6);
 
@@ -3120,7 +3124,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                            NETIF_F_HW_CSUM;
 
        if (hw->mac.type >= e1000_82576)
-               netdev->features |= NETIF_F_SCTP_CRC;
+               netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
 
        if (hw->mac.type >= e1000_i350)
                netdev->features |= NETIF_F_HW_TC;
@@ -5694,6 +5698,7 @@ static int igb_tso(struct igb_ring *tx_ring,
        } ip;
        union {
                struct tcphdr *tcp;
+               struct udphdr *udp;
                unsigned char *hdr;
        } l4;
        u32 paylen, l4_offset;
@@ -5713,7 +5718,8 @@ static int igb_tso(struct igb_ring *tx_ring,
        l4.hdr = skb_checksum_start(skb);
 
        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-       type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
+       type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
+                     E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP;
 
        /* initialize outer IP header fields */
        if (ip.v4->version == 4) {
@@ -5741,12 +5747,19 @@ static int igb_tso(struct igb_ring *tx_ring,
        /* determine offset of inner transport header */
        l4_offset = l4.hdr - skb->data;
 
-       /* compute length of segmentation header */
-       *hdr_len = (l4.tcp->doff * 4) + l4_offset;
-
        /* remove payload length from inner checksum */
        paylen = skb->len - l4_offset;
-       csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+       if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) {
+               /* compute length of segmentation header */
+               *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+               csum_replace_by_diff(&l4.tcp->check,
+                       (__force __wsum)htonl(paylen));
+       } else {
+               /* compute length of segmentation header */
+               *hdr_len = sizeof(*l4.udp) + l4_offset;
+               csum_replace_by_diff(&l4.udp->check,
+                                    (__force __wsum)htonl(paylen));
+       }
 
        /* update gso size and bytecount with header size */
        first->gso_segs = skb_shinfo(skb)->gso_segs;
index 6e0af46..6105c6d 100644 (file)
@@ -4270,7 +4270,8 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
                hw->hw_addr = NULL;
                netif_device_detach(netdev);
                netdev_err(netdev, "PCIe link lost, device now detached\n");
-               WARN(1, "igc: Failed to read reg 0x%x!\n", reg);
+               WARN(pci_device_is_present(igc->pdev),
+                    "igc: Failed to read reg 0x%x!\n", reg);
        }
 
        return value;
index 1ce2397..b22baea 100644 (file)
@@ -4310,7 +4310,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
                if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
                        set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
 
-               clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
                if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
                        continue;
 
@@ -7946,6 +7945,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
        } ip;
        union {
                struct tcphdr *tcp;
+               struct udphdr *udp;
                unsigned char *hdr;
        } l4;
        u32 paylen, l4_offset;
@@ -7969,7 +7969,8 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
        l4.hdr = skb_checksum_start(skb);
 
        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-       type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+       type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
+                     IXGBE_ADVTXD_TUCMD_L4T_UDP : IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
        /* initialize outer IP header fields */
        if (ip.v4->version == 4) {
@@ -7999,12 +8000,20 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
        /* determine offset of inner transport header */
        l4_offset = l4.hdr - skb->data;
 
-       /* compute length of segmentation header */
-       *hdr_len = (l4.tcp->doff * 4) + l4_offset;
-
        /* remove payload length from inner checksum */
        paylen = skb->len - l4_offset;
-       csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
+
+       if (type_tucmd & IXGBE_ADVTXD_TUCMD_L4T_TCP) {
+               /* compute length of segmentation header */
+               *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+               csum_replace_by_diff(&l4.tcp->check,
+                                    (__force __wsum)htonl(paylen));
+       } else {
+               /* compute length of segmentation header */
+               *hdr_len = sizeof(*l4.udp) + l4_offset;
+               csum_replace_by_diff(&l4.udp->check,
+                                    (__force __wsum)htonl(paylen));
+       }
 
        /* update gso size and bytecount with header size */
        first->gso_segs = skb_shinfo(skb)->gso_segs;
@@ -10190,6 +10199,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
        if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
                return features & ~(NETIF_F_HW_CSUM |
                                    NETIF_F_SCTP_CRC |
+                                   NETIF_F_GSO_UDP_L4 |
                                    NETIF_F_HW_VLAN_CTAG_TX |
                                    NETIF_F_TSO |
                                    NETIF_F_TSO6);
@@ -10198,6 +10208,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
        if (unlikely(network_hdr_len >  IXGBE_MAX_NETWORK_HDR_LEN))
                return features & ~(NETIF_F_HW_CSUM |
                                    NETIF_F_SCTP_CRC |
+                                   NETIF_F_GSO_UDP_L4 |
                                    NETIF_F_TSO |
                                    NETIF_F_TSO6);
 
@@ -10907,7 +10918,7 @@ skip_sriov:
                            IXGBE_GSO_PARTIAL_FEATURES;
 
        if (hw->mac.type >= ixgbe_mac_82599EB)
-               netdev->features |= NETIF_F_SCTP_CRC;
+               netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
 
 #ifdef CONFIG_IXGBE_IPSEC
 #define IXGBE_ESP_FEATURES     (NETIF_F_HW_ESP | \
index c8425d3..e47783c 100644 (file)
@@ -160,16 +160,23 @@ static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
                             (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS));
 }
 #else
-void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
-                           struct mvneta_bm_pool *bm_pool, u8 port_map) {}
-void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-                        u8 port_map) {}
-int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) { return 0; }
-int mvneta_bm_pool_refill(struct mvneta_bm *priv,
-                         struct mvneta_bm_pool *bm_pool) {return 0; }
-struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
-                                         enum mvneta_bm_type type, u8 port_id,
-                                         int pkt_size) { return NULL; }
+static inline void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
+                                         struct mvneta_bm_pool *bm_pool,
+                                         u8 port_map) {}
+static inline void mvneta_bm_bufs_free(struct mvneta_bm *priv,
+                                      struct mvneta_bm_pool *bm_pool,
+                                      u8 port_map) {}
+static inline int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
+{ return 0; }
+static inline int mvneta_bm_pool_refill(struct mvneta_bm *priv,
+                                       struct mvneta_bm_pool *bm_pool)
+{ return 0; }
+static inline struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv,
+                                                       u8 pool_id,
+                                                       enum mvneta_bm_type type,
+                                                       u8 port_id,
+                                                       int pkt_size)
+{ return NULL; }
 
 static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
                                         struct mvneta_bm_pool *bm_pool,
@@ -178,7 +185,8 @@ static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
 static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
                                        struct mvneta_bm_pool *bm_pool)
 { return 0; }
-struct mvneta_bm *mvneta_bm_get(struct device_node *node) { return NULL; }
-void mvneta_bm_put(struct mvneta_bm *priv) {}
+static inline struct mvneta_bm *mvneta_bm_get(struct device_node *node)
+{ return NULL; }
+static inline void mvneta_bm_put(struct mvneta_bm *priv) {}
 #endif /* CONFIG_MVNETA_BM */
 #endif
index 5fea652..17e24c1 100644 (file)
@@ -2956,14 +2956,13 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
                 * by the hardware, and the information about the buffer is
                 * comprised by the RX descriptor.
                 */
-               if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
-err_drop_frame:
-                       dev->stats.rx_errors++;
-                       mvpp2_rx_error(port, rx_desc);
-                       /* Return the buffer to the pool */
-                       mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
-                       continue;
-               }
+               if (rx_status & MVPP2_RXD_ERR_SUMMARY)
+                       goto err_drop_frame;
+
+               dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
+                                       rx_bytes + MVPP2_MH_SIZE,
+                                       DMA_FROM_DEVICE);
+               prefetch(data);
 
                if (bm_pool->frag_size > PAGE_SIZE)
                        frag_size = 0;
@@ -2982,8 +2981,9 @@ err_drop_frame:
                        goto err_drop_frame;
                }
 
-               dma_unmap_single(dev->dev.parent, dma_addr,
-                                bm_pool->buf_size, DMA_FROM_DEVICE);
+               dma_unmap_single_attrs(dev->dev.parent, dma_addr,
+                                      bm_pool->buf_size, DMA_FROM_DEVICE,
+                                      DMA_ATTR_SKIP_CPU_SYNC);
 
                rcvd_pkts++;
                rcvd_bytes += rx_bytes;
@@ -2994,6 +2994,13 @@ err_drop_frame:
                mvpp2_rx_csum(port, rx_status, skb);
 
                napi_gro_receive(napi, skb);
+               continue;
+
+err_drop_frame:
+               dev->stats.rx_errors++;
+               mvpp2_rx_error(port, rx_desc);
+               /* Return the buffer to the pool */
+               mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
        }
 
        if (rcvd_pkts) {
index ef11cf3..0fe9715 100644 (file)
@@ -57,7 +57,7 @@ static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
        default:
                updated = false;
                break;
-       };
+       }
 
        if (updated) {
                val = mtk_r32(eth, MTK_MAC_MISC);
@@ -143,7 +143,7 @@ static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path)
        default:
                updated = false;
                break;
-       };
+       }
 
        if (updated)
                regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
@@ -174,7 +174,7 @@ static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, int path)
                break;
        default:
                updated = false;
-       };
+       }
 
        if (updated)
                regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
index 4db27df..32d8342 100644 (file)
@@ -93,7 +93,7 @@ int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
        case SPEED_1000:
                val |= SGMII_SPEED_1000;
                break;
-       };
+       }
 
        if (state->duplex == DUPLEX_FULL)
                val |= SGMII_DUPLEX_FULL;
index 4356f3a..1187ef1 100644 (file)
@@ -471,12 +471,31 @@ void mlx4_init_quotas(struct mlx4_dev *dev)
                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
 }
 
-static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
+static int
+mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
+                                struct resource_allocator *res_alloc,
+                                int vf)
 {
-       /* reduce the sink counter */
-       return (dev->caps.max_counters - 1 -
-               (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
-               / MLX4_MAX_PORTS;
+       struct mlx4_active_ports actv_ports;
+       int ports, counters_guaranteed;
+
+       /* For master, only allocate according to the number of phys ports */
+       if (vf == mlx4_master_func_num(dev))
+               return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;
+
+       /* calculate real number of ports for the VF */
+       actv_ports = mlx4_get_active_ports(dev, vf);
+       ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+       counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;
+
+       /* If we do not have enough counters for this VF, do not
+        * allocate any for it. '-1' to reduce the sink counter.
+        */
+       if ((res_alloc->res_reserved + counters_guaranteed) >
+           (dev->caps.max_counters - 1))
+               return 0;
+
+       return counters_guaranteed;
 }
 
 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
@@ -484,7 +503,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, j;
        int t;
-       int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
 
        priv->mfunc.master.res_tracker.slave_list =
                kcalloc(dev->num_slaves, sizeof(struct slave_list),
@@ -603,16 +621,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
                                break;
                        case RES_COUNTER:
                                res_alloc->quota[t] = dev->caps.max_counters;
-                               if (t == mlx4_master_func_num(dev))
-                                       res_alloc->guaranteed[t] =
-                                               MLX4_PF_COUNTERS_PER_PORT *
-                                               MLX4_MAX_PORTS;
-                               else if (t <= max_vfs_guarantee_counter)
-                                       res_alloc->guaranteed[t] =
-                                               MLX4_VF_COUNTERS_PER_PORT *
-                                               MLX4_MAX_PORTS;
-                               else
-                                       res_alloc->guaranteed[t] = 0;
+                               res_alloc->guaranteed[t] =
+                                       mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
                                break;
                        default:
                                break;
index 8d76452..f1a7bc4 100644 (file)
@@ -345,7 +345,7 @@ struct mlx5e_tx_wqe_info {
        u8  num_wqebbs;
        u8  num_dma;
 #ifdef CONFIG_MLX5_EN_TLS
-       skb_frag_t *resync_dump_frag;
+       struct page *resync_dump_frag_page;
 #endif
 };
 
@@ -410,6 +410,7 @@ struct mlx5e_txqsq {
        struct device             *pdev;
        __be32                     mkey_be;
        unsigned long              state;
+       unsigned int               hw_mtu;
        struct hwtstamp_config    *tstamp;
        struct mlx5_clock         *clock;
 
index b3a249b..ac44bbe 100644 (file)
@@ -141,7 +141,7 @@ int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
                                    "Failed to create hv vhca stats agent, err = %ld\n",
                                    PTR_ERR(agent));
 
-               kfree(priv->stats_agent.buf);
+               kvfree(priv->stats_agent.buf);
                return IS_ERR_OR_NULL(agent);
        }
 
@@ -157,5 +157,5 @@ void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv)
                return;
 
        mlx5_hv_vhca_agent_destroy(priv->stats_agent.agent);
-       kfree(priv->stats_agent.buf);
+       kvfree(priv->stats_agent.buf);
 }
index 633b117..7b672ad 100644 (file)
@@ -175,7 +175,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
  *     @port_buffer: <output> port receive buffer configuration
  *     @change: <output>
  *
- *     Update buffer configuration based on pfc configuraiton and
+ *     Update buffer configuration based on pfc configuration and
  *     priority to buffer mapping.
  *     Buffer's lossy bit is changed to:
  *             lossless if there is at least one PFC enabled priority
index f8ee18b..13af725 100644 (file)
@@ -97,15 +97,19 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
        if (ret)
                return ret;
 
-       if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET)
+       if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
+               ip_rt_put(rt);
                return -ENETUNREACH;
+       }
 #else
        return -EOPNOTSUPP;
 #endif
 
        ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev);
-       if (ret < 0)
+       if (ret < 0) {
+               ip_rt_put(rt);
                return ret;
+       }
 
        if (!(*out_ttl))
                *out_ttl = ip4_dst_hoplimit(&rt->dst);
@@ -149,8 +153,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
                *out_ttl = ip6_dst_hoplimit(dst);
 
        ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
-       if (ret < 0)
+       if (ret < 0) {
+               dst_release(dst);
                return ret;
+       }
 #else
        return -EOPNOTSUPP;
 #endif
index 87be967..7c8796d 100644 (file)
 #else
 /* TLS offload requires additional stop_room for:
  *  - a resync SKB.
- * kTLS offload requires additional stop_room for:
- * - static params WQE,
- * - progress params WQE, and
- * - resync DUMP per frag.
+ * kTLS offload requires fixed additional stop_room for:
+ * - a static params WQE, and a progress params WQE.
+ * The additional MTU-depending room for the resync DUMP WQEs
+ * will be calculated and added in runtime.
  */
 #define MLX5E_SQ_TLS_ROOM  \
        (MLX5_SEND_WQE_MAX_WQEBBS + \
-        MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS + \
-        MAX_SKB_FRAGS * MLX5E_KTLS_MAX_DUMP_WQEBBS)
+        MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS)
 #endif
 
 #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
@@ -92,7 +91,7 @@ mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
 
        /* fill sq frag edge with nops to avoid wqe wrapping two pages */
        for (; wi < edge_wi; wi++) {
-               wi->skb        = NULL;
+               memset(wi, 0, sizeof(*wi));
                wi->num_wqebbs = 1;
                mlx5e_post_nop(wq, sq->sqn, &sq->pc);
        }
index d2ff74d..46725cd 100644 (file)
@@ -38,7 +38,7 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
                return -ENOMEM;
 
        tx_priv->expected_seq = start_offload_tcp_sn;
-       tx_priv->crypto_info  = crypto_info;
+       tx_priv->crypto_info  = *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
        mlx5e_set_ktls_tx_priv_ctx(tls_ctx, tx_priv);
 
        /* tc and underlay_qpn values are not in use for tls tis */
index b7298f9..a3efa29 100644 (file)
         MLX5_ST_SZ_BYTES(tls_progress_params))
 #define MLX5E_KTLS_PROGRESS_WQEBBS \
        (DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
-#define MLX5E_KTLS_MAX_DUMP_WQEBBS 2
+
+struct mlx5e_dump_wqe {
+       struct mlx5_wqe_ctrl_seg ctrl;
+       struct mlx5_wqe_data_seg data;
+};
+
+#define MLX5E_KTLS_DUMP_WQEBBS \
+       (DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
 
 enum {
        MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD     = 0,
@@ -37,7 +44,7 @@ enum {
 
 struct mlx5e_ktls_offload_context_tx {
        struct tls_offload_context_tx *tx_ctx;
-       struct tls_crypto_info *crypto_info;
+       struct tls12_crypto_info_aes_gcm_128 crypto_info;
        u32 expected_seq;
        u32 tisn;
        u32 key_id;
@@ -86,14 +93,28 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
                                         struct mlx5e_tx_wqe **wqe, u16 *pi);
 void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
                                           struct mlx5e_tx_wqe_info *wi,
-                                          struct mlx5e_sq_dma *dma);
-
+                                          u32 *dma_fifo_cc);
+static inline u8
+mlx5e_ktls_dumps_num_wqebbs(struct mlx5e_txqsq *sq, unsigned int nfrags,
+                           unsigned int sync_len)
+{
+       /* Given the MTU and sync_len, calculates an upper bound for the
+        * number of WQEBBs needed for the TX resync DUMP WQEs of a record.
+        */
+       return MLX5E_KTLS_DUMP_WQEBBS *
+               (nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu));
+}
 #else
 
 static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
 {
 }
 
+static inline void
+mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+                                     struct mlx5e_tx_wqe_info *wi,
+                                     u32 *dma_fifo_cc) {}
+
 #endif
 
 #endif /* __MLX5E_TLS_H__ */
index d195366..778dab1 100644 (file)
@@ -24,17 +24,12 @@ enum {
 static void
 fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
 {
-       struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
-       struct tls12_crypto_info_aes_gcm_128 *info;
+       struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
        char *initial_rn, *gcm_iv;
        u16 salt_sz, rec_seq_sz;
        char *salt, *rec_seq;
        u8 tls_version;
 
-       if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
-               return;
-
-       info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
        EXTRACT_INFO_FIELDS;
 
        gcm_iv      = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
@@ -108,16 +103,15 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
 }
 
 static void tx_fill_wi(struct mlx5e_txqsq *sq,
-                      u16 pi, u8 num_wqebbs,
-                      skb_frag_t *resync_dump_frag,
-                      u32 num_bytes)
+                      u16 pi, u8 num_wqebbs, u32 num_bytes,
+                      struct page *page)
 {
        struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
 
-       wi->skb              = NULL;
-       wi->num_wqebbs       = num_wqebbs;
-       wi->resync_dump_frag = resync_dump_frag;
-       wi->num_bytes        = num_bytes;
+       memset(wi, 0, sizeof(*wi));
+       wi->num_wqebbs = num_wqebbs;
+       wi->num_bytes  = num_bytes;
+       wi->resync_dump_frag_page = page;
 }
 
 void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
@@ -145,7 +139,7 @@ post_static_params(struct mlx5e_txqsq *sq,
 
        umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
        build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
-       tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0);
+       tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);
        sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
 }
 
@@ -159,7 +153,7 @@ post_progress_params(struct mlx5e_txqsq *sq,
 
        wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
        build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
-       tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0);
+       tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL);
        sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
 }
 
@@ -169,6 +163,14 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
                              bool skip_static_post, bool fence_first_post)
 {
        bool progress_fence = skip_static_post || !fence_first_post;
+       struct mlx5_wq_cyc *wq = &sq->wq;
+       u16 contig_wqebbs_room, pi;
+
+       pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+       contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+       if (unlikely(contig_wqebbs_room <
+                    MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS))
+               mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
 
        if (!skip_static_post)
                post_static_params(sq, priv_tx, fence_first_post);
@@ -180,29 +182,36 @@ struct tx_sync_info {
        u64 rcd_sn;
        s32 sync_len;
        int nr_frags;
-       skb_frag_t *frags[MAX_SKB_FRAGS];
+       skb_frag_t frags[MAX_SKB_FRAGS];
+};
+
+enum mlx5e_ktls_sync_retval {
+       MLX5E_KTLS_SYNC_DONE,
+       MLX5E_KTLS_SYNC_FAIL,
+       MLX5E_KTLS_SYNC_SKIP_NO_DATA,
 };
 
-static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
-                            u32 tcp_seq, struct tx_sync_info *info)
+static enum mlx5e_ktls_sync_retval
+tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
+                u32 tcp_seq, struct tx_sync_info *info)
 {
        struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
+       enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
        struct tls_record_info *record;
        int remaining, i = 0;
        unsigned long flags;
-       bool ret = true;
 
        spin_lock_irqsave(&tx_ctx->lock, flags);
        record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
 
        if (unlikely(!record)) {
-               ret = false;
+               ret = MLX5E_KTLS_SYNC_FAIL;
                goto out;
        }
 
        if (unlikely(tcp_seq < tls_record_start_seq(record))) {
-               if (!tls_record_is_start_marker(record))
-                       ret = false;
+               ret = tls_record_is_start_marker(record) ?
+                       MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
                goto out;
        }
 
@@ -211,13 +220,13 @@ static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
        while (remaining > 0) {
                skb_frag_t *frag = &record->frags[i];
 
-               __skb_frag_ref(frag);
+               get_page(skb_frag_page(frag));
                remaining -= skb_frag_size(frag);
-               info->frags[i++] = frag;
+               info->frags[i++] = *frag;
        }
        /* reduce the part which will be sent with the original SKB */
        if (remaining < 0)
-               skb_frag_size_add(info->frags[i - 1], remaining);
+               skb_frag_size_add(&info->frags[i - 1], remaining);
        info->nr_frags = i;
 out:
        spin_unlock_irqrestore(&tx_ctx->lock, flags);
@@ -229,17 +238,12 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
                      struct mlx5e_ktls_offload_context_tx *priv_tx,
                      u64 rcd_sn)
 {
-       struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
-       struct tls12_crypto_info_aes_gcm_128 *info;
+       struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
        __be64 rn_be = cpu_to_be64(rcd_sn);
        bool skip_static_post;
        u16 rec_seq_sz;
        char *rec_seq;
 
-       if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
-               return;
-
-       info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
        rec_seq = info->rec_seq;
        rec_seq_sz = sizeof(info->rec_seq);
 
@@ -250,11 +254,6 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
        mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
 }
 
-struct mlx5e_dump_wqe {
-       struct mlx5_wqe_ctrl_seg ctrl;
-       struct mlx5_wqe_data_seg data;
-};
-
 static int
 tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
 {
@@ -262,7 +261,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
        struct mlx5_wqe_data_seg *dseg;
        struct mlx5e_dump_wqe *wqe;
        dma_addr_t dma_addr = 0;
-       u8  num_wqebbs;
        u16 ds_cnt;
        int fsz;
        u16 pi;
@@ -270,7 +268,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
        wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
 
        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
-       num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
 
        cseg = &wqe->ctrl;
        dseg = &wqe->data;
@@ -291,24 +288,27 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
        dseg->byte_count = cpu_to_be32(fsz);
        mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
 
-       tx_fill_wi(sq, pi, num_wqebbs, frag, fsz);
-       sq->pc += num_wqebbs;
-
-       WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
-            "unexpected DUMP num_wqebbs, %d > %d",
-            num_wqebbs, MLX5E_KTLS_MAX_DUMP_WQEBBS);
+       tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
+       sq->pc += MLX5E_KTLS_DUMP_WQEBBS;
 
        return 0;
 }
 
 void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
                                           struct mlx5e_tx_wqe_info *wi,
-                                          struct mlx5e_sq_dma *dma)
+                                          u32 *dma_fifo_cc)
 {
-       struct mlx5e_sq_stats *stats = sq->stats;
+       struct mlx5e_sq_stats *stats;
+       struct mlx5e_sq_dma *dma;
+
+       if (!wi->resync_dump_frag_page)
+               return;
+
+       dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
+       stats = sq->stats;
 
        mlx5e_tx_dma_unmap(sq->pdev, dma);
-       __skb_frag_unref(wi->resync_dump_frag);
+       put_page(wi->resync_dump_frag_page);
        stats->tls_dump_packets++;
        stats->tls_dump_bytes += wi->num_bytes;
 }
@@ -318,25 +318,31 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
 
-       tx_fill_wi(sq, pi, 1, NULL, 0);
+       tx_fill_wi(sq, pi, 1, 0, NULL);
 
        mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
 }
 
-static struct sk_buff *
+static enum mlx5e_ktls_sync_retval
 mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
                         struct mlx5e_txqsq *sq,
-                        struct sk_buff *skb,
+                        int datalen,
                         u32 seq)
 {
        struct mlx5e_sq_stats *stats = sq->stats;
        struct mlx5_wq_cyc *wq = &sq->wq;
+       enum mlx5e_ktls_sync_retval ret;
        struct tx_sync_info info = {};
        u16 contig_wqebbs_room, pi;
        u8 num_wqebbs;
-       int i;
-
-       if (!tx_sync_info_get(priv_tx, seq, &info)) {
+       int i = 0;
+
+       ret = tx_sync_info_get(priv_tx, seq, &info);
+       if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
+               if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
+                       stats->tls_skip_no_sync_data++;
+                       return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
+               }
                /* We might get here if a retransmission reaches the driver
                 * after the relevant record is acked.
                 * It should be safe to drop the packet in this case
@@ -346,13 +352,8 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
        }
 
        if (unlikely(info.sync_len < 0)) {
-               u32 payload;
-               int headln;
-
-               headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
-               payload = skb->len - headln;
-               if (likely(payload <= -info.sync_len))
-                       return skb;
+               if (likely(datalen <= -info.sync_len))
+                       return MLX5E_KTLS_SYNC_DONE;
 
                stats->tls_drop_bypass_req++;
                goto err_out;
@@ -360,30 +361,62 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 
        stats->tls_ooo++;
 
-       num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS +
-               (info.nr_frags ? info.nr_frags * MLX5E_KTLS_MAX_DUMP_WQEBBS : 1);
+       tx_post_resync_params(sq, priv_tx, info.rcd_sn);
+
+       /* If no dump WQE was sent, we need to have a fence NOP WQE before the
+        * actual data xmit.
+        */
+       if (!info.nr_frags) {
+               tx_post_fence_nop(sq);
+               return MLX5E_KTLS_SYNC_DONE;
+       }
+
+       num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
        pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+
        if (unlikely(contig_wqebbs_room < num_wqebbs))
                mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
 
        tx_post_resync_params(sq, priv_tx, info.rcd_sn);
 
-       for (i = 0; i < info.nr_frags; i++)
-               if (tx_post_resync_dump(sq, info.frags[i], priv_tx->tisn, !i))
-                       goto err_out;
+       for (; i < info.nr_frags; i++) {
+               unsigned int orig_fsz, frag_offset = 0, n = 0;
+               skb_frag_t *f = &info.frags[i];
 
-       /* If no dump WQE was sent, we need to have a fence NOP WQE before the
-        * actual data xmit.
-        */
-       if (!info.nr_frags)
-               tx_post_fence_nop(sq);
+               orig_fsz = skb_frag_size(f);
 
-       return skb;
+               do {
+                       bool fence = !(i || frag_offset);
+                       unsigned int fsz;
+
+                       n++;
+                       fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
+                       skb_frag_size_set(f, fsz);
+                       if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
+                               page_ref_add(skb_frag_page(f), n - 1);
+                               goto err_out;
+                       }
+
+                       skb_frag_off_add(f, fsz);
+                       frag_offset += fsz;
+               } while (frag_offset < orig_fsz);
+
+               page_ref_add(skb_frag_page(f), n - 1);
+       }
+
+       return MLX5E_KTLS_SYNC_DONE;
 
 err_out:
-       dev_kfree_skb_any(skb);
-       return NULL;
+       for (; i < info.nr_frags; i++)
+               /* The put_page() here undoes the page ref obtained in tx_sync_info_get().
+                * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
+                * released only upon their completions (or in mlx5e_free_txqsq_descs,
+                * if channel closes).
+                */
+               put_page(skb_frag_page(&info.frags[i]));
+
+       return MLX5E_KTLS_SYNC_FAIL;
 }
 
 struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
@@ -419,10 +452,15 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
 
        seq = ntohl(tcp_hdr(skb)->seq);
        if (unlikely(priv_tx->expected_seq != seq)) {
-               skb = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, skb, seq);
-               if (unlikely(!skb))
+               enum mlx5e_ktls_sync_retval ret =
+                       mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
+
+               if (likely(ret == MLX5E_KTLS_SYNC_DONE))
+                       *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
+               else if (ret == MLX5E_KTLS_SYNC_FAIL)
+                       goto err_out;
+               else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
                        goto out;
-               *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
        }
 
        priv_tx->expected_seq = seq + datalen;
index c5a9c20..327c93a 100644 (file)
@@ -1021,7 +1021,7 @@ static bool ext_link_mode_requested(const unsigned long *adver)
 {
 #define MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT ETHTOOL_LINK_MODE_50000baseKR_Full_BIT
        int size = __ETHTOOL_LINK_MODE_MASK_NBITS - MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT;
-       __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
+       __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = {0,};
 
        bitmap_set(modes, MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT, size);
        return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
index 7569287..772bfdb 100644 (file)
@@ -1128,6 +1128,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
        sq->txq_ix    = txq_ix;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
        sq->min_inline_mode = params->tx_min_inline_mode;
+       sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
        sq->stats     = &c->priv->channel_stats[c->ix].sq[tc];
        sq->stop_room = MLX5E_SQ_STOP_ROOM;
        INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
@@ -1135,10 +1136,14 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
                set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
        if (MLX5_IPSEC_DEV(c->priv->mdev))
                set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
+#ifdef CONFIG_MLX5_EN_TLS
        if (mlx5_accel_is_tls_device(c->priv->mdev)) {
                set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
-               sq->stop_room += MLX5E_SQ_TLS_ROOM;
+               sq->stop_room += MLX5E_SQ_TLS_ROOM +
+                       mlx5e_ktls_dumps_num_wqebbs(sq, MAX_SKB_FRAGS,
+                                                   TLS_MAX_PAYLOAD_SIZE);
        }
+#endif
 
        param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1349,9 +1354,13 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
        /* last doorbell out, godspeed .. */
        if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
                u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+               struct mlx5e_tx_wqe_info *wi;
                struct mlx5e_tx_wqe *nop;
 
-               sq->db.wqe_info[pi].skb = NULL;
+               wi = &sq->db.wqe_info[pi];
+
+               memset(wi, 0, sizeof(*wi));
+               wi->num_wqebbs = 1;
                nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
                mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
        }
index 95892a3..cd9bb7c 100644 (file)
@@ -611,8 +611,8 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
 
        mutex_lock(&esw->offloads.encap_tbl_lock);
        encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
-       if (e->compl_result || (encap_connected == neigh_connected &&
-                               ether_addr_equal(e->h_dest, ha)))
+       if (e->compl_result < 0 || (encap_connected == neigh_connected &&
+                                   ether_addr_equal(e->h_dest, ha)))
                goto unlock;
 
        mlx5e_take_all_encap_flows(e, &flow_list);
index d6a5472..82cffb3 100644 (file)
@@ -1386,8 +1386,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
        if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
                return 0;
 
-       if (rq->cqd.left)
+       if (rq->cqd.left) {
                work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
+               if (rq->cqd.left || work_done >= budget)
+                       goto out;
+       }
 
        cqe = mlx5_cqwq_get_cqe(cqwq);
        if (!cqe) {
index 840ec94..bbff8d8 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/udp.h>
 #include <net/udp.h>
 #include "en.h"
+#include "en/port.h"
 
 enum {
        MLX5E_ST_LINK_STATE,
@@ -80,22 +81,12 @@ static int mlx5e_test_link_state(struct mlx5e_priv *priv)
 
 static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
 {
-       u32 out[MLX5_ST_SZ_DW(ptys_reg)];
-       u32 eth_proto_oper;
-       int i;
+       u32 speed;
 
        if (!netif_carrier_ok(priv->netdev))
                return 1;
 
-       if (mlx5_query_port_ptys(priv->mdev, out, sizeof(out), MLX5_PTYS_EN, 1))
-               return 1;
-
-       eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
-       for (i = 0; i < MLX5E_LINK_MODES_NUMBER; i++) {
-               if (eth_proto_oper & MLX5E_PROT_MASK(i))
-                       return 0;
-       }
-       return 1;
+       return mlx5e_port_linkspeed(priv->mdev, &speed);
 }
 
 struct mlx5ehdr {
index ac6fdcd..7e6ebd0 100644 (file)
@@ -52,11 +52,12 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
 #endif
 
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
@@ -288,11 +289,12 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
                        s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
                        s->tx_tls_ctx               += sq_stats->tls_ctx;
                        s->tx_tls_ooo               += sq_stats->tls_ooo;
+                       s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
+                       s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
                        s->tx_tls_resync_bytes      += sq_stats->tls_resync_bytes;
+                       s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
                        s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
                        s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
-                       s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
-                       s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
 #endif
                        s->tx_cqes              += sq_stats->cqes;
                }
@@ -1472,10 +1474,12 @@ static const struct counter_desc sq_stats_desc[] = {
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
-       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
-       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
 #endif
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
index 79f261b..869f350 100644 (file)
@@ -129,11 +129,12 @@ struct mlx5e_sw_stats {
        u64 tx_tls_encrypted_bytes;
        u64 tx_tls_ctx;
        u64 tx_tls_ooo;
+       u64 tx_tls_dump_packets;
+       u64 tx_tls_dump_bytes;
        u64 tx_tls_resync_bytes;
+       u64 tx_tls_skip_no_sync_data;
        u64 tx_tls_drop_no_sync_data;
        u64 tx_tls_drop_bypass_req;
-       u64 tx_tls_dump_packets;
-       u64 tx_tls_dump_bytes;
 #endif
 
        u64 rx_xsk_packets;
@@ -273,11 +274,12 @@ struct mlx5e_sq_stats {
        u64 tls_encrypted_bytes;
        u64 tls_ctx;
        u64 tls_ooo;
+       u64 tls_dump_packets;
+       u64 tls_dump_bytes;
        u64 tls_resync_bytes;
+       u64 tls_skip_no_sync_data;
        u64 tls_drop_no_sync_data;
        u64 tls_drop_bypass_req;
-       u64 tls_dump_packets;
-       u64 tls_dump_bytes;
 #endif
        /* less likely accessed in data path */
        u64 csum_none;
index 3e78a72..fda0b37 100644 (file)
@@ -1278,8 +1278,10 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
        mlx5_eswitch_del_vlan_action(esw, attr);
 
        for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
-               if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
+               if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
                        mlx5e_detach_encap(priv, flow, out_index);
+                       kfree(attr->parse_attr->tun_info[out_index]);
+               }
        kvfree(attr->parse_attr);
 
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
@@ -1559,6 +1561,7 @@ static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entr
                        mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
        }
 
+       kfree(e->tun_info);
        kfree(e->encap_header);
        kfree_rcu(e, rcu);
 }
@@ -2972,6 +2975,13 @@ mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
        return NULL;
 }
 
+static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
+{
+       size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
+
+       return kmemdup(tun_info, tun_size, GFP_KERNEL);
+}
+
 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow,
                              struct net_device *mirred_dev,
@@ -3028,13 +3038,15 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
        refcount_set(&e->refcnt, 1);
        init_completion(&e->res_ready);
 
+       tun_info = dup_tun_info(tun_info);
+       if (!tun_info) {
+               err = -ENOMEM;
+               goto out_err_init;
+       }
        e->tun_info = tun_info;
        err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
-       if (err) {
-               kfree(e);
-               e = NULL;
-               goto out_err;
-       }
+       if (err)
+               goto out_err_init;
 
        INIT_LIST_HEAD(&e->flows);
        hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
@@ -3075,6 +3087,12 @@ out_err:
        if (e)
                mlx5e_encap_put(priv, e);
        return err;
+
+out_err_init:
+       mutex_unlock(&esw->offloads.encap_tbl_lock);
+       kfree(tun_info);
+       kfree(e);
+       return err;
 }
 
 static int parse_tc_vlan_action(struct mlx5e_priv *priv,
@@ -3160,7 +3178,7 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
                               struct mlx5_esw_flow_attr *attr,
                               u32 *action)
 {
-       int nest_level = vlan_get_encap_level(attr->parse_attr->filter_dev);
+       int nest_level = attr->parse_attr->filter_dev->lower_level;
        struct flow_action_entry vlan_act = {
                .id = FLOW_ACTION_VLAN_POP,
        };
@@ -3295,7 +3313,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                        } else if (encap) {
                                parse_attr->mirred_ifindex[attr->out_count] =
                                        out_dev->ifindex;
-                               parse_attr->tun_info[attr->out_count] = info;
+                               parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
+                               if (!parse_attr->tun_info[attr->out_count])
+                                       return -ENOMEM;
                                encap = false;
                                attr->dests[attr->out_count].flags |=
                                        MLX5_ESW_DEST_ENCAP;
index d3a67a9..67dc4f0 100644 (file)
@@ -403,7 +403,10 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
                                 struct mlx5_err_cqe *err_cqe)
 {
-       u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq);
+       struct mlx5_cqwq *wq = &sq->cq.wq;
+       u32 ci;
+
+       ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
 
        netdev_err(sq->channel->netdev,
                   "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
@@ -479,14 +482,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
                        skb = wi->skb;
 
                        if (unlikely(!skb)) {
-#ifdef CONFIG_MLX5_EN_TLS
-                               if (wi->resync_dump_frag) {
-                                       struct mlx5e_sq_dma *dma =
-                                               mlx5e_dma_get(sq, dma_fifo_cc++);
-
-                                       mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, dma);
-                               }
-#endif
+                               mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
                                sqcc += wi->num_wqebbs;
                                continue;
                        }
@@ -542,29 +538,38 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
 {
        struct mlx5e_tx_wqe_info *wi;
        struct sk_buff *skb;
+       u32 dma_fifo_cc;
+       u16 sqcc;
        u16 ci;
        int i;
 
-       while (sq->cc != sq->pc) {
-               ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
+       sqcc = sq->cc;
+       dma_fifo_cc = sq->dma_fifo_cc;
+
+       while (sqcc != sq->pc) {
+               ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
                wi = &sq->db.wqe_info[ci];
                skb = wi->skb;
 
-               if (!skb) { /* nop */
-                       sq->cc++;
+               if (!skb) {
+                       mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
+                       sqcc += wi->num_wqebbs;
                        continue;
                }
 
                for (i = 0; i < wi->num_dma; i++) {
                        struct mlx5e_sq_dma *dma =
-                               mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+                               mlx5e_dma_get(sq, dma_fifo_cc++);
 
                        mlx5e_tx_dma_unmap(sq->pdev, dma);
                }
 
                dev_kfree_skb_any(skb);
-               sq->cc += wi->num_wqebbs;
+               sqcc += wi->num_wqebbs;
        }
+
+       sq->dma_fifo_cc = dma_fifo_cc;
+       sq->cc = sqcc;
 }
 
 #ifdef CONFIG_MLX5_CORE_IPOIB
index 00d71db..369499e 100644 (file)
@@ -285,7 +285,6 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 
        mlx5_eswitch_set_rule_source_port(esw, spec, attr);
 
-       spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
        if (attr->outer_match_level != MLX5_MATCH_NONE)
                spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
 
index 1d55a32..7879e17 100644 (file)
@@ -177,22 +177,32 @@ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
        memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
 }
 
+static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
+                                               const struct mlx5_flow_spec *spec)
+{
+       u32 port_mask, port_value;
+
+       if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
+               return spec->flow_context.flow_source == MLX5_VPORT_UPLINK;
+
+       port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
+                            misc_parameters.source_port);
+       port_value = MLX5_GET(fte_match_param, spec->match_value,
+                             misc_parameters.source_port);
+       return (port_mask & port_value & 0xffff) == MLX5_VPORT_UPLINK;
+}
+
 bool
 mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
                              struct mlx5_flow_act *flow_act,
                              struct mlx5_flow_spec *spec)
 {
-       u32 port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
-                                misc_parameters.source_port);
-       u32 port_value = MLX5_GET(fte_match_param, spec->match_value,
-                                 misc_parameters.source_port);
-
        if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table))
                return false;
 
        /* push vlan on RX */
        return (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) &&
-               ((port_mask & port_value) == MLX5_VPORT_UPLINK);
+               mlx5_eswitch_offload_is_uplink_port(esw, spec);
 }
 
 struct mlx5_flow_handle *
index 4c50efe..6102113 100644 (file)
@@ -464,8 +464,10 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
        }
 
        err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
-       if (err)
+       if (err) {
+               kvfree(in);
                goto err_cqwq;
+       }
 
        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
        MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
index 579c306..3c816e8 100644 (file)
@@ -507,7 +507,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
                                MLX5_SET(dest_format_struct, in_dests,
                                         destination_eswitch_owner_vhca_id,
                                         dst->dest_attr.vport.vhca_id);
-                               if (extended_dest) {
+                               if (extended_dest &&
+                                   dst->dest_attr.vport.pkt_reformat) {
                                        MLX5_SET(dest_format_struct, in_dests,
                                                 packet_reformat,
                                                 !!(dst->dest_attr.vport.flags &
index be3c3c7..e718170 100644 (file)
@@ -576,7 +576,7 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
                return -ENOMEM;
        err = mlx5_crdump_collect(dev, cr_data);
        if (err)
-               return err;
+               goto free_data;
 
        if (priv_ctx) {
                struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
index 2b59f84..e1a90f5 100644 (file)
@@ -1198,7 +1198,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
        if (err)
                goto err_thermal_init;
 
-       if (mlxsw_driver->params_register && !reload)
+       if (mlxsw_driver->params_register)
                devlink_params_publish(devlink);
 
        return 0;
@@ -1273,7 +1273,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
                        return;
        }
 
-       if (mlxsw_core->driver->params_unregister && !reload)
+       if (mlxsw_core->driver->params_unregister)
                devlink_params_unpublish(devlink);
        mlxsw_thermal_fini(mlxsw_core->thermal);
        mlxsw_hwmon_fini(mlxsw_core->hwmon);
@@ -2017,6 +2017,35 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
 }
 EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
 
+int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module)
+{
+       enum mlxsw_reg_pmtm_module_type module_type;
+       char pmtm_pl[MLXSW_REG_PMTM_LEN];
+       int err;
+
+       mlxsw_reg_pmtm_pack(pmtm_pl, module);
+       err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
+       if (err)
+               return err;
+       mlxsw_reg_pmtm_unpack(pmtm_pl, &module_type);
+
+       /* Here we need to get the module width according to the module type. */
+
+       switch (module_type) {
+       case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X: /* fall through */
+       case MLXSW_REG_PMTM_MODULE_TYPE_BP_QSFP:
+               return 4;
+       case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X:
+               return 2;
+       case MLXSW_REG_PMTM_MODULE_TYPE_BP_SFP: /* fall through */
+       case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X:
+               return 1;
+       default:
+               return -EINVAL;
+       }
+}
+EXPORT_SYMBOL(mlxsw_core_module_max_width);
+
 static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
                                    const char *buf, size_t size)
 {
index f250370..0d18bee 100644 (file)
@@ -200,6 +200,7 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
 struct devlink_port *
 mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
                                 u8 local_port);
+int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module);
 
 int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
 bool mlxsw_core_schedule_work(struct work_struct *work);
index 2b3aec4..e0d7d2d 100644 (file)
@@ -27,7 +27,7 @@
 
 #define MLXSW_PCI_SW_RESET                     0xF0010
 #define MLXSW_PCI_SW_RESET_RST_BIT             BIT(0)
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS       20000
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS       900000
 #define MLXSW_PCI_SW_RESET_WAIT_MSECS          100
 #define MLXSW_PCI_FW_READY                     0xA1844
 #define MLXSW_PCI_FW_READY_MASK                        0xFFFF
index a33eeef..741fd29 100644 (file)
@@ -24,8 +24,6 @@
 
 #define MLXSW_PORT_DONT_CARE           0xFF
 
-#define MLXSW_PORT_MODULE_MAX_WIDTH    4
-
 enum mlxsw_port_admin_status {
        MLXSW_PORT_ADMIN_STATUS_UP = 1,
        MLXSW_PORT_ADMIN_STATUS_DOWN = 2,
index adb63a2..bec035e 100644 (file)
@@ -3969,6 +3969,7 @@ MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
  * 1 - Lane 0 is used.
  * 2 - Lanes 0 and 1 are used.
  * 4 - Lanes 0, 1, 2 and 3 are used.
+ * 8 - Lanes 0-7 are used.
  * Access: RW
  */
 MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
@@ -3983,14 +3984,14 @@ MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false);
  * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
  * Access: RW
  */
-MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 0x00, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 4, 0x04, 0x00, false);
 
 /* reg_pmlp_rx_lane
  * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
  * equal to Tx lane.
  * Access: RW
  */
-MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 0x00, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 4, 0x04, 0x00, false);
 
 static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
 {
@@ -5374,6 +5375,55 @@ static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port,
                                 MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL : 0);
 }
 
+/* PMTM - Port Module Type Mapping Register
+ * ----------------------------------------
+ * The PMTM allows query or configuration of module types.
+ */
+#define MLXSW_REG_PMTM_ID 0x5067
+#define MLXSW_REG_PMTM_LEN 0x10
+
+MLXSW_REG_DEFINE(pmtm, MLXSW_REG_PMTM_ID, MLXSW_REG_PMTM_LEN);
+
+/* reg_pmtm_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtm, module, 0x00, 16, 8);
+
+enum mlxsw_reg_pmtm_module_type {
+       /* Backplane with 4 lanes */
+       MLXSW_REG_PMTM_MODULE_TYPE_BP_4X,
+       /* QSFP */
+       MLXSW_REG_PMTM_MODULE_TYPE_BP_QSFP,
+       /* SFP */
+       MLXSW_REG_PMTM_MODULE_TYPE_BP_SFP,
+       /* Backplane with single lane */
+       MLXSW_REG_PMTM_MODULE_TYPE_BP_1X = 4,
+       /* Backplane with two lanes */
+       MLXSW_REG_PMTM_MODULE_TYPE_BP_2X = 8,
+       /* Chip2Chip */
+       MLXSW_REG_PMTM_MODULE_TYPE_C2C = 10,
+};
+
+/* reg_pmtm_module_type
+ * Module type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmtm, module_type, 0x04, 0, 4);
+
+static inline void mlxsw_reg_pmtm_pack(char *payload, u8 module)
+{
+       MLXSW_REG_ZERO(pmtm, payload);
+       mlxsw_reg_pmtm_module_set(payload, module);
+}
+
+static inline void
+mlxsw_reg_pmtm_unpack(char *payload,
+                     enum mlxsw_reg_pmtm_module_type *module_type)
+{
+       *module_type = mlxsw_reg_pmtm_module_type_get(payload);
+}
+
 /* HTGT - Host Trap Group Table
  * ----------------------------
  * Configures the properties for forwarding to CPU.
@@ -8680,7 +8730,7 @@ mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(char *payload, u8 ttl,
  * properties.
  */
 #define MLXSW_REG_MPAR_ID 0x901B
-#define MLXSW_REG_MPAR_LEN 0x08
+#define MLXSW_REG_MPAR_LEN 0x0C
 
 MLXSW_REG_DEFINE(mpar, MLXSW_REG_MPAR_ID, MLXSW_REG_MPAR_LEN);
 
@@ -10544,6 +10594,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
        MLXSW_REG(pbmc),
        MLXSW_REG(pspa),
        MLXSW_REG(pplr),
+       MLXSW_REG(pmtm),
        MLXSW_REG(htgt),
        MLXSW_REG(hpkt),
        MLXSW_REG(rgcr),
index 85f919f..6534184 100644 (file)
@@ -26,6 +26,7 @@ enum mlxsw_res_id {
        MLXSW_RES_ID_MAX_LAG_MEMBERS,
        MLXSW_RES_ID_LOCAL_PORTS_IN_1X,
        MLXSW_RES_ID_LOCAL_PORTS_IN_2X,
+       MLXSW_RES_ID_LOCAL_PORTS_IN_4X,
        MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER,
        MLXSW_RES_ID_CELL_SIZE,
        MLXSW_RES_ID_MAX_HEADROOM_SIZE,
@@ -82,6 +83,7 @@ static u16 mlxsw_res_ids[] = {
        [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
        [MLXSW_RES_ID_LOCAL_PORTS_IN_1X] = 0x2610,
        [MLXSW_RES_ID_LOCAL_PORTS_IN_2X] = 0x2611,
+       [MLXSW_RES_ID_LOCAL_PORTS_IN_4X] = 0x2612,
        [MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805,       /* Bytes */
        [MLXSW_RES_ID_CELL_SIZE] = 0x2803,      /* Bytes */
        [MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811,      /* Bytes */
index 1275d21..ea4cc2a 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/inetdevice.h>
 #include <linux/netlink.h>
 #include <linux/jhash.h>
+#include <linux/log2.h>
 #include <net/switchdev.h>
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_mirred.h>
@@ -48,7 +49,7 @@
 
 #define MLXSW_SP1_FWREV_MAJOR 13
 #define MLXSW_SP1_FWREV_MINOR 2000
-#define MLXSW_SP1_FWREV_SUBMINOR 1886
+#define MLXSW_SP1_FWREV_SUBMINOR 2308
 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
 
 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -63,6 +64,21 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
        "." __stringify(MLXSW_SP1_FWREV_MINOR) \
        "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
 
+#define MLXSW_SP2_FWREV_MAJOR 29
+#define MLXSW_SP2_FWREV_MINOR 2000
+#define MLXSW_SP2_FWREV_SUBMINOR 2308
+
+static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
+       .major = MLXSW_SP2_FWREV_MAJOR,
+       .minor = MLXSW_SP2_FWREV_MINOR,
+       .subminor = MLXSW_SP2_FWREV_SUBMINOR,
+};
+
+#define MLXSW_SP2_FW_FILENAME \
+       "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
+       "." __stringify(MLXSW_SP2_FWREV_MINOR) \
+       "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"
+
 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
@@ -733,35 +749,69 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
 }
 
-static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
-                                        u8 local_port, u8 *p_module,
-                                        u8 *p_width, u8 *p_lane)
+static int
+mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+                             struct mlxsw_sp_port_mapping *port_mapping)
 {
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
+       bool separate_rxtx;
+       u8 module;
+       u8 width;
        int err;
+       int i;
 
        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
        if (err)
                return err;
-       *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
-       *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
-       *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
+       module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
+       width = mlxsw_reg_pmlp_width_get(pmlp_pl);
+       separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
+
+       if (width && !is_power_of_2(width)) {
+               dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
+                       local_port);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < width; i++) {
+               if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
+                       dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
+                               local_port);
+                       return -EINVAL;
+               }
+               if (separate_rxtx &&
+                   mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
+                   mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
+                       dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
+                               local_port);
+                       return -EINVAL;
+               }
+               if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
+                       dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
+                               local_port);
+                       return -EINVAL;
+               }
+       }
+
+       port_mapping->module = module;
+       port_mapping->width = width;
+       port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
        return 0;
 }
 
-static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
-                                   u8 module, u8 width, u8 lane)
+static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
 {
+       struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
        int i;
 
        mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
-       mlxsw_reg_pmlp_width_set(pmlp_pl, width);
-       for (i = 0; i < width; i++) {
-               mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
-               mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
+       mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
+       for (i = 0; i < port_mapping->width; i++) {
+               mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
+               mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
        }
 
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
@@ -3465,7 +3515,7 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
 };
 
 static int
-mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
+mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        const struct mlxsw_sp_port_type_speed_ops *ops;
@@ -3481,7 +3531,7 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
                                   &base_speed);
        if (err)
                return err;
-       upper_speed = base_speed * width;
+       upper_speed = base_speed * mlxsw_sp_port->mapping.width;
 
        eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed);
        ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
@@ -3642,15 +3692,18 @@ static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
 }
 
 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-                               bool split, u8 module, u8 width, u8 lane)
+                               u8 split_base_local_port,
+                               struct mlxsw_sp_port_mapping *port_mapping)
 {
        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+       bool split = !!split_base_local_port;
        struct mlxsw_sp_port *mlxsw_sp_port;
        struct net_device *dev;
        int err;
 
        err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
-                                  module + 1, split, lane / width,
+                                  port_mapping->module + 1, split,
+                                  port_mapping->lane / port_mapping->width,
                                   mlxsw_sp->base_mac,
                                   sizeof(mlxsw_sp->base_mac));
        if (err) {
@@ -3672,9 +3725,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
        mlxsw_sp_port->local_port = local_port;
        mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
        mlxsw_sp_port->split = split;
-       mlxsw_sp_port->mapping.module = module;
-       mlxsw_sp_port->mapping.width = width;
-       mlxsw_sp_port->mapping.lane = lane;
+       mlxsw_sp_port->split_base_local_port = split_base_local_port;
+       mlxsw_sp_port->mapping = *port_mapping;
        mlxsw_sp_port->link.autoneg = 1;
        INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
        INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
@@ -3699,7 +3751,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
        dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
        dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
 
-       err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
+       err = mlxsw_sp_port_module_map(mlxsw_sp_port);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
                        mlxsw_sp_port->local_port);
@@ -3741,7 +3793,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                goto err_port_system_port_mapping_set;
        }
 
-       err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
+       err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
                        mlxsw_sp_port->local_port);
@@ -3964,14 +4016,13 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
                if (mlxsw_sp_port_created(mlxsw_sp, i))
                        mlxsw_sp_port_remove(mlxsw_sp, i);
        mlxsw_sp_cpu_port_remove(mlxsw_sp);
-       kfree(mlxsw_sp->port_to_module);
        kfree(mlxsw_sp->ports);
 }
 
 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
 {
        unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
-       u8 module, width, lane;
+       struct mlxsw_sp_port_mapping *port_mapping;
        size_t alloc_size;
        int i;
        int err;
@@ -3981,66 +4032,98 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
        if (!mlxsw_sp->ports)
                return -ENOMEM;
 
-       mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
-                                                GFP_KERNEL);
-       if (!mlxsw_sp->port_to_module) {
-               err = -ENOMEM;
-               goto err_port_to_module_alloc;
-       }
-
        err = mlxsw_sp_cpu_port_create(mlxsw_sp);
        if (err)
                goto err_cpu_port_create;
 
        for (i = 1; i < max_ports; i++) {
-               /* Mark as invalid */
-               mlxsw_sp->port_to_module[i] = -1;
-
-               err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
-                                                   &width, &lane);
-               if (err)
-                       goto err_port_module_info_get;
-               if (!width)
+               port_mapping = mlxsw_sp->port_mapping[i];
+               if (!port_mapping)
                        continue;
-               mlxsw_sp->port_to_module[i] = module;
-               err = mlxsw_sp_port_create(mlxsw_sp, i, false,
-                                          module, width, lane);
+               err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
                if (err)
                        goto err_port_create;
        }
        return 0;
 
 err_port_create:
-err_port_module_info_get:
        for (i--; i >= 1; i--)
                if (mlxsw_sp_port_created(mlxsw_sp, i))
                        mlxsw_sp_port_remove(mlxsw_sp, i);
        mlxsw_sp_cpu_port_remove(mlxsw_sp);
 err_cpu_port_create:
-       kfree(mlxsw_sp->port_to_module);
-err_port_to_module_alloc:
        kfree(mlxsw_sp->ports);
        return err;
 }
 
-static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
+static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
+{
+       unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+       struct mlxsw_sp_port_mapping port_mapping;
+       int i;
+       int err;
+
+       mlxsw_sp->port_mapping = kcalloc(max_ports,
+                                        sizeof(struct mlxsw_sp_port_mapping *),
+                                        GFP_KERNEL);
+       if (!mlxsw_sp->port_mapping)
+               return -ENOMEM;
+
+       for (i = 1; i < max_ports; i++) {
+               err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
+               if (err)
+                       goto err_port_module_info_get;
+               if (!port_mapping.width)
+                       continue;
+
+               mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
+                                                   sizeof(port_mapping),
+                                                   GFP_KERNEL);
+               if (!mlxsw_sp->port_mapping[i])
+                       goto err_port_module_info_dup;
+       }
+       return 0;
+
+err_port_module_info_get:
+err_port_module_info_dup:
+       for (i--; i >= 1; i--)
+               kfree(mlxsw_sp->port_mapping[i]);
+       kfree(mlxsw_sp->port_mapping);
+       return err;
+}
+
+static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
+{
+       int i;
+
+       for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
+               kfree(mlxsw_sp->port_mapping[i]);
+       kfree(mlxsw_sp->port_mapping);
+}
+
+static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
 {
-       u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
+       u8 offset = (local_port - 1) % max_width;
 
        return local_port - offset;
 }
 
-static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
-                                     u8 module, unsigned int count, u8 offset)
+static int
+mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
+                          struct mlxsw_sp_port_mapping *port_mapping,
+                          unsigned int count, u8 offset)
 {
-       u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
+       struct mlxsw_sp_port_mapping split_port_mapping;
        int err, i;
 
+       split_port_mapping = *port_mapping;
+       split_port_mapping.width /= count;
        for (i = 0; i < count; i++) {
                err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
-                                          true, module, width, i * width);
+                                          base_port, &split_port_mapping);
                if (err)
                        goto err_port_create;
+               split_port_mapping.lane += split_port_mapping.width;
        }
 
        return 0;
@@ -4053,45 +4136,55 @@ err_port_create:
 }
 
 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
-                                        u8 base_port, unsigned int count)
+                                        u8 base_port,
+                                        unsigned int count, u8 offset)
 {
-       u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
+       struct mlxsw_sp_port_mapping *port_mapping;
        int i;
 
-       /* Split by four means we need to re-create two ports, otherwise
-        * only one.
-        */
-       count = count / 2;
-
-       for (i = 0; i < count; i++) {
-               local_port = base_port + i * 2;
-               if (mlxsw_sp->port_to_module[local_port] < 0)
+       /* Go over original unsplit ports in the gap and recreate them. */
+       for (i = 0; i < count * offset; i++) {
+               port_mapping = mlxsw_sp->port_mapping[base_port + i];
+               if (!port_mapping)
                        continue;
-               module = mlxsw_sp->port_to_module[local_port];
-
-               mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
-                                    width, 0);
+               mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
        }
 }
 
+static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
+                                      unsigned int count,
+                                      unsigned int max_width)
+{
+       enum mlxsw_res_id local_ports_in_x_res_id;
+       int split_width = max_width / count;
+
+       if (split_width == 1)
+               local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
+       else if (split_width == 2)
+               local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
+       else if (split_width == 4)
+               local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
+       else
+               return -EINVAL;
+
+       if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
+               return -EINVAL;
+       return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
+}
+
 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
                               unsigned int count,
                               struct netlink_ext_ack *extack)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
-       u8 local_ports_in_1x, local_ports_in_2x, offset;
+       struct mlxsw_sp_port_mapping port_mapping;
        struct mlxsw_sp_port *mlxsw_sp_port;
-       u8 module, cur_width, base_port;
+       int max_width;
+       u8 base_port;
+       int offset;
        int i;
        int err;
 
-       if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
-           !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
-               return -EIO;
-
-       local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
-       local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);
-
        mlxsw_sp_port = mlxsw_sp->ports[local_port];
        if (!mlxsw_sp_port) {
                dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
@@ -4100,47 +4193,70 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
                return -EINVAL;
        }
 
-       module = mlxsw_sp_port->mapping.module;
-       cur_width = mlxsw_sp_port->mapping.width;
+       /* Split ports cannot be split. */
+       if (mlxsw_sp_port->split) {
+               netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
+               NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
+               return -EINVAL;
+       }
+
+       max_width = mlxsw_core_module_max_width(mlxsw_core,
+                                               mlxsw_sp_port->mapping.module);
+       if (max_width < 0) {
+               netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
+               NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
+               return max_width;
+       }
 
-       if (count != 2 && count != 4) {
-               netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
-               NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports");
+       /* Split port with non-max and 1 module width cannot be split. */
+       if (mlxsw_sp_port->mapping.width != max_width || max_width == 1) {
+               netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
+               NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
                return -EINVAL;
        }
 
-       if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
-               netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
-               NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
+       if (count == 1 || !is_power_of_2(count) || count > max_width) {
+               netdev_err(mlxsw_sp_port->dev, "Invalid split count\n");
+               NL_SET_ERR_MSG_MOD(extack, "Invalid split count");
                return -EINVAL;
        }
 
-       /* Make sure we have enough slave (even) ports for the split. */
-       if (count == 2) {
-               offset = local_ports_in_2x;
-               base_port = local_port;
-               if (mlxsw_sp->ports[base_port + local_ports_in_2x]) {
-                       netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
-                       NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
-                       return -EINVAL;
-               }
-       } else {
-               offset = local_ports_in_1x;
-               base_port = mlxsw_sp_cluster_base_port_get(local_port);
-               if (mlxsw_sp->ports[base_port + 1] ||
-                   mlxsw_sp->ports[base_port + 3]) {
+       offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
+       if (offset < 0) {
+               netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
+               NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
+               return -EINVAL;
+       }
+
+       /* Only in case max split is being done, the local port and
+        * base port may differ.
+        */
+       base_port = count == max_width ?
+                   mlxsw_sp_cluster_base_port_get(local_port, max_width) :
+                   local_port;
+
+       for (i = 0; i < count * offset; i++) {
+               /* Expect base port to exist and also the one in the middle in
+                * case of maximal split count.
+                */
+               if (i == 0 || (count == max_width && i == count / 2))
+                       continue;
+
+               if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
                        netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
                        NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
                        return -EINVAL;
                }
        }
 
+       port_mapping = mlxsw_sp_port->mapping;
+
        for (i = 0; i < count; i++)
                if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
                        mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
 
-       err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count,
-                                        offset);
+       err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
+                                        count, offset);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
                goto err_port_split_create;
@@ -4149,7 +4265,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
        return 0;
 
 err_port_split_create:
-       mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
+       mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
        return err;
 }
 
@@ -4157,19 +4273,13 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
                                 struct netlink_ext_ack *extack)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
-       u8 local_ports_in_1x, local_ports_in_2x, offset;
        struct mlxsw_sp_port *mlxsw_sp_port;
-       u8 cur_width, base_port;
        unsigned int count;
+       int max_width;
+       u8 base_port;
+       int offset;
        int i;
 
-       if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
-           !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
-               return -EIO;
-
-       local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
-       local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);
-
        mlxsw_sp_port = mlxsw_sp->ports[local_port];
        if (!mlxsw_sp_port) {
                dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
@@ -4184,25 +4294,30 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
                return -EINVAL;
        }
 
-       cur_width = mlxsw_sp_port->mapping.width;
-       count = cur_width == 1 ? 4 : 2;
+       max_width = mlxsw_core_module_max_width(mlxsw_core,
+                                               mlxsw_sp_port->mapping.module);
+       if (max_width < 0) {
+               netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
+               NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
+               return max_width;
+       }
 
-       if (count == 2)
-               offset = local_ports_in_2x;
-       else
-               offset = local_ports_in_1x;
+       count = max_width / mlxsw_sp_port->mapping.width;
 
-       base_port = mlxsw_sp_cluster_base_port_get(local_port);
+       offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
+       if (WARN_ON(offset < 0)) {
+               netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
+               NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
+               return -EINVAL;
+       }
 
-       /* Determine which ports to remove. */
-       if (count == 2 && local_port >= base_port + 2)
-               base_port = base_port + 2;
+       base_port = mlxsw_sp_port->split_base_local_port;
 
        for (i = 0; i < count; i++)
                if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
                        mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
 
-       mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
+       mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
 
        return 0;
 }
@@ -4909,6 +5024,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
                goto err_dpipe_init;
        }
 
+       err = mlxsw_sp_port_module_info_init(mlxsw_sp);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
+               goto err_port_module_info_init;
+       }
+
        err = mlxsw_sp_ports_create(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
@@ -4918,6 +5039,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
        return 0;
 
 err_ports_create:
+       mlxsw_sp_port_module_info_fini(mlxsw_sp);
+err_port_module_info_init:
        mlxsw_sp_dpipe_fini(mlxsw_sp);
 err_dpipe_init:
        unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
@@ -4988,6 +5111,8 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
 
+       mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev;
+       mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME;
        mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
        mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
        mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
@@ -5008,6 +5133,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
 
        mlxsw_sp_ports_remove(mlxsw_sp);
+       mlxsw_sp_port_module_info_fini(mlxsw_sp);
        mlxsw_sp_dpipe_fini(mlxsw_sp);
        unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
                                          &mlxsw_sp->netdevice_nb);
@@ -6649,3 +6775,4 @@ MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
+MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
index a5fdd84..347bec9 100644 (file)
@@ -32,8 +32,6 @@
 
 #define MLXSW_SP_MID_MAX 7000
 
-#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4
-
 #define MLXSW_SP_PORT_BASE_SPEED_25G 25000 /* Mb/s */
 #define MLXSW_SP_PORT_BASE_SPEED_50G 50000 /* Mb/s */
 
@@ -143,6 +141,12 @@ struct mlxsw_sp_port_type_speed_ops;
 struct mlxsw_sp_ptp_state;
 struct mlxsw_sp_ptp_ops;
 
+struct mlxsw_sp_port_mapping {
+       u8 module;
+       u8 width;
+       u8 lane;
+};
+
 struct mlxsw_sp {
        struct mlxsw_sp_port **ports;
        struct mlxsw_core *core;
@@ -150,7 +154,7 @@ struct mlxsw_sp {
        unsigned char base_mac[ETH_ALEN];
        const unsigned char *mac_mask;
        struct mlxsw_sp_upper *lags;
-       int *port_to_module;
+       struct mlxsw_sp_port_mapping **port_mapping;
        struct mlxsw_sp_sb *sb;
        struct mlxsw_sp_bridge *bridge;
        struct mlxsw_sp_router *router;
@@ -259,11 +263,11 @@ struct mlxsw_sp_port {
                struct ieee_pfc *pfc;
                enum mlxsw_reg_qpts_trust_state trust_state;
        } dcb;
-       struct {
-               u8 module;
-               u8 width;
-               u8 lane;
-       } mapping;
+       struct mlxsw_sp_port_mapping mapping; /* mapping is constant during the
+                                              * mlxsw_sp_port lifetime, however
+                                              * the same localport can have
+                                              * different mapping.
+                                              */
        /* TC handles */
        struct list_head mall_tc_list;
        struct {
@@ -287,6 +291,7 @@ struct mlxsw_sp_port {
                u16 egr_types;
                struct mlxsw_sp_ptp_port_stats stats;
        } ptp;
+       u8 split_base_local_port;
 };
 
 struct mlxsw_sp_port_type_speed_ops {
index 5fd9a72..968f090 100644 (file)
@@ -470,7 +470,7 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
                                size_t prs_len)
 {
        /* Round down, unlike mlxsw_sp_bytes_cells(). */
-       u32 sb_cells = mlxsw_sp->sb->sb_size / mlxsw_sp->sb->cell_size;
+       u32 sb_cells = div_u64(mlxsw_sp->sb->sb_size, mlxsw_sp->sb->cell_size);
        u32 rest_cells[2] = {sb_cells, sb_cells};
        int i;
        int err;
@@ -1044,12 +1044,12 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
        if (pr->freeze_mode && pr->mode != mode) {
                NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
                return -EINVAL;
-       };
+       }
 
        if (pr->freeze_size && pr->size != size) {
                NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
                return -EINVAL;
-       };
+       }
 
        return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
                                    pool_size, false);
index 4d1bce4..344539c 100644 (file)
@@ -261,8 +261,15 @@ static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
                port->pvid = vid;
 
        /* Untagged egress vlan clasification */
-       if (untagged)
+       if (untagged && port->vid != vid) {
+               if (port->vid) {
+                       dev_err(ocelot->dev,
+                               "Port already has a native VLAN: %d\n",
+                               port->vid);
+                       return -EBUSY;
+               }
                port->vid = vid;
+       }
 
        ocelot_vlan_port_apply(ocelot, port);
 
@@ -934,7 +941,7 @@ end:
 static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
                                  u16 vid)
 {
-       return ocelot_vlan_vid_add(dev, vid, false, true);
+       return ocelot_vlan_vid_add(dev, vid, false, false);
 }
 
 static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
index 1eef446..79d72c8 100644 (file)
@@ -299,22 +299,6 @@ static void nfp_repr_clean(struct nfp_repr *repr)
        nfp_port_free(repr->port);
 }
 
-static struct lock_class_key nfp_repr_netdev_xmit_lock_key;
-static struct lock_class_key nfp_repr_netdev_addr_lock_key;
-
-static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
-                                          struct netdev_queue *txq,
-                                          void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
-}
-
-static void nfp_repr_set_lockdep_class(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock, &nfp_repr_netdev_addr_lock_key);
-       netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
-}
-
 int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
                  u32 cmsg_port_id, struct nfp_port *port,
                  struct net_device *pf_netdev)
@@ -324,8 +308,6 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
        u32 repr_cap = nn->tlv_caps.repr_cap;
        int err;
 
-       nfp_repr_set_lockdep_class(netdev);
-
        repr->port = port;
        repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
        if (!repr->dst)
index a9bb12c..60fd14d 100644 (file)
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
 
+#include <linux/printk.h>
+#include <linux/dynamic_debug.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/rtnetlink.h>
index 52eb303..3590ea7 100644 (file)
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
 
+#include <linux/printk.h>
+#include <linux/dynamic_debug.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/utsname.h>
index 0aeac31..97e7994 100644 (file)
@@ -402,7 +402,6 @@ static void ionic_rx_fill_cb(void *arg)
 
 void ionic_rx_empty(struct ionic_queue *q)
 {
-       struct ionic_rxq_sg_desc *sg_desc;
        struct ionic_desc_info *cur;
        struct ionic_rxq_desc *desc;
        unsigned int i;
@@ -412,7 +411,6 @@ void ionic_rx_empty(struct ionic_queue *q)
                desc->addr = 0;
                desc->len = 0;
 
-               sg_desc = cur->sg_desc;
                for (i = 0; i < cur->npages; i++) {
                        if (likely(cur->pages[i].page)) {
                                ionic_rx_page_free(q, cur->pages[i].page,
index d473b52..9ad568d 100644 (file)
 #include <linux/slab.h>
 #include "qed.h"
 
-/* Fields of IGU PF CONFIGRATION REGISTER */
+/* Fields of IGU PF CONFIGURATION REGISTER */
 #define IGU_PF_CONF_FUNC_EN       (0x1 << 0)    /* function enable        */
 #define IGU_PF_CONF_MSI_MSIX_EN   (0x1 << 1)    /* MSI/MSIX enable        */
 #define IGU_PF_CONF_INT_LINE_EN   (0x1 << 2)    /* INT enable             */
 #define IGU_PF_CONF_ATTN_BIT_EN   (0x1 << 3)    /* attention enable       */
 #define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4)    /* single ISR mode enable */
 #define IGU_PF_CONF_SIMD_MODE     (0x1 << 5)    /* simd all ones mode     */
-/* Fields of IGU VF CONFIGRATION REGISTER */
+/* Fields of IGU VF CONFIGURATION REGISTER */
 #define IGU_VF_CONF_FUNC_EN        (0x1 << 0)  /* function enable        */
 #define IGU_VF_CONF_MSI_MSIX_EN    (0x1 << 1)  /* MSI/MSIX enable        */
 #define IGU_VF_CONF_SINGLE_ISR_EN  (0x1 << 4)  /* single ISR mode enable */
index 2ce7009..38f7f40 100644 (file)
 #define QED_ROCE_QPS                   (8192)
 #define QED_ROCE_DPIS                  (8)
 #define QED_RDMA_SRQS                   QED_ROCE_QPS
-#define QED_NVM_CFG_SET_FLAGS          0xE
-#define QED_NVM_CFG_SET_PF_FLAGS       0x1E
 #define QED_NVM_CFG_GET_FLAGS          0xA
 #define QED_NVM_CFG_GET_PF_FLAGS       0x1A
+#define QED_NVM_CFG_MAX_ATTRS          50
 
 static char version[] =
        "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -2255,6 +2254,7 @@ static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
 {
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        u8 entity_id, len, buf[32];
+       bool need_nvm_init = true;
        struct qed_ptt *ptt;
        u16 cfg_id, count;
        int rc = 0, i;
@@ -2271,8 +2271,10 @@ static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
 
        DP_VERBOSE(cdev, NETIF_MSG_DRV,
                   "Read config ids: num_attrs = %0d\n", count);
-       /* NVM CFG ID attributes */
-       for (i = 0; i < count; i++) {
+       /* NVM CFG ID attributes. Start loop index from 1 to avoid additional
+        * arithmetic operations in the implementation.
+        */
+       for (i = 1; i <= count; i++) {
                cfg_id = *((u16 *)*data);
                *data += 2;
                entity_id = **data;
@@ -2282,8 +2284,21 @@ static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
                memcpy(buf, *data, len);
                *data += len;
 
-               flags = entity_id ? QED_NVM_CFG_SET_PF_FLAGS :
-                       QED_NVM_CFG_SET_FLAGS;
+               flags = 0;
+               if (need_nvm_init) {
+                       flags |= QED_NVM_CFG_OPTION_INIT;
+                       need_nvm_init = false;
+               }
+
+               /* Commit to flash and free the resources */
+               if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
+                       flags |= QED_NVM_CFG_OPTION_COMMIT |
+                                QED_NVM_CFG_OPTION_FREE;
+                       need_nvm_init = true;
+               }
+
+               if (entity_id)
+                       flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;
 
                DP_VERBOSE(cdev, NETIF_MSG_DRV,
                           "cfg_id = %d entity = %d len = %d\n", cfg_id,
index 78f77b7..dcb5c91 100644 (file)
@@ -2005,7 +2005,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
            (qed_iov_validate_active_txq(p_hwfn, vf))) {
                vf->b_malicious = true;
                DP_NOTICE(p_hwfn,
-                         "VF [%02x] - considered malicious; Unable to stop RX/TX queuess\n",
+                         "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
                          vf->abs_vf_id);
                status = PFVF_STATUS_MALICIOUS;
                goto out;
index 9a8fd79..368e885 100644 (file)
@@ -305,7 +305,7 @@ void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
 
 /**
  * @brief Read sriov related information and allocated resources
- *  reads from configuraiton space, shmem, etc.
+ *  reads from configuration space, shmem, etc.
  *
  * @param p_hwfn
  *
index 9a6a9a0..d6cfe4f 100644 (file)
@@ -1298,7 +1298,7 @@ void qede_config_rx_mode(struct net_device *ndev)
        rx_mode.type = QED_FILTER_TYPE_RX_MODE;
 
        /* Remove all previous unicast secondary macs and multicast macs
-        * (configrue / leave the primary mac)
+        * (configure / leave the primary mac)
         */
        rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
                                   edev->ndev->dev_addr);
index 91978ce..0704f8b 100644 (file)
@@ -1023,6 +1023,10 @@ static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
 {
        int value;
 
+       /* Work around issue with chip reporting wrong PHY ID */
+       if (reg == MII_PHYSID2)
+               return 0xc912;
+
        r8168dp_2_mdio_start(tp);
 
        value = r8169_mdio_read(tp, reg);
@@ -4710,8 +4714,7 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
        rtl_hw_start_8168g(tp);
 
        /* disable aspm and clock request before access ephy */
-       RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
-       RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+       rtl_hw_aspm_clkreq_enable(tp, false);
        rtl_ephy_init(tp, e_info_8168g_2);
 }
 
index 0ec13f5..ad68eb0 100644 (file)
@@ -946,8 +946,10 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
                /* Extra channels, even those with TXQs (PTP), do not require
                 * PIO resources.
                 */
-               if (!channel->type->want_pio)
+               if (!channel->type->want_pio ||
+                   channel->channel >= efx->xdp_channel_offset)
                        continue;
+
                efx_for_each_channel_tx_queue(tx_queue, channel) {
                        /* We assign the PIO buffers to queues in
                         * reverse order to allow for the following
@@ -1296,8 +1298,9 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
        int rc;
 
        channel_vis = max(efx->n_channels,
-                         (efx->n_tx_channels + efx->n_extra_tx_channels) *
-                         EFX_TXQ_TYPES);
+                         ((efx->n_tx_channels + efx->n_extra_tx_channels) *
+                          EFX_TXQ_TYPES) +
+                          efx->n_xdp_channels * efx->xdp_tx_per_channel);
 
 #ifdef EFX_USE_PIO
        /* Try to allocate PIO buffers if wanted and if the full
@@ -2434,11 +2437,12 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
        /* TSOv2 is a limited resource that can only be configured on a limited
         * number of queues. TSO without checksum offload is not really a thing,
         * so we only enable it for those queues.
-        * TSOv2 cannot be used with Hardware timestamping.
+        * TSOv2 cannot be used with Hardware timestamping, and is never needed
+        * for XDP tx.
         */
        if (csum_offload && (nic_data->datapath_caps2 &
                        (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) &&
-           !tx_queue->timestamping) {
+           !tx_queue->timestamping && !tx_queue->xdp_tx) {
                tso_v2 = true;
                netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
                                channel->channel);
index 2fef740..0fa9972 100644 (file)
@@ -226,6 +226,10 @@ static void efx_fini_napi_channel(struct efx_channel *channel);
 static void efx_fini_struct(struct efx_nic *efx);
 static void efx_start_all(struct efx_nic *efx);
 static void efx_stop_all(struct efx_nic *efx);
+static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
+static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
+                       u32 flags);
 
 #define EFX_ASSERT_RESET_SERIALISED(efx)               \
        do {                                            \
@@ -340,6 +344,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
 
        spent = efx_process_channel(channel, budget);
 
+       xdp_do_flush_map();
+
        if (spent < budget) {
                if (efx_channel_has_rx_queue(channel) &&
                    efx->irq_rx_adaptive &&
@@ -579,9 +585,14 @@ efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
        int number;
 
        number = channel->channel;
-       if (efx->tx_channel_offset == 0) {
+
+       if (number >= efx->xdp_channel_offset &&
+           !WARN_ON_ONCE(!efx->n_xdp_channels)) {
+               type = "-xdp";
+               number -= efx->xdp_channel_offset;
+       } else if (efx->tx_channel_offset == 0) {
                type = "";
-       } else if (channel->channel < efx->tx_channel_offset) {
+       } else if (number < efx->tx_channel_offset) {
                type = "-rx";
        } else {
                type = "-tx";
@@ -651,7 +662,7 @@ static void efx_start_datapath(struct efx_nic *efx)
        efx->rx_dma_len = (efx->rx_prefix_size +
                           EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
                           efx->type->rx_buffer_padding);
-       rx_buf_len = (sizeof(struct efx_rx_page_state) +
+       rx_buf_len = (sizeof(struct efx_rx_page_state) + XDP_PACKET_HEADROOM +
                      efx->rx_ip_align + efx->rx_dma_len);
        if (rx_buf_len <= PAGE_SIZE) {
                efx->rx_scatter = efx->type->always_rx_scatter;
@@ -774,6 +785,7 @@ static void efx_stop_datapath(struct efx_nic *efx)
                efx_for_each_possible_channel_tx_queue(tx_queue, channel)
                        efx_fini_tx_queue(tx_queue);
        }
+       efx->xdp_rxq_info_failed = false;
 }
 
 static void efx_remove_channel(struct efx_channel *channel)
@@ -798,6 +810,8 @@ static void efx_remove_channels(struct efx_nic *efx)
 
        efx_for_each_channel(channel, efx)
                efx_remove_channel(channel);
+
+       kfree(efx->xdp_tx_queues);
 }
 
 int
@@ -1435,6 +1449,101 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
        return count;
 }
 
+static int efx_allocate_msix_channels(struct efx_nic *efx,
+                                     unsigned int max_channels,
+                                     unsigned int extra_channels,
+                                     unsigned int parallelism)
+{
+       unsigned int n_channels = parallelism;
+       int vec_count;
+       int n_xdp_tx;
+       int n_xdp_ev;
+
+       if (efx_separate_tx_channels)
+               n_channels *= 2;
+       n_channels += extra_channels;
+
+       /* To allow XDP transmit to happen from arbitrary NAPI contexts
+        * we allocate a TX queue per CPU. We share event queues across
+        * multiple tx queues, assuming tx and ev queues are both
+        * maximum size.
+        */
+
+       n_xdp_tx = num_possible_cpus();
+       n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
+
+       /* Check resources.
+        * We need a channel per event queue, plus a VI per tx queue.
+        * This may be more pessimistic than it needs to be.
+        */
+       if (n_channels + n_xdp_ev > max_channels) {
+               netif_err(efx, drv, efx->net_dev,
+                         "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+                         n_xdp_ev, n_channels, max_channels);
+               efx->n_xdp_channels = 0;
+               efx->xdp_tx_per_channel = 0;
+               efx->xdp_tx_queue_count = 0;
+       } else {
+               efx->n_xdp_channels = n_xdp_ev;
+               efx->xdp_tx_per_channel = EFX_TXQ_TYPES;
+               efx->xdp_tx_queue_count = n_xdp_tx;
+               n_channels += n_xdp_ev;
+               netif_dbg(efx, drv, efx->net_dev,
+                         "Allocating %d TX and %d event queues for XDP\n",
+                         n_xdp_tx, n_xdp_ev);
+       }
+
+       n_channels = min(n_channels, max_channels);
+
+       vec_count = pci_msix_vec_count(efx->pci_dev);
+       if (vec_count < 0)
+               return vec_count;
+       if (vec_count < n_channels) {
+               netif_err(efx, drv, efx->net_dev,
+                         "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
+                         vec_count, n_channels);
+               netif_err(efx, drv, efx->net_dev,
+                         "WARNING: Performance may be reduced.\n");
+               n_channels = vec_count;
+       }
+
+       efx->n_channels = n_channels;
+
+       /* Do not create the PTP TX queue(s) if PTP uses the MC directly. */
+       if (extra_channels && !efx_ptp_use_mac_tx_timestamps(efx))
+               n_channels--;
+
+       /* Ignore XDP tx channels when creating rx channels. */
+       n_channels -= efx->n_xdp_channels;
+
+       if (efx_separate_tx_channels) {
+               efx->n_tx_channels =
+                       min(max(n_channels / 2, 1U),
+                           efx->max_tx_channels);
+               efx->tx_channel_offset =
+                       n_channels - efx->n_tx_channels;
+               efx->n_rx_channels =
+                       max(n_channels -
+                           efx->n_tx_channels, 1U);
+       } else {
+               efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
+               efx->tx_channel_offset = 0;
+               efx->n_rx_channels = n_channels;
+       }
+
+       if (efx->n_xdp_channels)
+               efx->xdp_channel_offset = efx->tx_channel_offset +
+                                         efx->n_tx_channels;
+       else
+               efx->xdp_channel_offset = efx->n_channels;
+
+       netif_dbg(efx, drv, efx->net_dev,
+                 "Allocating %u RX channels\n",
+                 efx->n_rx_channels);
+
+       return efx->n_channels;
+}
+
 /* Probe the number and type of interrupts we are able to obtain, and
  * the resulting numbers of channels and RX queues.
  */
@@ -1449,19 +1558,19 @@ static int efx_probe_interrupts(struct efx_nic *efx)
                        ++extra_channels;
 
        if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+               unsigned int parallelism = efx_wanted_parallelism(efx);
                struct msix_entry xentries[EFX_MAX_CHANNELS];
                unsigned int n_channels;
 
-               n_channels = efx_wanted_parallelism(efx);
-               if (efx_separate_tx_channels)
-                       n_channels *= 2;
-               n_channels += extra_channels;
-               n_channels = min(n_channels, efx->max_channels);
-
-               for (i = 0; i < n_channels; i++)
-                       xentries[i].entry = i;
-               rc = pci_enable_msix_range(efx->pci_dev,
-                                          xentries, 1, n_channels);
+               rc = efx_allocate_msix_channels(efx, efx->max_channels,
+                                               extra_channels, parallelism);
+               if (rc >= 0) {
+                       n_channels = rc;
+                       for (i = 0; i < n_channels; i++)
+                               xentries[i].entry = i;
+                       rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
+                                                  n_channels);
+               }
                if (rc < 0) {
                        /* Fall back to single channel MSI */
                        netif_err(efx, drv, efx->net_dev,
@@ -1480,21 +1589,6 @@ static int efx_probe_interrupts(struct efx_nic *efx)
                }
 
                if (rc > 0) {
-                       efx->n_channels = n_channels;
-                       if (n_channels > extra_channels)
-                               n_channels -= extra_channels;
-                       if (efx_separate_tx_channels) {
-                               efx->n_tx_channels = min(max(n_channels / 2,
-                                                            1U),
-                                                        efx->max_tx_channels);
-                               efx->n_rx_channels = max(n_channels -
-                                                        efx->n_tx_channels,
-                                                        1U);
-                       } else {
-                               efx->n_tx_channels = min(n_channels,
-                                                        efx->max_tx_channels);
-                               efx->n_rx_channels = n_channels;
-                       }
                        for (i = 0; i < efx->n_channels; i++)
                                efx_get_channel(efx, i)->irq =
                                        xentries[i].vector;
@@ -1506,6 +1600,8 @@ static int efx_probe_interrupts(struct efx_nic *efx)
                efx->n_channels = 1;
                efx->n_rx_channels = 1;
                efx->n_tx_channels = 1;
+               efx->n_xdp_channels = 0;
+               efx->xdp_channel_offset = efx->n_channels;
                rc = pci_enable_msi(efx->pci_dev);
                if (rc == 0) {
                        efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
@@ -1524,12 +1620,14 @@ static int efx_probe_interrupts(struct efx_nic *efx)
                efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
                efx->n_rx_channels = 1;
                efx->n_tx_channels = 1;
+               efx->n_xdp_channels = 0;
+               efx->xdp_channel_offset = efx->n_channels;
                efx->legacy_irq = efx->pci_dev->irq;
        }
 
-       /* Assign extra channels if possible */
+       /* Assign extra channels if possible, before XDP channels */
        efx->n_extra_tx_channels = 0;
-       j = efx->n_channels;
+       j = efx->xdp_channel_offset;
        for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
                if (!efx->extra_channel_type[i])
                        continue;
@@ -1724,29 +1822,50 @@ static void efx_remove_interrupts(struct efx_nic *efx)
        efx->legacy_irq = 0;
 }
 
-static void efx_set_channels(struct efx_nic *efx)
+static int efx_set_channels(struct efx_nic *efx)
 {
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
+       int xdp_queue_number;
 
        efx->tx_channel_offset =
                efx_separate_tx_channels ?
                efx->n_channels - efx->n_tx_channels : 0;
 
+       if (efx->xdp_tx_queue_count) {
+               EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
+
+               /* Allocate array for XDP TX queue lookup. */
+               efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
+                                            sizeof(*efx->xdp_tx_queues),
+                                            GFP_KERNEL);
+               if (!efx->xdp_tx_queues)
+                       return -ENOMEM;
+       }
+
        /* We need to mark which channels really have RX and TX
         * queues, and adjust the TX queue numbers if we have separate
         * RX-only and TX-only channels.
         */
+       xdp_queue_number = 0;
        efx_for_each_channel(channel, efx) {
                if (channel->channel < efx->n_rx_channels)
                        channel->rx_queue.core_index = channel->channel;
                else
                        channel->rx_queue.core_index = -1;
 
-               efx_for_each_channel_tx_queue(tx_queue, channel)
+               efx_for_each_channel_tx_queue(tx_queue, channel) {
                        tx_queue->queue -= (efx->tx_channel_offset *
                                            EFX_TXQ_TYPES);
+
+                       if (efx_channel_is_xdp_tx(channel) &&
+                           xdp_queue_number < efx->xdp_tx_queue_count) {
+                               efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+                               xdp_queue_number++;
+                       }
+               }
        }
+       return 0;
 }
 
 static int efx_probe_nic(struct efx_nic *efx)
@@ -1776,7 +1895,9 @@ static int efx_probe_nic(struct efx_nic *efx)
                if (rc)
                        goto fail1;
 
-               efx_set_channels(efx);
+               rc = efx_set_channels(efx);
+               if (rc)
+                       goto fail1;
 
                /* dimension_resources can fail with EAGAIN */
                rc = efx->type->dimension_resources(efx);
@@ -2022,6 +2143,10 @@ static void efx_stop_all(struct efx_nic *efx)
 
 static void efx_remove_all(struct efx_nic *efx)
 {
+       rtnl_lock();
+       efx_xdp_setup_prog(efx, NULL);
+       rtnl_unlock();
+
        efx_remove_channels(efx);
        efx_remove_filters(efx);
 #ifdef CONFIG_SFC_SRIOV
@@ -2082,6 +2207,8 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
                        channel->irq_moderation_us = rx_usecs;
                else if (efx_channel_has_tx_queues(channel))
                        channel->irq_moderation_us = tx_usecs;
+               else if (efx_channel_is_xdp_tx(channel))
+                       channel->irq_moderation_us = tx_usecs;
        }
 
        return 0;
@@ -2277,6 +2404,17 @@ static void efx_watchdog(struct net_device *net_dev)
        efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
 }
 
+static unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
+{
+       /* The maximum MTU that we can fit in a single page, allowing for
+        * framing, overhead and XDP headroom.
+        */
+       int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) +
+                      efx->rx_prefix_size + efx->type->rx_buffer_padding +
+                      efx->rx_ip_align + XDP_PACKET_HEADROOM;
+
+       return PAGE_SIZE - overhead;
+}
 
 /* Context: process, rtnl_lock() held. */
 static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
@@ -2288,6 +2426,14 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
        if (rc)
                return rc;
 
+       if (rtnl_dereference(efx->xdp_prog) &&
+           new_mtu > efx_xdp_max_mtu(efx)) {
+               netif_err(efx, drv, efx->net_dev,
+                         "Requested MTU of %d too big for XDP (max: %d)\n",
+                         new_mtu, efx_xdp_max_mtu(efx));
+               return -EINVAL;
+       }
+
        netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
 
        efx_device_detach_sync(efx);
@@ -2489,8 +2635,65 @@ static const struct net_device_ops efx_netdev_ops = {
 #endif
        .ndo_udp_tunnel_add     = efx_udp_tunnel_add,
        .ndo_udp_tunnel_del     = efx_udp_tunnel_del,
+       .ndo_xdp_xmit           = efx_xdp_xmit,
+       .ndo_bpf                = efx_xdp
 };
 
+static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
+{
+       struct bpf_prog *old_prog;
+
+       if (efx->xdp_rxq_info_failed) {
+               netif_err(efx, drv, efx->net_dev,
+                         "Unable to bind XDP program due to previous failure of rxq_info\n");
+               return -EINVAL;
+       }
+
+       if (prog && efx->net_dev->mtu > efx_xdp_max_mtu(efx)) {
+               netif_err(efx, drv, efx->net_dev,
+                         "Unable to configure XDP with MTU of %d (max: %d)\n",
+                         efx->net_dev->mtu, efx_xdp_max_mtu(efx));
+               return -EINVAL;
+       }
+
+       old_prog = rtnl_dereference(efx->xdp_prog);
+       rcu_assign_pointer(efx->xdp_prog, prog);
+       /* Release the reference that was originally passed by the caller. */
+       if (old_prog)
+               bpf_prog_put(old_prog);
+
+       return 0;
+}
+
+/* Context: process, rtnl_lock() held. */
+static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+       struct efx_nic *efx = netdev_priv(dev);
+       struct bpf_prog *xdp_prog;
+
+       switch (xdp->command) {
+       case XDP_SETUP_PROG:
+               return efx_xdp_setup_prog(efx, xdp->prog);
+       case XDP_QUERY_PROG:
+               xdp_prog = rtnl_dereference(efx->xdp_prog);
+               xdp->prog_id = xdp_prog ? xdp_prog->aux->id : 0;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
+                       u32 flags)
+{
+       struct efx_nic *efx = netdev_priv(dev);
+
+       if (!netif_running(dev))
+               return -EINVAL;
+
+       return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH);
+}
+
 static void efx_update_name(struct efx_nic *efx)
 {
        strcpy(efx->name, efx->net_dev->name);
index 04fed7c..45c7ae4 100644 (file)
@@ -322,4 +322,7 @@ static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
        return true;
 }
 
+int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+                      bool flush);
+
 #endif /* EFX_EFX_H */
index 86b9658..8db593f 100644 (file)
@@ -83,6 +83,10 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
+       EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
+       EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
+       EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
+       EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
 };
 
 #define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
@@ -399,6 +403,19 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
                        }
                }
        }
+       if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
+               unsigned short xdp;
+
+               for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
+                       n_stats++;
+                       if (strings) {
+                               snprintf(strings, ETH_GSTRING_LEN,
+                                        "tx-xdp-cpu-%hu.tx_packets", xdp);
+                               strings += ETH_GSTRING_LEN;
+                       }
+               }
+       }
+
        return n_stats;
 }
 
@@ -509,6 +526,14 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
                        data++;
                }
        }
+       if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
+               int xdp;
+
+               for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
+                       data[0] = efx->xdp_tx_queues[xdp]->tx_packets;
+                       data++;
+               }
+       }
 
        efx_ptp_update_stats(efx, data);
 }
index 284a1b0..04e49ea 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/i2c.h>
 #include <linux/mtd/mtd.h>
 #include <net/busy_poll.h>
+#include <net/xdp.h>
 
 #include "enum.h"
 #include "bitfield.h"
@@ -136,7 +137,8 @@ struct efx_special_buffer {
  * struct efx_tx_buffer - buffer state for a TX descriptor
  * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
  *     freed when descriptor completes
- * @option: When @flags & %EFX_TX_BUF_OPTION, a NIC-specific option descriptor.
+ * @xdpf: When @flags & %EFX_TX_BUF_XDP, the XDP frame information; its @data
+ *     member is the associated buffer to drop a page reference on.
  * @dma_addr: DMA address of the fragment.
  * @flags: Flags for allocation and DMA mapping type
  * @len: Length of this fragment.
@@ -146,7 +148,10 @@ struct efx_special_buffer {
  * Only valid if @unmap_len != 0.
  */
 struct efx_tx_buffer {
-       const struct sk_buff *skb;
+       union {
+               const struct sk_buff *skb;
+               struct xdp_frame *xdpf;
+       };
        union {
                efx_qword_t option;
                dma_addr_t dma_addr;
@@ -160,6 +165,7 @@ struct efx_tx_buffer {
 #define EFX_TX_BUF_SKB         2       /* buffer is last part of skb */
 #define EFX_TX_BUF_MAP_SINGLE  8       /* buffer was mapped with dma_map_single() */
 #define EFX_TX_BUF_OPTION      0x10    /* empty buffer for option descriptor */
+#define EFX_TX_BUF_XDP         0x20    /* buffer was sent with XDP */
 
 /**
  * struct efx_tx_queue - An Efx TX queue
@@ -189,6 +195,7 @@ struct efx_tx_buffer {
  * @piobuf_offset: Buffer offset to be specified in PIO descriptors
  * @initialised: Has hardware queue been initialised?
  * @timestamping: Is timestamping enabled for this channel?
+ * @xdp_tx: Is this an XDP tx queue?
  * @handle_tso: TSO xmit preparation handler.  Sets up the TSO metadata and
  *     may also map tx data, depending on the nature of the TSO implementation.
  * @read_count: Current read pointer.
@@ -250,6 +257,7 @@ struct efx_tx_queue {
        unsigned int piobuf_offset;
        bool initialised;
        bool timestamping;
+       bool xdp_tx;
 
        /* Function pointers used in the fast path. */
        int (*handle_tso)(struct efx_tx_queue*, struct sk_buff*, bool *);
@@ -363,6 +371,8 @@ struct efx_rx_page_state {
  *     refill was triggered.
  * @recycle_count: RX buffer recycle counter.
  * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
+ * @xdp_rxq_info: XDP specific RX queue information.
+ * @xdp_rxq_info_valid: Is xdp_rxq_info valid data?
  */
 struct efx_rx_queue {
        struct efx_nic *efx;
@@ -394,6 +404,8 @@ struct efx_rx_queue {
        unsigned int slow_fill_count;
        /* Statistics to supplement MAC stats */
        unsigned long rx_packets;
+       struct xdp_rxq_info xdp_rxq_info;
+       bool xdp_rxq_info_valid;
 };
 
 enum efx_sync_events_state {
@@ -441,6 +453,10 @@ enum efx_sync_events_state {
  *     lack of descriptors
  * @n_rx_merge_events: Number of RX merged completion events
  * @n_rx_merge_packets: Number of RX packets completed by merged events
+ * @n_rx_xdp_drops: Count of RX packets intentionally dropped due to XDP
+ * @n_rx_xdp_bad_drops: Count of RX packets dropped due to XDP errors
+ * @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP
+ * @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
  * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
  *     __efx_rx_packet(), or zero if there is none
  * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
@@ -494,6 +510,10 @@ struct efx_channel {
        unsigned int n_rx_nodesc_trunc;
        unsigned int n_rx_merge_events;
        unsigned int n_rx_merge_packets;
+       unsigned int n_rx_xdp_drops;
+       unsigned int n_rx_xdp_bad_drops;
+       unsigned int n_rx_xdp_tx;
+       unsigned int n_rx_xdp_redirect;
 
        unsigned int rx_pkt_n_frags;
        unsigned int rx_pkt_index;
@@ -818,6 +838,8 @@ struct efx_async_filter_insertion {
  * @msi_context: Context for each MSI
  * @extra_channel_types: Types of extra (non-traffic) channels that
  *     should be allocated for this NIC
+ * @xdp_tx_queue_count: Number of entries in @xdp_tx_queues.
+ * @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit.
  * @rxq_entries: Size of receive queues requested by user.
  * @txq_entries: Size of transmit queues requested by user.
  * @txq_stop_thresh: TX queue fill level at or above which we stop it.
@@ -830,6 +852,9 @@ struct efx_async_filter_insertion {
  * @n_rx_channels: Number of channels used for RX (= number of RX queues)
  * @n_tx_channels: Number of channels used for TX
  * @n_extra_tx_channels: Number of extra channels with TX queues
+ * @n_xdp_channels: Number of channels used for XDP TX
+ * @xdp_channel_offset: Offset of zeroth channel used for XDP TX.
+ * @xdp_tx_per_channel: Max number of TX queues on an XDP TX channel.
  * @rx_ip_align: RX DMA address offset to have IP header aligned in
  *     in accordance with NET_IP_ALIGN
  * @rx_dma_len: Current maximum RX DMA length
@@ -894,6 +919,7 @@ struct efx_async_filter_insertion {
  * @loopback_mode: Loopback status
  * @loopback_modes: Supported loopback mode bitmask
  * @loopback_selftest: Offline self-test private state
+ * @xdp_prog: Current XDP programme for this interface
  * @filter_sem: Filter table rw_semaphore, protects existence of @filter_state
  * @filter_state: Architecture-dependent filter table state
  * @rps_mutex: Protects RPS state of all channels
@@ -919,6 +945,8 @@ struct efx_async_filter_insertion {
  * @ptp_data: PTP state data
  * @ptp_warned: has this NIC seen and warned about unexpected PTP events?
  * @vpd_sn: Serial number read from VPD
+ * @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their
+ *      xdp_rxq_info structures?
  * @monitor_work: Hardware monitor workitem
  * @biu_lock: BIU (bus interface unit) lock
  * @last_irq_cpu: Last CPU to handle a possible test interrupt.  This
@@ -966,6 +994,9 @@ struct efx_nic {
        const struct efx_channel_type *
        extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
 
+       unsigned int xdp_tx_queue_count;
+       struct efx_tx_queue **xdp_tx_queues;
+
        unsigned rxq_entries;
        unsigned txq_entries;
        unsigned int txq_stop_thresh;
@@ -984,6 +1015,9 @@ struct efx_nic {
        unsigned tx_channel_offset;
        unsigned n_tx_channels;
        unsigned n_extra_tx_channels;
+       unsigned int n_xdp_channels;
+       unsigned int xdp_channel_offset;
+       unsigned int xdp_tx_per_channel;
        unsigned int rx_ip_align;
        unsigned int rx_dma_len;
        unsigned int rx_buffer_order;
@@ -1053,6 +1087,10 @@ struct efx_nic {
        u64 loopback_modes;
 
        void *loopback_selftest;
+       /* We access loopback_selftest immediately before running XDP,
+        * so we want them next to each other.
+        */
+       struct bpf_prog __rcu *xdp_prog;
 
        struct rw_semaphore filter_sem;
        void *filter_state;
@@ -1082,6 +1120,7 @@ struct efx_nic {
        bool ptp_warned;
 
        char *vpd_sn;
+       bool xdp_rxq_info_failed;
 
        /* The following fields may be written more often */
 
@@ -1473,10 +1512,24 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
        return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
 }
 
+static inline struct efx_channel *
+efx_get_xdp_channel(struct efx_nic *efx, unsigned int index)
+{
+       EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_xdp_channels);
+       return efx->channel[efx->xdp_channel_offset + index];
+}
+
+static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel)
+{
+       return channel->channel - channel->efx->xdp_channel_offset <
+              channel->efx->n_xdp_channels;
+}
+
 static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
 {
-       return channel->type && channel->type->want_txqs &&
-                               channel->type->want_txqs(channel);
+       return efx_channel_is_xdp_tx(channel) ||
+              (channel->type && channel->type->want_txqs &&
+               channel->type->want_txqs(channel));
 }
 
 static inline struct efx_tx_queue *
@@ -1500,7 +1553,8 @@ static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
        else                                                            \
                for (_tx_queue = (_channel)->tx_queue;                  \
                     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
-                            efx_tx_queue_used(_tx_queue);              \
+                            (efx_tx_queue_used(_tx_queue) ||            \
+                             efx_channel_is_xdp_tx(_channel));         \
                     _tx_queue++)
 
 /* Iterate over all possible TX queues belonging to a channel */
index 85ec07f..a7d9841 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/iommu.h>
 #include <net/ip.h>
 #include <net/checksum.h>
+#include <net/xdp.h>
+#include <linux/bpf_trace.h>
 #include "net_driver.h"
 #include "efx.h"
 #include "filter.h"
@@ -27,6 +29,9 @@
 /* Preferred number of descriptors to fill at once */
 #define EFX_RX_PREFERRED_BATCH 8U
 
+/* Maximum rx prefix used by any architecture. */
+#define EFX_MAX_RX_PREFIX_SIZE 16
+
 /* Number of RX buffers to recycle pages for.  When creating the RX page recycle
  * ring, this number is divided by the number of buffers per page to calculate
  * the number of pages to store in the RX page recycle ring.
@@ -95,7 +100,7 @@ void efx_rx_config_page_split(struct efx_nic *efx)
                                      EFX_RX_BUF_ALIGNMENT);
        efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
                ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
-                efx->rx_page_buf_step);
+               (efx->rx_page_buf_step + XDP_PACKET_HEADROOM));
        efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
                efx->rx_bufs_per_page;
        efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
@@ -185,6 +190,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
                page_offset = sizeof(struct efx_rx_page_state);
 
                do {
+                       page_offset += XDP_PACKET_HEADROOM;
+                       dma_addr += XDP_PACKET_HEADROOM;
+
                        index = rx_queue->added_count & rx_queue->ptr_mask;
                        rx_buf = efx_rx_buffer(rx_queue, index);
                        rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
@@ -635,6 +643,123 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
                netif_receive_skb(skb);
 }
 
+/** efx_do_xdp - perform XDP processing on a received packet
+ *
+ * Returns true if packet should still be delivered.
+ */
+static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
+                      struct efx_rx_buffer *rx_buf, u8 **ehp)
+{
+       u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
+       struct efx_rx_queue *rx_queue;
+       struct bpf_prog *xdp_prog;
+       struct xdp_frame *xdpf;
+       struct xdp_buff xdp;
+       u32 xdp_act;
+       s16 offset;
+       int err;
+
+       rcu_read_lock();
+       xdp_prog = rcu_dereference(efx->xdp_prog);
+       if (!xdp_prog) {
+               rcu_read_unlock();
+               return true;
+       }
+
+       rx_queue = efx_channel_get_rx_queue(channel);
+
+       if (unlikely(channel->rx_pkt_n_frags > 1)) {
+               /* We can't do XDP on fragmented packets - drop. */
+               rcu_read_unlock();
+               efx_free_rx_buffers(rx_queue, rx_buf,
+                                   channel->rx_pkt_n_frags);
+               if (net_ratelimit())
+                       netif_err(efx, rx_err, efx->net_dev,
+                                 "XDP is not possible with multiple receive fragments (%d)\n",
+                                 channel->rx_pkt_n_frags);
+               channel->n_rx_xdp_bad_drops++;
+               return false;
+       }
+
+       dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
+                               rx_buf->len, DMA_FROM_DEVICE);
+
+       /* Save the rx prefix. */
+       EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE);
+       memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
+              efx->rx_prefix_size);
+
+       xdp.data = *ehp;
+       xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+
+       /* No support yet for XDP metadata */
+       xdp_set_data_meta_invalid(&xdp);
+       xdp.data_end = xdp.data + rx_buf->len;
+       xdp.rxq = &rx_queue->xdp_rxq_info;
+
+       xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
+       rcu_read_unlock();
+
+       offset = (u8 *)xdp.data - *ehp;
+
+       switch (xdp_act) {
+       case XDP_PASS:
+               /* Fix up rx prefix. */
+               if (offset) {
+                       *ehp += offset;
+                       rx_buf->page_offset += offset;
+                       rx_buf->len -= offset;
+                       memcpy(*ehp - efx->rx_prefix_size, rx_prefix,
+                              efx->rx_prefix_size);
+               }
+               break;
+
+       case XDP_TX:
+               /* Buffer ownership passes to tx on success. */
+               xdpf = convert_to_xdp_frame(&xdp);
+               err = efx_xdp_tx_buffers(efx, 1, &xdpf, true);
+               if (unlikely(err != 1)) {
+                       efx_free_rx_buffers(rx_queue, rx_buf, 1);
+                       if (net_ratelimit())
+                               netif_err(efx, rx_err, efx->net_dev,
+                                         "XDP TX failed (%d)\n", err);
+                       channel->n_rx_xdp_bad_drops++;
+               } else {
+                       channel->n_rx_xdp_tx++;
+               }
+               break;
+
+       case XDP_REDIRECT:
+               err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
+               if (unlikely(err)) {
+                       efx_free_rx_buffers(rx_queue, rx_buf, 1);
+                       if (net_ratelimit())
+                               netif_err(efx, rx_err, efx->net_dev,
+                                         "XDP redirect failed (%d)\n", err);
+                       channel->n_rx_xdp_bad_drops++;
+               } else {
+                       channel->n_rx_xdp_redirect++;
+               }
+               break;
+
+       default:
+               bpf_warn_invalid_xdp_action(xdp_act);
+               efx_free_rx_buffers(rx_queue, rx_buf, 1);
+               channel->n_rx_xdp_bad_drops++;
+               break;
+
+       case XDP_ABORTED:
+               trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
+               /* Fall through */
+       case XDP_DROP:
+               efx_free_rx_buffers(rx_queue, rx_buf, 1);
+               channel->n_rx_xdp_drops++;
+               break;
+       }
+
+       return xdp_act == XDP_PASS;
+}
+
 /* Handle a received packet.  Second half: Touches packet payload. */
 void __efx_rx_packet(struct efx_channel *channel)
 {
@@ -663,6 +788,9 @@ void __efx_rx_packet(struct efx_channel *channel)
                goto out;
        }
 
+       if (!efx_do_xdp(efx, channel, rx_buf, &eh))
+               goto out;
+
        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
 
@@ -731,6 +859,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 {
        struct efx_nic *efx = rx_queue->efx;
        unsigned int max_fill, trigger, max_trigger;
+       int rc = 0;
 
        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
@@ -764,6 +893,19 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
        rx_queue->fast_fill_trigger = trigger;
        rx_queue->refill_enabled = true;
 
+       /* Initialise XDP queue information */
+       rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
+                             rx_queue->core_index);
+
+       if (rc) {
+               netif_err(efx, rx_err, efx->net_dev,
+                         "Failure to initialise XDP queue information rc=%d\n",
+                         rc);
+               efx->xdp_rxq_info_failed = true;
+       } else {
+               rx_queue->xdp_rxq_info_valid = true;
+       }
+
        /* Set up RX descriptor ring */
        efx_nic_init_rx(rx_queue);
 }
@@ -805,6 +947,11 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
        }
        kfree(rx_queue->page_ring);
        rx_queue->page_ring = NULL;
+
+       if (rx_queue->xdp_rxq_info_valid)
+               xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
+
+       rx_queue->xdp_rxq_info_valid = false;
 }
 
 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
index 65e81ec..00c1c44 100644 (file)
@@ -95,6 +95,8 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
                           "TX queue %d transmission id %x complete\n",
                           tx_queue->queue, tx_queue->read_count);
+       } else if (buffer->flags & EFX_TX_BUF_XDP) {
+               xdp_return_frame_rx_napi(buffer->xdpf);
        }
 
        buffer->len = 0;
@@ -597,6 +599,94 @@ err:
        return NETDEV_TX_OK;
 }
 
+static void efx_xdp_return_frames(int n,  struct xdp_frame **xdpfs)
+{
+       int i;
+
+       for (i = 0; i < n; i++)
+               xdp_return_frame_rx_napi(xdpfs[i]);
+}
+
+/* Transmit a packet from an XDP buffer
+ *
+ * Returns number of packets sent on success, error code otherwise.
+ * Runs in NAPI context, either in our poll (for XDP TX) or a different NIC
+ * (for XDP redirect).
+ */
+int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+                      bool flush)
+{
+       struct efx_tx_buffer *tx_buffer;
+       struct efx_tx_queue *tx_queue;
+       struct xdp_frame *xdpf;
+       dma_addr_t dma_addr;
+       unsigned int len;
+       int space;
+       int cpu;
+       int i;
+
+       cpu = raw_smp_processor_id();
+
+       if (!efx->xdp_tx_queue_count ||
+           unlikely(cpu >= efx->xdp_tx_queue_count))
+               return -EINVAL;
+
+       tx_queue = efx->xdp_tx_queues[cpu];
+       if (unlikely(!tx_queue))
+               return -EINVAL;
+
+       if (unlikely(n && !xdpfs))
+               return -EINVAL;
+
+       if (!n)
+               return 0;
+
+       /* Check for available space. We should never need multiple
+        * descriptors per frame.
+        */
+       space = efx->txq_entries +
+               tx_queue->read_count - tx_queue->insert_count;
+
+       for (i = 0; i < n; i++) {
+               xdpf = xdpfs[i];
+
+               if (i >= space)
+                       break;
+
+               /* We'll want a descriptor for this tx. */
+               prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));
+
+               len = xdpf->len;
+
+               /* Map for DMA. */
+               dma_addr = dma_map_single(&efx->pci_dev->dev,
+                                         xdpf->data, len,
+                                         DMA_TO_DEVICE);
+               if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
+                       break;
+
+               /*  Create descriptor and set up for unmapping DMA. */
+               tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
+               tx_buffer->xdpf = xdpf;
+               tx_buffer->flags = EFX_TX_BUF_XDP |
+                                  EFX_TX_BUF_MAP_SINGLE;
+               tx_buffer->dma_offset = 0;
+               tx_buffer->unmap_len = len;
+               tx_queue->tx_packets++;
+       }
+
+       /* Pass mapped frames to hardware. */
+       if (flush && i > 0)
+               efx_nic_push_buffers(tx_queue);
+
+       if (i == 0)
+               return -EIO;
+
+       efx_xdp_return_frames(n - i, xdpfs + i);
+
+       return i;
+}
+
 /* Remove packets from the TX queue
  *
  * This removes packets from the TX queue, up to and including the
@@ -857,6 +947,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
        tx_queue->completed_timestamp_major = 0;
        tx_queue->completed_timestamp_minor = 0;
 
+       tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
+
        /* Set up default function pointers. These may get replaced by
         * efx_nic_init_tx() based off NIC/queue capabilities.
         */
index 28705db..654a2b7 100644 (file)
@@ -2995,6 +2995,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
        } else {
                stmmac_set_desc_addr(priv, first, des);
                tmp_pay_len = pay_len;
+               des += proto_hdr_len;
        }
 
        stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
index bbbc1dc..b517c1a 100644 (file)
@@ -1237,8 +1237,17 @@ static int fjes_probe(struct platform_device *plat_dev)
        adapter->open_guard = false;
 
        adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
+       if (unlikely(!adapter->txrx_wq)) {
+               err = -ENOMEM;
+               goto err_free_netdev;
+       }
+
        adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
                                              WQ_MEM_RECLAIM, 0);
+       if (unlikely(!adapter->control_wq)) {
+               err = -ENOMEM;
+               goto err_free_txrx_wq;
+       }
 
        INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
        INIT_WORK(&adapter->raise_intr_rxdata_task,
@@ -1255,7 +1264,7 @@ static int fjes_probe(struct platform_device *plat_dev)
        hw->hw_res.irq = platform_get_irq(plat_dev, 0);
        err = fjes_hw_init(&adapter->hw);
        if (err)
-               goto err_free_netdev;
+               goto err_free_control_wq;
 
        /* setup MAC address (02:00:00:00:00:[epid])*/
        netdev->dev_addr[0] = 2;
@@ -1277,6 +1286,10 @@ static int fjes_probe(struct platform_device *plat_dev)
 
 err_hw_exit:
        fjes_hw_exit(&adapter->hw);
+err_free_control_wq:
+       destroy_workqueue(adapter->control_wq);
+err_free_txrx_wq:
+       destroy_workqueue(adapter->txrx_wq);
 err_free_netdev:
        free_netdev(netdev);
 err_out:
index fbec711..fbea6f2 100644 (file)
@@ -107,27 +107,6 @@ struct bpqdev {
 
 static LIST_HEAD(bpq_devices);
 
-/*
- * bpqether network devices are paired with ethernet devices below them, so
- * form a special "super class" of normal ethernet devices; split their locks
- * off into a separate class since they always nest.
- */
-static struct lock_class_key bpq_netdev_xmit_lock_key;
-static struct lock_class_key bpq_netdev_addr_lock_key;
-
-static void bpq_set_lockdep_class_one(struct net_device *dev,
-                                     struct netdev_queue *txq,
-                                     void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key);
-}
-
-static void bpq_set_lockdep_class(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key);
-       netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
-}
-
 /* ------------------------------------------------------------------------ */
 
 
@@ -498,7 +477,6 @@ static int bpq_new_device(struct net_device *edev)
        err = register_netdevice(ndev);
        if (err)
                goto error;
-       bpq_set_lockdep_class(ndev);
 
        /* List protected by RTNL */
        list_add_rcu(&bpq->bpq_list, &bpq_devices);
index 39dddcd..963509a 100644 (file)
@@ -982,7 +982,7 @@ static int netvsc_attach(struct net_device *ndev,
        if (netif_running(ndev)) {
                ret = rndis_filter_open(nvdev);
                if (ret)
-                       return ret;
+                       goto err;
 
                rdev = nvdev->extension;
                if (!rdev->link_state)
@@ -990,6 +990,13 @@ static int netvsc_attach(struct net_device *ndev,
        }
 
        return 0;
+
+err:
+       netif_device_detach(ndev);
+
+       rndis_filter_device_remove(hdev, nvdev);
+
+       return ret;
 }
 
 static int netvsc_set_channels(struct net_device *net,
@@ -1807,8 +1814,10 @@ static int netvsc_set_features(struct net_device *ndev,
 
        ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads);
 
-       if (ret)
+       if (ret) {
                features ^= NETIF_F_LRO;
+               ndev->features = features;
+       }
 
 syncvf:
        if (!vf_netdev)
@@ -2335,8 +2344,6 @@ static int netvsc_probe(struct hv_device *dev,
                NETIF_F_HW_VLAN_CTAG_RX;
        net->vlan_features = net->features;
 
-       netdev_lockdep_set_classes(net);
-
        /* MTU range: 68 - 1500 or 65521 */
        net->min_mtu = NETVSC_MTU_MIN;
        if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
index b0ac557..a706622 100644 (file)
@@ -131,8 +131,6 @@ static int ipvlan_init(struct net_device *dev)
        dev->gso_max_segs = phy_dev->gso_max_segs;
        dev->hard_header_len = phy_dev->hard_header_len;
 
-       netdev_lockdep_set_classes(dev);
-
        ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
        if (!ipvlan->pcpu_stats)
                return -ENOMEM;
index cb76373..afd8b2a 100644 (file)
@@ -267,7 +267,6 @@ struct macsec_dev {
        struct pcpu_secy_stats __percpu *stats;
        struct list_head secys;
        struct gro_cells gro_cells;
-       unsigned int nest_level;
 };
 
 /**
@@ -2750,7 +2749,6 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
 
 #define MACSEC_FEATURES \
        (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
-static struct lock_class_key macsec_netdev_addr_lock_key;
 
 static int macsec_dev_init(struct net_device *dev)
 {
@@ -2958,11 +2956,6 @@ static int macsec_get_iflink(const struct net_device *dev)
        return macsec_priv(dev)->real_dev->ifindex;
 }
 
-static int macsec_get_nest_level(struct net_device *dev)
-{
-       return macsec_priv(dev)->nest_level;
-}
-
 static const struct net_device_ops macsec_netdev_ops = {
        .ndo_init               = macsec_dev_init,
        .ndo_uninit             = macsec_dev_uninit,
@@ -2976,7 +2969,6 @@ static const struct net_device_ops macsec_netdev_ops = {
        .ndo_start_xmit         = macsec_start_xmit,
        .ndo_get_stats64        = macsec_get_stats64,
        .ndo_get_iflink         = macsec_get_iflink,
-       .ndo_get_lock_subclass  = macsec_get_nest_level,
 };
 
 static const struct device_type macsec_type = {
@@ -3001,12 +2993,10 @@ static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
 static void macsec_free_netdev(struct net_device *dev)
 {
        struct macsec_dev *macsec = macsec_priv(dev);
-       struct net_device *real_dev = macsec->real_dev;
 
        free_percpu(macsec->stats);
        free_percpu(macsec->secy.tx_sc.stats);
 
-       dev_put(real_dev);
 }
 
 static void macsec_setup(struct net_device *dev)
@@ -3261,14 +3251,6 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
        if (err < 0)
                return err;
 
-       dev_hold(real_dev);
-
-       macsec->nest_level = dev_get_nest_level(real_dev) + 1;
-       netdev_lockdep_set_classes(dev);
-       lockdep_set_class_and_subclass(&dev->addr_list_lock,
-                                      &macsec_netdev_addr_lock_key,
-                                      macsec_get_nest_level(dev));
-
        err = netdev_upper_dev_link(real_dev, dev, extack);
        if (err < 0)
                goto unregister;
index 940192c..34fc59b 100644 (file)
@@ -852,8 +852,6 @@ static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  * "super class" of normal network devices; split their locks off into a
  * separate class since they always nest.
  */
-static struct lock_class_key macvlan_netdev_addr_lock_key;
-
 #define ALWAYS_ON_OFFLOADS \
        (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \
         NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL)
@@ -869,19 +867,6 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
 #define MACVLAN_STATE_MASK \
        ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
 
-static int macvlan_get_nest_level(struct net_device *dev)
-{
-       return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
-}
-
-static void macvlan_set_lockdep_class(struct net_device *dev)
-{
-       netdev_lockdep_set_classes(dev);
-       lockdep_set_class_and_subclass(&dev->addr_list_lock,
-                                      &macvlan_netdev_addr_lock_key,
-                                      macvlan_get_nest_level(dev));
-}
-
 static int macvlan_init(struct net_device *dev)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
@@ -900,8 +885,6 @@ static int macvlan_init(struct net_device *dev)
        dev->gso_max_segs       = lowerdev->gso_max_segs;
        dev->hard_header_len    = lowerdev->hard_header_len;
 
-       macvlan_set_lockdep_class(dev);
-
        vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
        if (!vlan->pcpu_stats)
                return -ENOMEM;
@@ -1161,7 +1144,6 @@ static const struct net_device_ops macvlan_netdev_ops = {
        .ndo_fdb_add            = macvlan_fdb_add,
        .ndo_fdb_del            = macvlan_fdb_del,
        .ndo_fdb_dump           = ndo_dflt_fdb_dump,
-       .ndo_get_lock_subclass  = macvlan_get_nest_level,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = macvlan_dev_poll_controller,
        .ndo_netpoll_setup      = macvlan_dev_netpoll_setup,
@@ -1445,7 +1427,6 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
        vlan->dev      = dev;
        vlan->port     = port;
        vlan->set_features = MACVLAN_FEATURES;
-       vlan->nest_level = dev_get_nest_level(lowerdev) + 1;
 
        vlan->mode     = MACVLAN_MODE_VEPA;
        if (data && data[IFLA_MACVLAN_MODE])
index 468e157..e59a882 100644 (file)
@@ -681,9 +681,11 @@ static void nsim_dev_port_del_all(struct nsim_dev *nsim_dev)
 {
        struct nsim_dev_port *nsim_dev_port, *tmp;
 
+       mutex_lock(&nsim_dev->port_list_lock);
        list_for_each_entry_safe(nsim_dev_port, tmp,
                                 &nsim_dev->port_list, list)
                __nsim_dev_port_del(nsim_dev_port);
+       mutex_unlock(&nsim_dev->port_list_lock);
 }
 
 static int nsim_dev_port_add_all(struct nsim_dev *nsim_dev,
@@ -874,13 +876,28 @@ static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
 int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
 {
        struct nsim_dev *nsim_dev;
+       int i;
+       int err;
 
        nsim_dev = nsim_dev_create(nsim_bus_dev);
        if (IS_ERR(nsim_dev))
                return PTR_ERR(nsim_dev);
        dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
 
+       mutex_lock(&nsim_dev->port_list_lock);
+       for (i = 0; i < nsim_bus_dev->port_count; i++) {
+               err = __nsim_dev_port_add(nsim_dev, i);
+               if (err)
+                       goto err_port_del_all;
+       }
+       mutex_unlock(&nsim_dev->port_list_lock);
        return 0;
+
+err_port_del_all:
+       mutex_unlock(&nsim_dev->port_list_lock);
+       nsim_dev_port_del_all(nsim_dev);
+       nsim_dev_destroy(nsim_dev);
+       return err;
 }
 
 void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev)
index 5816a06..0b95e7a 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/of.h>
 #include <linux/phy.h>
 #include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 
 #include <dt-bindings/net/ti-dp83867.h>
 
@@ -21,8 +23,9 @@
 #define MII_DP83867_PHYCTRL    0x10
 #define MII_DP83867_MICR       0x12
 #define MII_DP83867_ISR                0x13
-#define DP83867_CTRL           0x1f
+#define DP83867_CFG2           0x14
 #define DP83867_CFG3           0x1e
+#define DP83867_CTRL           0x1f
 
 /* Extended Registers */
 #define DP83867_CFG4            0x0031
 #define DP83867_STRAP_STS1     0x006E
 #define DP83867_STRAP_STS2     0x006f
 #define DP83867_RGMIIDCTL      0x0086
+#define DP83867_RXFCFG         0x0134
+#define DP83867_RXFPMD1        0x0136
+#define DP83867_RXFPMD2        0x0137
+#define DP83867_RXFPMD3        0x0138
+#define DP83867_RXFSOP1        0x0139
+#define DP83867_RXFSOP2        0x013A
+#define DP83867_RXFSOP3        0x013B
 #define DP83867_IO_MUX_CFG     0x0170
 #define DP83867_SGMIICTL       0x00D3
 #define DP83867_10M_SGMII_CFG   0x016F
 /* SGMIICTL bits */
 #define DP83867_SGMII_TYPE             BIT(14)
 
+/* RXFCFG bits */
+#define DP83867_WOL_MAGIC_EN           BIT(0)
+#define DP83867_WOL_BCAST_EN           BIT(2)
+#define DP83867_WOL_UCAST_EN           BIT(4)
+#define DP83867_WOL_SEC_EN             BIT(5)
+#define DP83867_WOL_ENH_MAC            BIT(7)
+
 /* STRAP_STS1 bits */
 #define DP83867_STRAP_STS1_RESERVED            BIT(11)
 
@@ -130,6 +147,115 @@ static int dp83867_ack_interrupt(struct phy_device *phydev)
        return 0;
 }
 
+static int dp83867_set_wol(struct phy_device *phydev,
+                          struct ethtool_wolinfo *wol)
+{
+       struct net_device *ndev = phydev->attached_dev;
+       u16 val_rxcfg, val_micr;
+       u8 *mac;
+
+       val_rxcfg = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RXFCFG);
+       val_micr = phy_read(phydev, MII_DP83867_MICR);
+
+       if (wol->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_UCAST |
+                           WAKE_BCAST)) {
+               val_rxcfg |= DP83867_WOL_ENH_MAC;
+               val_micr |= MII_DP83867_MICR_WOL_INT_EN;
+
+               if (wol->wolopts & WAKE_MAGIC) {
+                       mac = (u8 *)ndev->dev_addr;
+
+                       if (!is_valid_ether_addr(mac))
+                               return -EINVAL;
+
+                       phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFPMD1,
+                                     (mac[1] << 8 | mac[0]));
+                       phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFPMD2,
+                                     (mac[3] << 8 | mac[2]));
+                       phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFPMD3,
+                                     (mac[5] << 8 | mac[4]));
+
+                       val_rxcfg |= DP83867_WOL_MAGIC_EN;
+               } else {
+                       val_rxcfg &= ~DP83867_WOL_MAGIC_EN;
+               }
+
+               if (wol->wolopts & WAKE_MAGICSECURE) {
+                       phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1,
+                                     (wol->sopass[1] << 8) | wol->sopass[0]);
+                       phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP2,
+                                     (wol->sopass[3] << 8) | wol->sopass[2]);
+                       phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP3,
+                                     (wol->sopass[5] << 8) | wol->sopass[4]);
+
+                       val_rxcfg |= DP83867_WOL_SEC_EN;
+               } else {
+                       val_rxcfg &= ~DP83867_WOL_SEC_EN;
+               }
+
+               if (wol->wolopts & WAKE_UCAST)
+                       val_rxcfg |= DP83867_WOL_UCAST_EN;
+               else
+                       val_rxcfg &= ~DP83867_WOL_UCAST_EN;
+
+               if (wol->wolopts & WAKE_BCAST)
+                       val_rxcfg |= DP83867_WOL_BCAST_EN;
+               else
+                       val_rxcfg &= ~DP83867_WOL_BCAST_EN;
+       } else {
+               val_rxcfg &= ~DP83867_WOL_ENH_MAC;
+               val_micr &= ~MII_DP83867_MICR_WOL_INT_EN;
+       }
+
+       phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFCFG, val_rxcfg);
+       phy_write(phydev, MII_DP83867_MICR, val_micr);
+
+       return 0;
+}
+
+static void dp83867_get_wol(struct phy_device *phydev,
+                           struct ethtool_wolinfo *wol)
+{
+       u16 value, sopass_val;
+
+       wol->supported = (WAKE_UCAST | WAKE_BCAST | WAKE_MAGIC |
+                       WAKE_MAGICSECURE);
+       wol->wolopts = 0;
+
+       value = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RXFCFG);
+
+       if (value & DP83867_WOL_UCAST_EN)
+               wol->wolopts |= WAKE_UCAST;
+
+       if (value & DP83867_WOL_BCAST_EN)
+               wol->wolopts |= WAKE_BCAST;
+
+       if (value & DP83867_WOL_MAGIC_EN)
+               wol->wolopts |= WAKE_MAGIC;
+
+       if (value & DP83867_WOL_SEC_EN) {
+               sopass_val = phy_read_mmd(phydev, DP83867_DEVADDR,
+                                         DP83867_RXFSOP1);
+               wol->sopass[0] = (sopass_val & 0xff);
+               wol->sopass[1] = (sopass_val >> 8);
+
+               sopass_val = phy_read_mmd(phydev, DP83867_DEVADDR,
+                                         DP83867_RXFSOP2);
+               wol->sopass[2] = (sopass_val & 0xff);
+               wol->sopass[3] = (sopass_val >> 8);
+
+               sopass_val = phy_read_mmd(phydev, DP83867_DEVADDR,
+                                         DP83867_RXFSOP3);
+               wol->sopass[4] = (sopass_val & 0xff);
+               wol->sopass[5] = (sopass_val >> 8);
+
+               wol->wolopts |= WAKE_MAGICSECURE;
+       }
+
+       if (!(value & DP83867_WOL_ENH_MAC))
+               wol->wolopts = 0;
+}
+
 static int dp83867_config_intr(struct phy_device *phydev)
 {
        int micr_status;
@@ -464,6 +590,9 @@ static struct phy_driver dp83867_driver[] = {
                .config_init    = dp83867_config_init,
                .soft_reset     = dp83867_phy_reset,
 
+               .get_wol        = dp83867_get_wol,
+               .set_wol        = dp83867_set_wol,
+
                /* IRQ related */
                .ack_interrupt  = dp83867_ack_interrupt,
                .config_intr    = dp83867_config_intr,
index 0a814fd..b1fbd19 100644 (file)
@@ -53,7 +53,7 @@
 
 #define MII_M1011_PHY_SCR                      0x10
 #define MII_M1011_PHY_SCR_DOWNSHIFT_EN         BIT(11)
-#define MII_M1011_PHY_SRC_DOWNSHIFT_MASK       GENMASK(14, 12)
+#define MII_M1011_PHY_SCR_DOWNSHIFT_MASK       GENMASK(14, 12)
 #define MII_M1011_PHY_SCR_DOWNSHIFT_MAX                8
 #define MII_M1011_PHY_SCR_MDI                  (0x0 << 5)
 #define MII_M1011_PHY_SCR_MDI_X                        (0x1 << 5)
@@ -66,6 +66,9 @@
 #define MII_M1111_PHY_LED_DIRECT       0x4100
 #define MII_M1111_PHY_LED_COMBINE      0x411c
 #define MII_M1111_PHY_EXT_CR           0x14
+#define MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK    GENMASK(11, 9)
+#define MII_M1111_PHY_EXT_CR_DOWNSHIFT_MAX     8
+#define MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN      BIT(8)
 #define MII_M1111_RGMII_RX_DELAY       BIT(7)
 #define MII_M1111_RGMII_TX_DELAY       BIT(1)
 #define MII_M1111_PHY_EXT_SR           0x1b
@@ -788,19 +791,77 @@ static int m88e1111_get_downshift(struct phy_device *phydev, u8 *data)
 {
        int val, cnt, enable;
 
+       val = phy_read(phydev, MII_M1111_PHY_EXT_CR);
+       if (val < 0)
+               return val;
+
+       enable = FIELD_GET(MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN, val);
+       cnt = FIELD_GET(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, val) + 1;
+
+       *data = enable ? cnt : DOWNSHIFT_DEV_DISABLE;
+
+       return 0;
+}
+
+static int m88e1111_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+       int val;
+
+       if (cnt > MII_M1111_PHY_EXT_CR_DOWNSHIFT_MAX)
+               return -E2BIG;
+
+       if (!cnt)
+               return phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
+                                     MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
+
+       val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
+       val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
+
+       return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
+                         MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
+                         MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
+                         val);
+}
+
+static int m88e1111_get_tunable(struct phy_device *phydev,
+                               struct ethtool_tunable *tuna, void *data)
+{
+       switch (tuna->id) {
+       case ETHTOOL_PHY_DOWNSHIFT:
+               return m88e1111_get_downshift(phydev, data);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int m88e1111_set_tunable(struct phy_device *phydev,
+                               struct ethtool_tunable *tuna, const void *data)
+{
+       switch (tuna->id) {
+       case ETHTOOL_PHY_DOWNSHIFT:
+               return m88e1111_set_downshift(phydev, *(const u8 *)data);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int m88e1011_get_downshift(struct phy_device *phydev, u8 *data)
+{
+       int val, cnt, enable;
+
        val = phy_read(phydev, MII_M1011_PHY_SCR);
        if (val < 0)
                return val;
 
        enable = FIELD_GET(MII_M1011_PHY_SCR_DOWNSHIFT_EN, val);
-       cnt = FIELD_GET(MII_M1011_PHY_SRC_DOWNSHIFT_MASK, val) + 1;
+       cnt = FIELD_GET(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, val) + 1;
 
        *data = enable ? cnt : DOWNSHIFT_DEV_DISABLE;
 
        return 0;
 }
 
-static int m88e1111_set_downshift(struct phy_device *phydev, u8 cnt)
+static int m88e1011_set_downshift(struct phy_device *phydev, u8 cnt)
 {
        int val;
 
@@ -812,37 +873,37 @@ static int m88e1111_set_downshift(struct phy_device *phydev, u8 cnt)
                                      MII_M1011_PHY_SCR_DOWNSHIFT_EN);
 
        val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
-       val |= FIELD_PREP(MII_M1011_PHY_SRC_DOWNSHIFT_MASK, cnt - 1);
+       val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
 
        return phy_modify(phydev, MII_M1011_PHY_SCR,
                          MII_M1011_PHY_SCR_DOWNSHIFT_EN |
-                         MII_M1011_PHY_SRC_DOWNSHIFT_MASK,
+                         MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
                          val);
 }
 
-static int m88e1111_get_tunable(struct phy_device *phydev,
+static int m88e1011_get_tunable(struct phy_device *phydev,
                                struct ethtool_tunable *tuna, void *data)
 {
        switch (tuna->id) {
        case ETHTOOL_PHY_DOWNSHIFT:
-               return m88e1111_get_downshift(phydev, data);
+               return m88e1011_get_downshift(phydev, data);
        default:
                return -EOPNOTSUPP;
        }
 }
 
-static int m88e1111_set_tunable(struct phy_device *phydev,
+static int m88e1011_set_tunable(struct phy_device *phydev,
                                struct ethtool_tunable *tuna, const void *data)
 {
        switch (tuna->id) {
        case ETHTOOL_PHY_DOWNSHIFT:
-               return m88e1111_set_downshift(phydev, *(const u8 *)data);
+               return m88e1011_set_downshift(phydev, *(const u8 *)data);
        default:
                return -EOPNOTSUPP;
        }
 }
 
-static void m88e1111_link_change_notify(struct phy_device *phydev)
+static void m88e1011_link_change_notify(struct phy_device *phydev)
 {
        int status;
 
@@ -875,7 +936,7 @@ static int m88e1116r_config_init(struct phy_device *phydev)
        if (err < 0)
                return err;
 
-       err = m88e1111_set_downshift(phydev, 8);
+       err = m88e1011_set_downshift(phydev, 8);
        if (err < 0)
                return err;
 
@@ -1177,7 +1238,7 @@ static int m88e1540_get_tunable(struct phy_device *phydev,
        case ETHTOOL_PHY_FAST_LINK_DOWN:
                return m88e1540_get_fld(phydev, data);
        case ETHTOOL_PHY_DOWNSHIFT:
-               return m88e1111_get_downshift(phydev, data);
+               return m88e1011_get_downshift(phydev, data);
        default:
                return -EOPNOTSUPP;
        }
@@ -1190,7 +1251,7 @@ static int m88e1540_set_tunable(struct phy_device *phydev,
        case ETHTOOL_PHY_FAST_LINK_DOWN:
                return m88e1540_set_fld(phydev, data);
        case ETHTOOL_PHY_DOWNSHIFT:
-               return m88e1111_set_downshift(phydev, *(const u8 *)data);
+               return m88e1011_set_downshift(phydev, *(const u8 *)data);
        default:
                return -EOPNOTSUPP;
        }
@@ -2226,6 +2287,9 @@ static struct phy_driver marvell_drivers[] = {
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
+               .get_tunable = m88e1011_get_tunable,
+               .set_tunable = m88e1011_set_tunable,
+               .link_change_notify = m88e1011_link_change_notify,
        },
        {
                .phy_id = MARVELL_PHY_ID_88E1111,
@@ -2245,6 +2309,9 @@ static struct phy_driver marvell_drivers[] = {
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
+               .get_tunable = m88e1111_get_tunable,
+               .set_tunable = m88e1111_set_tunable,
+               .link_change_notify = m88e1011_link_change_notify,
        },
        {
                .phy_id = MARVELL_PHY_ID_88E1118,
@@ -2283,9 +2350,9 @@ static struct phy_driver marvell_drivers[] = {
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
-               .get_tunable = m88e1111_get_tunable,
-               .set_tunable = m88e1111_set_tunable,
-               .link_change_notify = m88e1111_link_change_notify,
+               .get_tunable = m88e1011_get_tunable,
+               .set_tunable = m88e1011_set_tunable,
+               .link_change_notify = m88e1011_link_change_notify,
        },
        {
                .phy_id = MARVELL_PHY_ID_88E1318S,
@@ -2327,6 +2394,9 @@ static struct phy_driver marvell_drivers[] = {
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
+               .get_tunable = m88e1111_get_tunable,
+               .set_tunable = m88e1111_set_tunable,
+               .link_change_notify = m88e1011_link_change_notify,
        },
        {
                .phy_id = MARVELL_PHY_ID_88E1149R,
@@ -2380,6 +2450,9 @@ static struct phy_driver marvell_drivers[] = {
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
+               .get_tunable = m88e1011_get_tunable,
+               .set_tunable = m88e1011_set_tunable,
+               .link_change_notify = m88e1011_link_change_notify,
        },
        {
                .phy_id = MARVELL_PHY_ID_88E1510,
@@ -2403,6 +2476,9 @@ static struct phy_driver marvell_drivers[] = {
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
                .set_loopback = genphy_loopback,
+               .get_tunable = m88e1011_get_tunable,
+               .set_tunable = m88e1011_set_tunable,
+               .link_change_notify = m88e1011_link_change_notify,
        },
        {
                .phy_id = MARVELL_PHY_ID_88E1540,
@@ -2425,7 +2501,7 @@ static struct phy_driver marvell_drivers[] = {
                .get_stats = marvell_get_stats,
                .get_tunable = m88e1540_get_tunable,
                .set_tunable = m88e1540_set_tunable,
-               .link_change_notify = m88e1111_link_change_notify,
+               .link_change_notify = m88e1011_link_change_notify,
        },
        {
                .phy_id = MARVELL_PHY_ID_88E1545,
@@ -2446,6 +2522,9 @@ static struct phy_driver marvell_drivers[] = {
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
+               .get_tunable = m88e1540_get_tunable,
+               .set_tunable = m88e1540_set_tunable,
+               .link_change_notify = m88e1011_link_change_notify,
        },
        {
                .phy_id = MARVELL_PHY_ID_88E3016,
@@ -2488,7 +2567,7 @@ static struct phy_driver marvell_drivers[] = {
                .get_stats = marvell_get_stats,
                .get_tunable = m88e1540_get_tunable,
                .set_tunable = m88e1540_set_tunable,
-               .link_change_notify = m88e1111_link_change_notify,
+               .link_change_notify = m88e1011_link_change_notify,
        },
 };
 
index be7a2c0..f16d9e9 100644 (file)
@@ -87,8 +87,24 @@ struct phylink {
        phylink_printk(KERN_WARNING, pl, fmt, ##__VA_ARGS__)
 #define phylink_info(pl, fmt, ...) \
        phylink_printk(KERN_INFO, pl, fmt, ##__VA_ARGS__)
+#if defined(CONFIG_DYNAMIC_DEBUG)
 #define phylink_dbg(pl, fmt, ...) \
+do {                                                                   \
+       if ((pl)->config->type == PHYLINK_NETDEV)                       \
+               netdev_dbg((pl)->netdev, fmt, ##__VA_ARGS__);           \
+       else if ((pl)->config->type == PHYLINK_DEV)                     \
+               dev_dbg((pl)->dev, fmt, ##__VA_ARGS__);                 \
+} while (0)
+#elif defined(DEBUG)
+#define phylink_dbg(pl, fmt, ...)                                      \
        phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__)
+#else
+#define phylink_dbg(pl, fmt, ...)                                      \
+({                                                                     \
+       if (0)                                                          \
+               phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__);     \
+})
+#endif
 
 /**
  * phylink_set_port_modes() - set the port type modes in the ethtool mask
index dc3d92d..b732982 100644 (file)
@@ -327,6 +327,7 @@ static struct phy_driver smsc_phy_driver[] = {
        .name           = "SMSC LAN8740",
 
        /* PHY_BASIC_FEATURES */
+       .flags          = PHY_RST_AFTER_CLK_EN,
 
        .probe          = smsc_phy_probe,
 
index 9a1b006..61824bb 100644 (file)
@@ -1324,8 +1324,6 @@ static int ppp_dev_init(struct net_device *dev)
 {
        struct ppp *ppp;
 
-       netdev_lockdep_set_classes(dev);
-
        ppp = netdev_priv(dev);
        /* Let the netdevice take a reference on the ppp file. This ensures
         * that ppp_destroy_interface() won't run before the device gets
index cb1d5fe..ca70a1d 100644 (file)
@@ -1615,7 +1615,6 @@ static int team_init(struct net_device *dev)
        int err;
 
        team->dev = dev;
-       mutex_init(&team->lock);
        team_set_no_mode(team);
 
        team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
@@ -1642,7 +1641,8 @@ static int team_init(struct net_device *dev)
                goto err_options_register;
        netif_carrier_off(dev);
 
-       netdev_lockdep_set_classes(dev);
+       lockdep_register_key(&team->team_lock_key);
+       __mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
 
        return 0;
 
@@ -1673,6 +1673,7 @@ static void team_uninit(struct net_device *dev)
        team_queue_override_fini(team);
        mutex_unlock(&team->lock);
        netdev_change_features(dev);
+       lockdep_unregister_key(&team->team_lock_key);
 }
 
 static void team_destructor(struct net_device *dev)
@@ -1976,8 +1977,15 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
        err = team_port_del(team, port_dev);
        mutex_unlock(&team->lock);
 
-       if (!err)
-               netdev_change_features(dev);
+       if (err)
+               return err;
+
+       if (netif_is_team_master(port_dev)) {
+               lockdep_unregister_key(&team->team_lock_key);
+               lockdep_register_key(&team->team_lock_key);
+               lockdep_set_class(&team->lock, &team->team_lock_key);
+       }
+       netdev_change_features(dev);
 
        return err;
 }
index 32f53de..fe63043 100644 (file)
@@ -787,6 +787,13 @@ static const struct usb_device_id  products[] = {
        .driver_info = 0,
 },
 
+/* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
 /* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
 {
        USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM,
index f8c0818..cf1f3f0 100644 (file)
@@ -1264,8 +1264,11 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
                netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
                lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
 
-               if (dev->domain_data.phyirq > 0)
+               if (dev->domain_data.phyirq > 0) {
+                       local_irq_disable();
                        generic_handle_irq(dev->domain_data.phyirq);
+                       local_irq_enable();
+               }
        } else
                netdev_warn(dev->net,
                            "unexpected interrupt: 0x%08x\n", intdata);
index 283b35a..b9f526e 100644 (file)
@@ -6758,6 +6758,7 @@ static const struct usb_device_id rtl8152_table[] = {
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7205)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x720c)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7214)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0xa387)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
        {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA,  0x09ff)},
        {REALTEK_USB_DEVICE(VENDOR_ID_TPLINK,  0x0601)},
index ee52bde..b8228f5 100644 (file)
@@ -865,7 +865,6 @@ static int vrf_dev_init(struct net_device *dev)
 
        /* similarly, oper state is irrelevant; set to up to avoid confusion */
        dev->operstate = IF_OPER_UP;
-       netdev_lockdep_set_classes(dev);
        return 0;
 
 out_rth:
index 3d9bcc9..11f5776 100644 (file)
@@ -793,8 +793,7 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
        return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
 }
 
-static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
-                                        const u8 *mac, __u16 state,
+static struct vxlan_fdb *vxlan_fdb_alloc(const u8 *mac, __u16 state,
                                         __be32 src_vni, __u16 ndm_flags)
 {
        struct vxlan_fdb *f;
@@ -835,7 +834,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                return -ENOSPC;
 
        netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
-       f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
+       f = vxlan_fdb_alloc(mac, state, src_vni, ndm_flags);
        if (!f)
                return -ENOMEM;
 
@@ -2487,9 +2486,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                vni = tunnel_id_to_key32(info->key.tun_id);
                ifindex = 0;
                dst_cache = &info->dst_cache;
-               if (info->options_len &&
-                   info->key.tun_flags & TUNNEL_VXLAN_OPT)
+               if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
+                       if (info->options_len < sizeof(*md))
+                               goto drop;
                        md = ip_tunnel_info_opts(info);
+               }
                ttl = info->key.ttl;
                tos = info->key.tos;
                label = info->key.label;
@@ -3566,10 +3567,13 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
 {
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct net_device *remote_dev = NULL;
        struct vxlan_fdb *f = NULL;
        bool unregister = false;
+       struct vxlan_rdst *dst;
        int err;
 
+       dst = &vxlan->default_dst;
        err = vxlan_dev_configure(net, dev, conf, false, extack);
        if (err)
                return err;
@@ -3577,14 +3581,14 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
        dev->ethtool_ops = &vxlan_ethtool_ops;
 
        /* create an fdb entry for a valid default destination */
-       if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+       if (!vxlan_addr_any(&dst->remote_ip)) {
                err = vxlan_fdb_create(vxlan, all_zeros_mac,
-                                      &vxlan->default_dst.remote_ip,
+                                      &dst->remote_ip,
                                       NUD_REACHABLE | NUD_PERMANENT,
                                       vxlan->cfg.dst_port,
-                                      vxlan->default_dst.remote_vni,
-                                      vxlan->default_dst.remote_vni,
-                                      vxlan->default_dst.remote_ifindex,
+                                      dst->remote_vni,
+                                      dst->remote_vni,
+                                      dst->remote_ifindex,
                                       NTF_SELF, &f);
                if (err)
                        return err;
@@ -3595,26 +3599,41 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
                goto errout;
        unregister = true;
 
+       if (dst->remote_ifindex) {
+               remote_dev = __dev_get_by_index(net, dst->remote_ifindex);
+               if (!remote_dev)
+                       goto errout;
+
+               err = netdev_upper_dev_link(remote_dev, dev, extack);
+               if (err)
+                       goto errout;
+       }
+
        err = rtnl_configure_link(dev, NULL);
        if (err)
-               goto errout;
+               goto unlink;
 
        if (f) {
-               vxlan_fdb_insert(vxlan, all_zeros_mac,
-                                vxlan->default_dst.remote_vni, f);
+               vxlan_fdb_insert(vxlan, all_zeros_mac, dst->remote_vni, f);
 
                /* notify default fdb entry */
                err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
                                       RTM_NEWNEIGH, true, extack);
                if (err) {
                        vxlan_fdb_destroy(vxlan, f, false, false);
+                       if (remote_dev)
+                               netdev_upper_dev_unlink(remote_dev, dev);
                        goto unregister;
                }
        }
 
        list_add(&vxlan->next, &vn->vxlan_list);
+       if (remote_dev)
+               dst->remote_dev = remote_dev;
        return 0;
-
+unlink:
+       if (remote_dev)
+               netdev_upper_dev_unlink(remote_dev, dev);
 errout:
        /* unregister_netdevice() destroys the default FDB entry with deletion
         * notification. But the addition notification was not sent yet, so
@@ -3932,11 +3951,12 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
                            struct netlink_ext_ack *extack)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
-       struct vxlan_rdst *dst = &vxlan->default_dst;
        struct net_device *lowerdev;
        struct vxlan_config conf;
+       struct vxlan_rdst *dst;
        int err;
 
+       dst = &vxlan->default_dst;
        err = vxlan_nl2conf(tb, data, dev, &conf, true, extack);
        if (err)
                return err;
@@ -3946,6 +3966,14 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
        if (err)
                return err;
 
+       if (dst->remote_dev == lowerdev)
+               lowerdev = NULL;
+
+       err = netdev_adjacent_change_prepare(dst->remote_dev, lowerdev, dev,
+                                            extack);
+       if (err)
+               return err;
+
        /* handle default dst entry */
        if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
                u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);
@@ -3962,6 +3990,8 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
                                               NTF_SELF, true, extack);
                        if (err) {
                                spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+                               netdev_adjacent_change_abort(dst->remote_dev,
+                                                            lowerdev, dev);
                                return err;
                        }
                }
@@ -3979,6 +4009,11 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
        if (conf.age_interval != vxlan->cfg.age_interval)
                mod_timer(&vxlan->age_timer, jiffies);
 
+       netdev_adjacent_change_commit(dst->remote_dev, lowerdev, dev);
+       if (lowerdev && lowerdev != dst->remote_dev) {
+               dst->remote_dev = lowerdev;
+               netdev_update_lockdep_key(lowerdev);
+       }
        vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
        return 0;
 }
@@ -3991,6 +4026,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 
        list_del(&vxlan->next);
        unregister_netdevice_queue(dev, head);
+       if (vxlan->default_dst.remote_dev)
+               netdev_upper_dev_unlink(vxlan->default_dst.remote_dev, dev);
 }
 
 static size_t vxlan_get_size(const struct net_device *dev)
index 73f5892..1c640b4 100644 (file)
@@ -26,7 +26,7 @@ int debugfs_netdev_queue_stopped_get(void *data, u64 *val)
        *val = netif_queue_stopped(i2400m->wimax_dev.net_dev);
        return 0;
 }
-DEFINE_SIMPLE_ATTRIBUTE(fops_netdev_queue_stopped,
+DEFINE_DEBUGFS_ATTRIBUTE(fops_netdev_queue_stopped,
                        debugfs_netdev_queue_stopped_get,
                        NULL, "%llu\n");
 
@@ -154,7 +154,7 @@ int debugfs_i2400m_suspend_set(void *data, u64 val)
                result = 0;
        return result;
 }
-DEFINE_SIMPLE_ATTRIBUTE(fops_i2400m_suspend,
+DEFINE_DEBUGFS_ATTRIBUTE(fops_i2400m_suspend,
                        NULL, debugfs_i2400m_suspend_set,
                        "%llu\n");
 
@@ -183,7 +183,7 @@ int debugfs_i2400m_reset_set(void *data, u64 val)
        }
        return result;
 }
-DEFINE_SIMPLE_ATTRIBUTE(fops_i2400m_reset,
+DEFINE_DEBUGFS_ATTRIBUTE(fops_i2400m_reset,
                        NULL, debugfs_i2400m_reset_set,
                        "%llu\n");
 
index 8efb493..5c79f05 100644 (file)
@@ -127,12 +127,12 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
                        "%d\n", result);
        result = 0;
 error_cmd:
-       kfree(cmd);
        kfree_skb(ack_skb);
 error_msg_to_dev:
 error_alloc:
        d_fnend(4, dev, "(wimax_dev %p state %d) = %d\n",
                wimax_dev, state, result);
+       kfree(cmd);
        return result;
 }
 
index 6953f90..9659f9e 100644 (file)
@@ -511,7 +511,7 @@ error_alloc_netdev:
 
 
 /*
- * Disconect a i2400m from the system.
+ * Disconnect a i2400m from the system.
  *
  * i2400m_stop() has been called before, so al the rx and tx contexts
  * have been taken down already. Make sure the queue is stopped,
index 2fe12b0..42f00a2 100644 (file)
@@ -1037,7 +1037,7 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
        }
 
        /*
-        * Configire PCIE after Ini init. SERDES values now come from ini file
+        * Configure PCIE after Ini init. SERDES values now come from ini file
         * This enables PCIe low power mode.
         */
        array = power_off ? &ah->iniPcieSerdes :
index 39c6485..c0750ce 100644 (file)
@@ -520,7 +520,7 @@ struct iwl_scan_dwell {
 } __packed;
 
 /**
- * struct iwl_scan_config
+ * struct iwl_scan_config_v1
  * @flags:                     enum scan_config_flags
  * @tx_chains:                 valid_tx antenna - ANT_* definitions
  * @rx_chains:                 valid_rx antenna - ANT_* definitions
@@ -552,7 +552,7 @@ struct iwl_scan_config_v1 {
 #define SCAN_LB_LMAC_IDX 0
 #define SCAN_HB_LMAC_IDX 1
 
-struct iwl_scan_config {
+struct iwl_scan_config_v2 {
        __le32 flags;
        __le32 tx_chains;
        __le32 rx_chains;
@@ -564,6 +564,24 @@ struct iwl_scan_config {
        u8 bcast_sta_id;
        u8 channel_flags;
        u8 channel_array[];
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S_2 */
+
+/**
+ * struct iwl_scan_config
+ * @enable_cam_mode: whether to enable CAM mode.
+ * @enable_promiscouos_mode: whether to enable promiscouos mode
+ * @bcast_sta_id: the index of the station in the fw
+ * @reserved: reserved
+ * @tx_chains: valid_tx antenna - ANT_* definitions
+ * @rx_chains: valid_rx antenna - ANT_* definitions
+ */
+struct iwl_scan_config {
+       u8 enable_cam_mode;
+       u8 enable_promiscouos_mode;
+       u8 bcast_sta_id;
+       u8 reserved;
+       __le32 tx_chains;
+       __le32 rx_chains;
 } __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */
 
 /**
index 423cc0c..0d5bc4c 100644 (file)
@@ -288,6 +288,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
  *     STA_CONTEXT_DOT11AX_API_S
  * @IWL_UCODE_TLV_CAPA_SAR_TABLE_VER: This ucode supports different sar
  *     version tables.
+ * @IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of
+ *  SCAN_CONFIG_DB_CMD_API_S.
  *
  * @NUM_IWL_UCODE_TLV_API: number of bits used
  */
@@ -321,6 +323,7 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE   = (__force iwl_ucode_tlv_api_t)53,
        IWL_UCODE_TLV_API_FTM_RTT_ACCURACY      = (__force iwl_ucode_tlv_api_t)54,
        IWL_UCODE_TLV_API_SAR_TABLE_VER         = (__force iwl_ucode_tlv_api_t)55,
+       IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG   = (__force iwl_ucode_tlv_api_t)56,
        IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP    = (__force iwl_ucode_tlv_api_t)57,
        IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER     = (__force iwl_ucode_tlv_api_t)58,
 
index cb4c551..695bbaa 100644 (file)
  *         Indicates MAC is entering a power-saving sleep power-down.
  *         Not a good time to access device-internal resources.
  */
+#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE                     (0x00000004)
 #define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP         (0x00000010)
 #define CSR_GP_CNTRL_REG_FLAG_XTAL_ON               (0x00000400)
 
index 0c12df5..05c1c77 100644 (file)
@@ -148,7 +148,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
  *
  * Bits 3:0:
  * Define the maximum number of pending read requests.
- * Maximum configration value allowed is 0xC
+ * Maximum configuration value allowed is 0xC
  * Bits 9:8:
  * Define the maximum transfer size. (64 / 128 / 256)
  * Bit 10:
index f47e0f9..23c25a7 100644 (file)
@@ -449,6 +449,11 @@ enum {
 #define PERSISTENCE_BIT                        BIT(12)
 #define PREG_WFPM_ACCESS               BIT(12)
 
+#define HPM_HIPM_GEN_CFG                       0xA03458
+#define HPM_HIPM_GEN_CFG_CR_PG_EN              BIT(0)
+#define HPM_HIPM_GEN_CFG_CR_SLP_EN             BIT(1)
+#define HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE       BIT(10)
+
 #define UREG_DOORBELL_TO_ISR6          0xA05C04
 #define UREG_DOORBELL_TO_ISR6_NMI_BIT  BIT(0)
 #define UREG_DOORBELL_TO_ISR6_SUSPEND  BIT(18)
index 843d00b..5ca50f3 100644 (file)
@@ -1405,6 +1405,12 @@ static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm)
                          IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER);
 }
 
+static inline bool iwl_mvm_is_reduced_config_scan_supported(struct iwl_mvm *mvm)
+{
+       return fw_has_api(&mvm->fw->ucode_capa,
+                         IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG);
+}
+
 static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
 {
        return fw_has_api(&mvm->fw->ucode_capa,
index f6b3045..fcafa22 100644 (file)
@@ -1137,11 +1137,11 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
        iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
 }
 
-static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
-                                    u32 flags, u8 channel_flags,
-                                    u32 max_channels)
+static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config,
+                                       u32 flags, u8 channel_flags,
+                                       u32 max_channels)
 {
-       struct iwl_scan_config *cfg = config;
+       struct iwl_scan_config_v2 *cfg = config;
 
        cfg->flags = cpu_to_le32(flags);
        cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
@@ -1185,7 +1185,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
        iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
 }
 
-int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
 {
        void *cfg;
        int ret, cmd_size;
@@ -1217,7 +1217,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
        }
 
        if (iwl_mvm_cdb_scan_api(mvm))
-               cmd_size = sizeof(struct iwl_scan_config);
+               cmd_size = sizeof(struct iwl_scan_config_v2);
        else
                cmd_size = sizeof(struct iwl_scan_config_v1);
        cmd_size += num_channels;
@@ -1254,8 +1254,8 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
                        flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
                                 SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
                                 SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
-               iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags,
-                                        num_channels);
+               iwl_mvm_fill_scan_config_v2(mvm, cfg, flags, channel_flags,
+                                           num_channels);
        } else {
                iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags,
                                            num_channels);
@@ -1277,6 +1277,30 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
        return ret;
 }
 
+int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+{
+       struct iwl_scan_config cfg;
+       struct iwl_host_cmd cmd = {
+               .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+               .len[0] = sizeof(cfg),
+               .data[0] = &cfg,
+               .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+       };
+
+       if (!iwl_mvm_is_reduced_config_scan_supported(mvm))
+               return iwl_mvm_legacy_config_scan(mvm);
+
+       memset(&cfg, 0, sizeof(cfg));
+
+       cfg.bcast_sta_id = mvm->aux_sta.sta_id;
+       cfg.tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+       cfg.rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+
+       IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
+
+       return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
 static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
 {
        int i;
index 1d6bc62..7b35f41 100644 (file)
@@ -1482,6 +1482,13 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
                                            mvm_sta->sta_id, i);
                        txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
                                                         i, wdg);
+                       /*
+                        * on failures, just set it to IWL_MVM_INVALID_QUEUE
+                        * to try again later, we have no other good way of
+                        * failing here
+                        */
+                       if (txq_id < 0)
+                               txq_id = IWL_MVM_INVALID_QUEUE;
                        tid_data->txq_id = txq_id;
 
                        /*
@@ -1950,30 +1957,73 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
        sta->sta_id = IWL_MVM_INVALID_STA;
 }
 
-static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
+static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
                                          u8 sta_id, u8 fifo)
 {
        unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
                mvm->trans->trans_cfg->base_params->wd_timeout :
                IWL_WATCHDOG_DISABLED;
+       struct iwl_trans_txq_scd_cfg cfg = {
+               .fifo = fifo,
+               .sta_id = sta_id,
+               .tid = IWL_MAX_TID_COUNT,
+               .aggregate = false,
+               .frame_limit = IWL_FRAME_LIMIT,
+       };
+
+       WARN_ON(iwl_mvm_has_new_tx_api(mvm));
+
+       iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
+}
+
+static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
+{
+       unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
+               mvm->trans->trans_cfg->base_params->wd_timeout :
+               IWL_WATCHDOG_DISABLED;
+
+       WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
+
+       return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
+                                      wdg_timeout);
+}
 
+static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
+                                         int maccolor,
+                                         struct iwl_mvm_int_sta *sta,
+                                         u16 *queue, int fifo)
+{
+       int ret;
+
+       /* Map queue to fifo - needs to happen before adding station */
+       if (!iwl_mvm_has_new_tx_api(mvm))
+               iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
+
+       ret = iwl_mvm_add_int_sta_common(mvm, sta, NULL, macidx, maccolor);
+       if (ret) {
+               if (!iwl_mvm_has_new_tx_api(mvm))
+                       iwl_mvm_disable_txq(mvm, NULL, *queue,
+                                           IWL_MAX_TID_COUNT, 0);
+               return ret;
+       }
+
+       /*
+        * For 22000 firmware and on we cannot add queue to a station unknown
+        * to firmware so enable queue here - after the station was added
+        */
        if (iwl_mvm_has_new_tx_api(mvm)) {
-               int tvqm_queue =
-                       iwl_mvm_tvqm_enable_txq(mvm, sta_id,
-                                               IWL_MAX_TID_COUNT,
-                                               wdg_timeout);
-               *queue = tvqm_queue;
-       } else {
-               struct iwl_trans_txq_scd_cfg cfg = {
-                       .fifo = fifo,
-                       .sta_id = sta_id,
-                       .tid = IWL_MAX_TID_COUNT,
-                       .aggregate = false,
-                       .frame_limit = IWL_FRAME_LIMIT,
-               };
+               int txq;
 
-               iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
+               txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
+               if (txq < 0) {
+                       iwl_mvm_rm_sta_common(mvm, sta->sta_id);
+                       return txq;
+               }
+
+               *queue = txq;
        }
+
+       return 0;
 }
 
 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
@@ -1989,59 +2039,26 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
        if (ret)
                return ret;
 
-       /* Map Aux queue to fifo - needs to happen before adding Aux station */
-       if (!iwl_mvm_has_new_tx_api(mvm))
-               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
-                                             mvm->aux_sta.sta_id,
-                                             IWL_MVM_TX_FIFO_MCAST);
-
-       ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
-                                        MAC_INDEX_AUX, 0);
+       ret = iwl_mvm_add_int_sta_with_queue(mvm, MAC_INDEX_AUX, 0,
+                                            &mvm->aux_sta, &mvm->aux_queue,
+                                            IWL_MVM_TX_FIFO_MCAST);
        if (ret) {
                iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
                return ret;
        }
 
-       /*
-        * For 22000 firmware and on we cannot add queue to a station unknown
-        * to firmware so enable queue here - after the station was added
-        */
-       if (iwl_mvm_has_new_tx_api(mvm))
-               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
-                                             mvm->aux_sta.sta_id,
-                                             IWL_MVM_TX_FIFO_MCAST);
-
        return 0;
 }
 
 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       int ret;
 
        lockdep_assert_held(&mvm->mutex);
 
-       /* Map snif queue to fifo - must happen before adding snif station */
-       if (!iwl_mvm_has_new_tx_api(mvm))
-               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
-                                             mvm->snif_sta.sta_id,
+       return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
+                                             &mvm->snif_sta, &mvm->snif_queue,
                                              IWL_MVM_TX_FIFO_BE);
-
-       ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
-                                        mvmvif->id, 0);
-       if (ret)
-               return ret;
-
-       /*
-        * For 22000 firmware and on we cannot add queue to a station unknown
-        * to firmware so enable queue here - after the station was added
-        */
-       if (iwl_mvm_has_new_tx_api(mvm))
-               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
-                                             mvm->snif_sta.sta_id,
-                                             IWL_MVM_TX_FIFO_BE);
-
-       return 0;
 }
 
 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -2133,6 +2150,10 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
                                                IWL_MAX_TID_COUNT,
                                                wdg_timeout);
+               if (queue < 0) {
+                       iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
+                       return queue;
+               }
 
                if (vif->type == NL80211_IFTYPE_AP ||
                    vif->type == NL80211_IFTYPE_ADHOC)
@@ -2307,10 +2328,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        }
        ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
                                         mvmvif->id, mvmvif->color);
-       if (ret) {
-               iwl_mvm_dealloc_int_sta(mvm, msta);
-               return ret;
-       }
+       if (ret)
+               goto err;
 
        /*
         * Enable cab queue after the ADD_STA command is sent.
@@ -2323,6 +2342,10 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
                                                    0,
                                                    timeout);
+               if (queue < 0) {
+                       ret = queue;
+                       goto err;
+               }
                mvmvif->cab_queue = queue;
        } else if (!fw_has_api(&mvm->fw->ucode_capa,
                               IWL_UCODE_TLV_API_STA_TYPE))
@@ -2330,6 +2353,9 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                                   timeout);
 
        return 0;
+err:
+       iwl_mvm_dealloc_int_sta(mvm, msta);
+       return ret;
 }
 
 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
index 6f4bb7c..040cec1 100644 (file)
@@ -573,20 +573,20 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_160_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_160_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9462_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9461_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9462_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
@@ -603,7 +603,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x2526, 0x401C, iwl9260_2ac_160_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_160_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9462_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x6010, iwl9260_2ac_160_cfg)},
@@ -618,60 +618,61 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_160_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
-
-       {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-       {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
+       {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
+       {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
 
        {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)},
        {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
@@ -1067,11 +1068,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                }
        } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
                   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
-                  ((cfg != &iwl_ax200_cfg_cc &&
-                    cfg != &killer1650x_2ax_cfg &&
-                    cfg != &killer1650w_2ax_cfg &&
-                    cfg != &iwl_ax201_cfg_quz_hr) ||
-                   iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
+                  iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
                u32 hw_status;
 
                hw_status = iwl_read_prph(iwl_trans, UMAG_GEN_HW_STATUS);
index df8455f..ca3bb4d 100644 (file)
 #include "internal.h"
 #include "fw/dbg.h"
 
+static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
+{
+       iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+                         HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+       udelay(20);
+       iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+                         HPM_HIPM_GEN_CFG_CR_PG_EN |
+                         HPM_HIPM_GEN_CFG_CR_SLP_EN);
+       udelay(20);
+       iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
+                           HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+
+       iwl_trans_sw_reset(trans);
+       iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+       return 0;
+}
+
 /*
  * Start up NIC's basic functionality after it has been reset
  * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
@@ -92,6 +110,13 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
 
        iwl_pcie_apm_config(trans);
 
+       if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
+           trans->cfg->integrated) {
+               ret = iwl_pcie_gen2_force_power_gating(trans);
+               if (ret)
+                       return ret;
+       }
+
        ret = iwl_finish_nic_init(trans, trans->trans_cfg);
        if (ret)
                return ret;
index 158a3d7..e323e9a 100644 (file)
@@ -3041,30 +3041,6 @@ static void prism2_clear_set_tim_queue(local_info_t *local)
        }
 }
 
-
-/*
- * HostAP uses two layers of net devices, where the inner
- * layer gets called all the time from the outer layer.
- * This is a natural nesting, which needs a split lock type.
- */
-static struct lock_class_key hostap_netdev_xmit_lock_key;
-static struct lock_class_key hostap_netdev_addr_lock_key;
-
-static void prism2_set_lockdep_class_one(struct net_device *dev,
-                                        struct netdev_queue *txq,
-                                        void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock,
-                         &hostap_netdev_xmit_lock_key);
-}
-
-static void prism2_set_lockdep_class(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock,
-                         &hostap_netdev_addr_lock_key);
-       netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL);
-}
-
 static struct net_device *
 prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
                       struct device *sdev)
@@ -3223,7 +3199,6 @@ while (0)
        if (ret >= 0)
                ret = register_netdevice(dev);
 
-       prism2_set_lockdep_class(dev);
        rtnl_unlock();
        if (ret < 0) {
                printk(KERN_WARNING "%s: register netdevice failed!\n",
index 4d03596..d7a1ddc 100644 (file)
@@ -8,6 +8,8 @@ mt76-y := \
        mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
        tx.o agg-rx.o mcu.o
 
+mt76-$(CONFIG_PCI) += pci.o
+
 mt76-usb-y := usb.o usb_trace.o
 
 CFLAGS_trace.o := -I$(src)
index c747eb2..8f69d00 100644 (file)
@@ -53,8 +53,10 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
        u32 ctrl;
        int i, idx = -1;
 
-       if (txwi)
+       if (txwi) {
                q->entry[q->head].txwi = DMA_DUMMY_DATA;
+               q->entry[q->head].skip_buf0 = true;
+       }
 
        for (i = 0; i < nbufs; i += 2, buf += 2) {
                u32 buf0 = buf[0].addr, buf1 = 0;
@@ -97,7 +99,7 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
        __le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
        u32 ctrl = le32_to_cpu(__ctrl);
 
-       if (!e->txwi || !e->skb) {
+       if (!e->skip_buf0) {
                __le32 addr = READ_ONCE(q->desc[idx].buf0);
                u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
 
index 570c159..8aec7cc 100644 (file)
@@ -93,8 +93,9 @@ struct mt76_queue_entry {
                struct urb *urb;
        };
        enum mt76_txq_id qid;
-       bool schedule;
-       bool done;
+       bool skip_buf0:1;
+       bool schedule:1;
+       bool done:1;
 };
 
 struct mt76_queue_regs {
@@ -578,6 +579,7 @@ bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
 #define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
 
 void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
+void mt76_pci_disable_aspm(struct pci_dev *pdev);
 
 static inline u16 mt76_chip(struct mt76_dev *dev)
 {
index 73c3104..cf611d1 100644 (file)
@@ -81,6 +81,8 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        /* RG_SSUSB_CDR_BR_PE1D = 0x3 */
        mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
 
+       mt76_pci_disable_aspm(pdev);
+
        return 0;
 
 error:
diff --git a/drivers/net/wireless/mediatek/mt76/pci.c b/drivers/net/wireless/mediatek/mt76/pci.c
new file mode 100644 (file)
index 0000000..04c5a69
--- /dev/null
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/pci.h>
+
+void mt76_pci_disable_aspm(struct pci_dev *pdev)
+{
+       struct pci_dev *parent = pdev->bus->self;
+       u16 aspm_conf, parent_aspm_conf = 0;
+
+       pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &aspm_conf);
+       aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+       if (parent) {
+               pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
+                                         &parent_aspm_conf);
+               parent_aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+       }
+
+       if (!aspm_conf && (!parent || !parent_aspm_conf)) {
+               /* aspm already disabled */
+               return;
+       }
+
+       dev_info(&pdev->dev, "disabling ASPM %s %s\n",
+                (aspm_conf & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "",
+                (aspm_conf & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : "");
+
+       if (IS_ENABLED(CONFIG_PCIEASPM)) {
+               int err;
+
+               err = pci_disable_link_state(pdev, aspm_conf);
+               if (!err)
+                       return;
+       }
+
+       /* both device and parent should have the same ASPM setting.
+        * disable ASPM in downstream component first and then upstream.
+        */
+       pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_conf);
+       if (parent)
+               pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
+                                          aspm_conf);
+}
+EXPORT_SYMBOL_GPL(mt76_pci_disable_aspm);
index 6087ec7..f88d265 100644 (file)
@@ -822,7 +822,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                hdr = rtl_get_hdr(skb);
                fc = rtl_get_fc(skb);
 
-               if (!stats.crc && !stats.hwerror) {
+               if (!stats.crc && !stats.hwerror && (skb->len > FCS_LEN)) {
                        memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
                               sizeof(rx_status));
 
@@ -859,6 +859,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                                _rtl_pci_rx_to_mac80211(hw, skb, rx_status);
                        }
                } else {
+                       /* drop packets with errors or those too short */
                        dev_kfree_skb_any(skb);
                }
 new_trx_end:
index 70f04c2..fff8dda 100644 (file)
@@ -754,6 +754,9 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
                                return;
                        } else {
                                noa_num = (noa_len - 2) / 13;
+                               if (noa_num > P2P_MAX_NOA_NUM)
+                                       noa_num = P2P_MAX_NOA_NUM;
+
                        }
                        noa_index = ie[3];
                        if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
@@ -848,6 +851,9 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
                                return;
                        } else {
                                noa_num = (noa_len - 2) / 13;
+                               if (noa_num > P2P_MAX_NOA_NUM)
+                                       noa_num = P2P_MAX_NOA_NUM;
+
                        }
                        noa_index = ie[3];
                        if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
index 6d6e899..81313e0 100644 (file)
@@ -1352,9 +1352,9 @@ static void _rtl92s_phy_set_rfhalt(struct ieee80211_hw *hw)
        /* SW/HW radio off or halt adapter!! For example S3/S4 */
        } else {
                /* LED function disable. Power range is about 8mA now. */
-               /* if write 0xF1 disconnet_pci power
+               /* if write 0xF1 disconnect_pci power
                 *       ifconfig wlan0 down power are both high 35:70 */
-               /* if write oxF9 disconnet_pci power
+               /* if write oxF9 disconnect_pci power
                 * ifconfig wlan0 down power are both low  12:45*/
                rtl_write_byte(rtlpriv, 0x03, 0xF9);
        }
index d4c09e5..18c4d99 100644 (file)
@@ -186,7 +186,7 @@ static void wl12xx_spi_init(struct device *child)
 
        spi_sync(to_spi_device(glue->dev), &m);
 
-       /* Restore chip select configration to normal */
+       /* Restore chip select configuration to normal */
        spi->mode ^= SPI_CS_HIGH;
        kfree(cmd);
 }
index be92e12..7997cc6 100644 (file)
@@ -548,6 +548,7 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev,
        priv->is_connected = false;
        priv->is_up = false;
        INIT_DELAYED_WORK(&priv->connect, virt_wifi_connect_complete);
+       __module_get(THIS_MODULE);
 
        return 0;
 unregister_netdev:
@@ -578,6 +579,7 @@ static void virt_wifi_dellink(struct net_device *dev,
        netdev_upper_dev_unlink(priv->lowerdev, dev);
 
        unregister_netdevice_queue(dev, head);
+       module_put(THIS_MODULE);
 
        /* Deleting the wiphy is handled in the module destructor. */
 }
@@ -590,6 +592,42 @@ static struct rtnl_link_ops virt_wifi_link_ops = {
        .priv_size      = sizeof(struct virt_wifi_netdev_priv),
 };
 
+static bool netif_is_virt_wifi_dev(const struct net_device *dev)
+{
+       return rcu_access_pointer(dev->rx_handler) == virt_wifi_rx_handler;
+}
+
+static int virt_wifi_event(struct notifier_block *this, unsigned long event,
+                          void *ptr)
+{
+       struct net_device *lower_dev = netdev_notifier_info_to_dev(ptr);
+       struct virt_wifi_netdev_priv *priv;
+       struct net_device *upper_dev;
+       LIST_HEAD(list_kill);
+
+       if (!netif_is_virt_wifi_dev(lower_dev))
+               return NOTIFY_DONE;
+
+       switch (event) {
+       case NETDEV_UNREGISTER:
+               priv = rtnl_dereference(lower_dev->rx_handler_data);
+               if (!priv)
+                       return NOTIFY_DONE;
+
+               upper_dev = priv->upperdev;
+
+               upper_dev->rtnl_link_ops->dellink(upper_dev, &list_kill);
+               unregister_netdevice_many(&list_kill);
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block virt_wifi_notifier = {
+       .notifier_call = virt_wifi_event,
+};
+
 /* Acquires and releases the rtnl lock. */
 static int __init virt_wifi_init_module(void)
 {
@@ -598,14 +636,25 @@ static int __init virt_wifi_init_module(void)
        /* Guaranteed to be locallly-administered and not multicast. */
        eth_random_addr(fake_router_bssid);
 
+       err = register_netdevice_notifier(&virt_wifi_notifier);
+       if (err)
+               return err;
+
+       err = -ENOMEM;
        common_wiphy = virt_wifi_make_wiphy();
        if (!common_wiphy)
-               return -ENOMEM;
+               goto notifier;
 
        err = rtnl_link_register(&virt_wifi_link_ops);
        if (err)
-               virt_wifi_destroy_wiphy(common_wiphy);
+               goto destroy_wiphy;
 
+       return 0;
+
+destroy_wiphy:
+       virt_wifi_destroy_wiphy(common_wiphy);
+notifier:
+       unregister_netdevice_notifier(&virt_wifi_notifier);
        return err;
 }
 
@@ -615,6 +664,7 @@ static void __exit virt_wifi_cleanup_module(void)
        /* Will delete any devices that depend on the wiphy. */
        rtnl_link_unregister(&virt_wifi_link_ops);
        virt_wifi_destroy_wiphy(common_wiphy);
+       unregister_netdevice_notifier(&virt_wifi_notifier);
 }
 
 module_init(virt_wifi_init_module);
index f6d6b34..7fe1bbe 100644 (file)
@@ -26,3 +26,14 @@ config NFC_PN533_I2C
 
          If you choose to build a module, it'll be called pn533_i2c.
          Say N if unsure.
+
+config NFC_PN532_UART
+       tristate "NFC PN532 device support (UART)"
+       depends on SERIAL_DEV_BUS
+       select NFC_PN533
+       ---help---
+         This module adds support for the NXP pn532 UART interface.
+         Select this if your platform is using the UART bus.
+
+         If you choose to build a module, it'll be called pn532_uart.
+         Say N if unsure.
index 43c25b4..b964833 100644 (file)
@@ -4,7 +4,9 @@
 #
 pn533_usb-objs  = usb.o
 pn533_i2c-objs  = i2c.o
+pn532_uart-objs  = uart.o
 
 obj-$(CONFIG_NFC_PN533)     += pn533.o
 obj-$(CONFIG_NFC_PN533_USB) += pn533_usb.o
 obj-$(CONFIG_NFC_PN533_I2C) += pn533_i2c.o
+obj-$(CONFIG_NFC_PN532_UART) += pn532_uart.o
index 1832cd9..7507176 100644 (file)
@@ -193,12 +193,10 @@ static int pn533_i2c_probe(struct i2c_client *client,
        phy->i2c_dev = client;
        i2c_set_clientdata(client, phy);
 
-       priv = pn533_register_device(PN533_DEVICE_PN532,
-                                    PN533_NO_TYPE_B_PROTOCOLS,
-                                    PN533_PROTO_REQ_ACK_RESP,
-                                    phy, &i2c_phy_ops, NULL,
-                                    &phy->i2c_dev->dev,
-                                    &client->dev);
+       priv = pn53x_common_init(PN533_DEVICE_PN532,
+                               PN533_PROTO_REQ_ACK_RESP,
+                               phy, &i2c_phy_ops, NULL,
+                               &phy->i2c_dev->dev);
 
        if (IS_ERR(priv)) {
                r = PTR_ERR(priv);
@@ -206,6 +204,9 @@ static int pn533_i2c_probe(struct i2c_client *client,
        }
 
        phy->priv = priv;
+       r = pn532_i2c_nfc_alloc(priv, PN533_NO_TYPE_B_PROTOCOLS, &client->dev);
+       if (r)
+               goto nfc_alloc_err;
 
        r = request_threaded_irq(client->irq, NULL, pn533_i2c_irq_thread_fn,
                                IRQF_TRIGGER_FALLING |
@@ -220,13 +221,20 @@ static int pn533_i2c_probe(struct i2c_client *client,
        if (r)
                goto fn_setup_err;
 
-       return 0;
+       r = nfc_register_device(priv->nfc_dev);
+       if (r)
+               goto fn_setup_err;
+
+       return r;
 
 fn_setup_err:
        free_irq(client->irq, phy);
 
 irq_rqst_err:
-       pn533_unregister_device(phy->priv);
+       nfc_free_device(priv->nfc_dev);
+
+nfc_alloc_err:
+       pn53x_common_clean(phy->priv);
 
        return r;
 }
@@ -239,12 +247,18 @@ static int pn533_i2c_remove(struct i2c_client *client)
 
        free_irq(client->irq, phy);
 
-       pn533_unregister_device(phy->priv);
+       pn53x_unregister_nfc(phy->priv);
+       pn53x_common_clean(phy->priv);
 
        return 0;
 }
 
 static const struct of_device_id of_pn533_i2c_match[] = {
+       { .compatible = "nxp,pn532", },
+       /*
+        * NOTE: The use of the compatibles with the trailing "...-i2c" is
+        * deprecated and will be removed.
+        */
        { .compatible = "nxp,pn533-i2c", },
        { .compatible = "nxp,pn532-i2c", },
        {},
index a172a32..aa766e7 100644 (file)
@@ -185,6 +185,32 @@ struct pn533_cmd_jump_dep_response {
        u8 gt[];
 } __packed;
 
+struct pn532_autopoll_resp {
+       u8 type;
+       u8 ln;
+       u8 tg;
+       u8 tgdata[];
+};
+
+/* PN532_CMD_IN_AUTOPOLL */
+#define PN532_AUTOPOLL_POLLNR_INFINITE 0xff
+#define PN532_AUTOPOLL_PERIOD          0x03 /* in units of 150 ms */
+
+#define PN532_AUTOPOLL_TYPE_GENERIC_106                0x00
+#define PN532_AUTOPOLL_TYPE_GENERIC_212                0x01
+#define PN532_AUTOPOLL_TYPE_GENERIC_424                0x02
+#define PN532_AUTOPOLL_TYPE_JEWEL              0x04
+#define PN532_AUTOPOLL_TYPE_MIFARE             0x10
+#define PN532_AUTOPOLL_TYPE_FELICA212          0x11
+#define PN532_AUTOPOLL_TYPE_FELICA424          0x12
+#define PN532_AUTOPOLL_TYPE_ISOA               0x20
+#define PN532_AUTOPOLL_TYPE_ISOB               0x23
+#define PN532_AUTOPOLL_TYPE_DEP_PASSIVE_106    0x40
+#define PN532_AUTOPOLL_TYPE_DEP_PASSIVE_212    0x41
+#define PN532_AUTOPOLL_TYPE_DEP_PASSIVE_424    0x42
+#define PN532_AUTOPOLL_TYPE_DEP_ACTIVE_106     0x80
+#define PN532_AUTOPOLL_TYPE_DEP_ACTIVE_212     0x81
+#define PN532_AUTOPOLL_TYPE_DEP_ACTIVE_424     0x82
 
 /* PN533_TG_INIT_AS_TARGET */
 #define PN533_INIT_TARGET_PASSIVE 0x1
@@ -1389,6 +1415,101 @@ static int pn533_poll_dep(struct nfc_dev *nfc_dev)
        return rc;
 }
 
+static int pn533_autopoll_complete(struct pn533 *dev, void *arg,
+                              struct sk_buff *resp)
+{
+       struct pn532_autopoll_resp *apr;
+       struct nfc_target nfc_tgt;
+       u8 nbtg;
+       int rc;
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+
+               nfc_err(dev->dev, "%s  autopoll complete error %d\n",
+                       __func__, rc);
+
+               if (rc == -ENOENT) {
+                       if (dev->poll_mod_count != 0)
+                               return rc;
+                       goto stop_poll;
+               } else if (rc < 0) {
+                       nfc_err(dev->dev,
+                               "Error %d when running autopoll\n", rc);
+                       goto stop_poll;
+               }
+       }
+
+       nbtg = resp->data[0];
+       if ((nbtg > 2) || (nbtg <= 0))
+               return -EAGAIN;
+
+       apr = (struct pn532_autopoll_resp *)&resp->data[1];
+       while (nbtg--) {
+               memset(&nfc_tgt, 0, sizeof(struct nfc_target));
+               switch (apr->type) {
+               case PN532_AUTOPOLL_TYPE_ISOA:
+                       dev_dbg(dev->dev, "ISOA\n");
+                       rc = pn533_target_found_type_a(&nfc_tgt, apr->tgdata,
+                                                      apr->ln - 1);
+                       break;
+               case PN532_AUTOPOLL_TYPE_FELICA212:
+               case PN532_AUTOPOLL_TYPE_FELICA424:
+                       dev_dbg(dev->dev, "FELICA\n");
+                       rc = pn533_target_found_felica(&nfc_tgt, apr->tgdata,
+                                                      apr->ln - 1);
+                       break;
+               case PN532_AUTOPOLL_TYPE_JEWEL:
+                       dev_dbg(dev->dev, "JEWEL\n");
+                       rc = pn533_target_found_jewel(&nfc_tgt, apr->tgdata,
+                                                     apr->ln - 1);
+                       break;
+               case PN532_AUTOPOLL_TYPE_ISOB:
+                       dev_dbg(dev->dev, "ISOB\n");
+                       rc = pn533_target_found_type_b(&nfc_tgt, apr->tgdata,
+                                                      apr->ln - 1);
+                       break;
+               case PN532_AUTOPOLL_TYPE_MIFARE:
+                       dev_dbg(dev->dev, "Mifare\n");
+                       rc = pn533_target_found_type_a(&nfc_tgt, apr->tgdata,
+                                                      apr->ln - 1);
+                       break;
+               default:
+                       nfc_err(dev->dev,
+                                   "Unknown current poll modulation\n");
+                       rc = -EPROTO;
+               }
+
+               if (rc)
+                       goto done;
+
+               if (!(nfc_tgt.supported_protocols & dev->poll_protocols)) {
+                       nfc_err(dev->dev,
+                                   "The Tg found doesn't have the desired protocol\n");
+                       rc = -EAGAIN;
+                       goto done;
+               }
+
+               dev->tgt_available_prots = nfc_tgt.supported_protocols;
+               apr = (struct pn532_autopoll_resp *)
+                       (apr->tgdata + (apr->ln - 1));
+       }
+
+       pn533_poll_reset_mod_list(dev);
+       nfc_targets_found(dev->nfc_dev, &nfc_tgt, 1);
+
+done:
+       dev_kfree_skb(resp);
+       return rc;
+
+stop_poll:
+       nfc_err(dev->dev, "autopoll operation has been stopped\n");
+
+       pn533_poll_reset_mod_list(dev);
+       dev->poll_protocols = 0;
+       return rc;
+}
+
 static int pn533_poll_complete(struct pn533 *dev, void *arg,
                               struct sk_buff *resp)
 {
@@ -1532,6 +1653,7 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
 {
        struct pn533 *dev = nfc_get_drvdata(nfc_dev);
        struct pn533_poll_modulations *cur_mod;
+       struct sk_buff *skb;
        u8 rand_mod;
        int rc;
 
@@ -1557,9 +1679,73 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
                        tm_protocols = 0;
        }
 
-       pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
        dev->poll_protocols = im_protocols;
        dev->listen_protocols = tm_protocols;
+       if (dev->device_type == PN533_DEVICE_PN532_AUTOPOLL) {
+               skb = pn533_alloc_skb(dev, 4 + 6);
+               if (!skb)
+                       return -ENOMEM;
+
+               *((u8 *)skb_put(skb, sizeof(u8))) =
+                       PN532_AUTOPOLL_POLLNR_INFINITE;
+               *((u8 *)skb_put(skb, sizeof(u8))) = PN532_AUTOPOLL_PERIOD;
+
+               if ((im_protocols & NFC_PROTO_MIFARE_MASK) &&
+                               (im_protocols & NFC_PROTO_ISO14443_MASK) &&
+                               (im_protocols & NFC_PROTO_NFC_DEP_MASK))
+                       *((u8 *)skb_put(skb, sizeof(u8))) =
+                               PN532_AUTOPOLL_TYPE_GENERIC_106;
+               else {
+                       if (im_protocols & NFC_PROTO_MIFARE_MASK)
+                               *((u8 *)skb_put(skb, sizeof(u8))) =
+                                       PN532_AUTOPOLL_TYPE_MIFARE;
+
+                       if (im_protocols & NFC_PROTO_ISO14443_MASK)
+                               *((u8 *)skb_put(skb, sizeof(u8))) =
+                                       PN532_AUTOPOLL_TYPE_ISOA;
+
+                       if (im_protocols & NFC_PROTO_NFC_DEP_MASK) {
+                               *((u8 *)skb_put(skb, sizeof(u8))) =
+                                       PN532_AUTOPOLL_TYPE_DEP_PASSIVE_106;
+                               *((u8 *)skb_put(skb, sizeof(u8))) =
+                                       PN532_AUTOPOLL_TYPE_DEP_PASSIVE_212;
+                               *((u8 *)skb_put(skb, sizeof(u8))) =
+                                       PN532_AUTOPOLL_TYPE_DEP_PASSIVE_424;
+                       }
+               }
+
+               if (im_protocols & NFC_PROTO_FELICA_MASK ||
+                               im_protocols & NFC_PROTO_NFC_DEP_MASK) {
+                       *((u8 *)skb_put(skb, sizeof(u8))) =
+                               PN532_AUTOPOLL_TYPE_FELICA212;
+                       *((u8 *)skb_put(skb, sizeof(u8))) =
+                               PN532_AUTOPOLL_TYPE_FELICA424;
+               }
+
+               if (im_protocols & NFC_PROTO_JEWEL_MASK)
+                       *((u8 *)skb_put(skb, sizeof(u8))) =
+                               PN532_AUTOPOLL_TYPE_JEWEL;
+
+               if (im_protocols & NFC_PROTO_ISO14443_B_MASK)
+                       *((u8 *)skb_put(skb, sizeof(u8))) =
+                               PN532_AUTOPOLL_TYPE_ISOB;
+
+               if (tm_protocols)
+                       *((u8 *)skb_put(skb, sizeof(u8))) =
+                               PN532_AUTOPOLL_TYPE_DEP_ACTIVE_106;
+
+               rc = pn533_send_cmd_async(dev, PN533_CMD_IN_AUTOPOLL, skb,
+                               pn533_autopoll_complete, NULL);
+
+               if (rc < 0)
+                       dev_kfree_skb(skb);
+               else
+                       dev->poll_mod_count++;
+
+               return rc;
+       }
+
+       pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
 
        /* Do not always start polling from the same modulation */
        get_random_bytes(&rand_mod, sizeof(rand_mod));
@@ -2458,7 +2644,11 @@ static int pn533_dev_up(struct nfc_dev *nfc_dev)
 {
        struct pn533 *dev = nfc_get_drvdata(nfc_dev);
 
-       if (dev->device_type == PN533_DEVICE_PN532) {
+       if (dev->phy_ops->dev_up)
+               dev->phy_ops->dev_up(dev);
+
+       if ((dev->device_type == PN533_DEVICE_PN532) ||
+               (dev->device_type == PN533_DEVICE_PN532_AUTOPOLL)) {
                int rc = pn532_sam_configuration(nfc_dev);
 
                if (rc)
@@ -2470,7 +2660,14 @@ static int pn533_dev_up(struct nfc_dev *nfc_dev)
 
 static int pn533_dev_down(struct nfc_dev *nfc_dev)
 {
-       return pn533_rf_field(nfc_dev, 0);
+       struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+       int ret;
+
+       ret = pn533_rf_field(nfc_dev, 0);
+       if (dev->phy_ops->dev_down && !ret)
+               dev->phy_ops->dev_down(dev);
+
+       return ret;
 }
 
 static struct nfc_ops pn533_nfc_ops = {
@@ -2498,6 +2695,7 @@ static int pn533_setup(struct pn533 *dev)
        case PN533_DEVICE_PASORI:
        case PN533_DEVICE_ACR122U:
        case PN533_DEVICE_PN532:
+       case PN533_DEVICE_PN532_AUTOPOLL:
                max_retries.mx_rty_atr = 0x2;
                max_retries.mx_rty_psl = 0x1;
                max_retries.mx_rty_passive_act =
@@ -2534,6 +2732,7 @@ static int pn533_setup(struct pn533 *dev)
        switch (dev->device_type) {
        case PN533_DEVICE_STD:
        case PN533_DEVICE_PN532:
+       case PN533_DEVICE_PN532_AUTOPOLL:
                break;
 
        case PN533_DEVICE_PASORI:
@@ -2580,14 +2779,12 @@ int pn533_finalize_setup(struct pn533 *dev)
 }
 EXPORT_SYMBOL_GPL(pn533_finalize_setup);
 
-struct pn533 *pn533_register_device(u32 device_type,
-                               u32 protocols,
+struct pn533 *pn53x_common_init(u32 device_type,
                                enum pn533_protocol_type protocol_type,
                                void *phy,
                                struct pn533_phy_ops *phy_ops,
                                struct pn533_frame_ops *fops,
-                               struct device *dev,
-                               struct device *parent)
+                               struct device *dev)
 {
        struct pn533 *priv;
        int rc = -ENOMEM;
@@ -2628,43 +2825,18 @@ struct pn533 *pn533_register_device(u32 device_type,
        skb_queue_head_init(&priv->fragment_skb);
 
        INIT_LIST_HEAD(&priv->cmd_queue);
-
-       priv->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
-                                          priv->ops->tx_header_len +
-                                          PN533_CMD_DATAEXCH_HEAD_LEN,
-                                          priv->ops->tx_tail_len);
-       if (!priv->nfc_dev) {
-               rc = -ENOMEM;
-               goto destroy_wq;
-       }
-
-       nfc_set_parent_dev(priv->nfc_dev, parent);
-       nfc_set_drvdata(priv->nfc_dev, priv);
-
-       rc = nfc_register_device(priv->nfc_dev);
-       if (rc)
-               goto free_nfc_dev;
-
        return priv;
 
-free_nfc_dev:
-       nfc_free_device(priv->nfc_dev);
-
-destroy_wq:
-       destroy_workqueue(priv->wq);
 error:
        kfree(priv);
        return ERR_PTR(rc);
 }
-EXPORT_SYMBOL_GPL(pn533_register_device);
+EXPORT_SYMBOL_GPL(pn53x_common_init);
 
-void pn533_unregister_device(struct pn533 *priv)
+void pn53x_common_clean(struct pn533 *priv)
 {
        struct pn533_cmd *cmd, *n;
 
-       nfc_unregister_device(priv->nfc_dev);
-       nfc_free_device(priv->nfc_dev);
-
        flush_delayed_work(&priv->poll_work);
        destroy_workqueue(priv->wq);
 
@@ -2679,8 +2851,47 @@ void pn533_unregister_device(struct pn533 *priv)
 
        kfree(priv);
 }
-EXPORT_SYMBOL_GPL(pn533_unregister_device);
+EXPORT_SYMBOL_GPL(pn53x_common_clean);
+
+int pn532_i2c_nfc_alloc(struct pn533 *priv, u32 protocols,
+                       struct device *parent)
+{
+       priv->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
+                                          priv->ops->tx_header_len +
+                                          PN533_CMD_DATAEXCH_HEAD_LEN,
+                                          priv->ops->tx_tail_len);
+       if (!priv->nfc_dev)
+               return -ENOMEM;
+
+       nfc_set_parent_dev(priv->nfc_dev, parent);
+       nfc_set_drvdata(priv->nfc_dev, priv);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(pn532_i2c_nfc_alloc);
 
+int pn53x_register_nfc(struct pn533 *priv, u32 protocols,
+                       struct device *parent)
+{
+       int rc;
+
+       rc = pn532_i2c_nfc_alloc(priv, protocols, parent);
+       if (rc)
+               return rc;
+
+       rc = nfc_register_device(priv->nfc_dev);
+       if (rc)
+               nfc_free_device(priv->nfc_dev);
+
+       return rc;
+}
+EXPORT_SYMBOL_GPL(pn53x_register_nfc);
+
+void pn53x_unregister_nfc(struct pn533 *priv)
+{
+       nfc_unregister_device(priv->nfc_dev);
+       nfc_free_device(priv->nfc_dev);
+}
+EXPORT_SYMBOL_GPL(pn53x_unregister_nfc);
 
 MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>");
 MODULE_AUTHOR("Aloisio Almeida Jr <aloisio.almeida@openbossa.org>");
index 8bf9d6e..b66f02a 100644 (file)
@@ -6,10 +6,11 @@
  * Copyright (C) 2012-2013 Tieto Poland
  */
 
-#define PN533_DEVICE_STD     0x1
-#define PN533_DEVICE_PASORI  0x2
-#define PN533_DEVICE_ACR122U 0x3
-#define PN533_DEVICE_PN532   0x4
+#define PN533_DEVICE_STD               0x1
+#define PN533_DEVICE_PASORI            0x2
+#define PN533_DEVICE_ACR122U           0x3
+#define PN533_DEVICE_PN532             0x4
+#define PN533_DEVICE_PN532_AUTOPOLL    0x5
 
 #define PN533_ALL_PROTOCOLS (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK |\
                             NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK |\
 
 /* Preamble (1), SoPC (2), ACK Code (2), Postamble (1) */
 #define PN533_STD_FRAME_ACK_SIZE 6
+/*
+ * Preamble (1), SoPC (2), Packet Length (1), Packet Length Checksum (1),
+ * Specific Application Level Error Code (1) , Postamble (1)
+ */
+#define PN533_STD_ERROR_FRAME_SIZE 8
 #define PN533_STD_FRAME_CHECKSUM(f) (f->data[f->datalen])
 #define PN533_STD_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1])
 /* Half start code (3), LEN (4) should be 0xffff for extended frame */
@@ -70,6 +76,7 @@
 #define PN533_CMD_IN_ATR 0x50
 #define PN533_CMD_IN_RELEASE 0x52
 #define PN533_CMD_IN_JUMP_FOR_DEP 0x56
+#define PN533_CMD_IN_AUTOPOLL 0x60
 
 #define PN533_CMD_TG_INIT_AS_TARGET 0x8c
 #define PN533_CMD_TG_GET_DATA 0x86
@@ -84,6 +91,9 @@
 #define PN533_CMD_MI_MASK 0x40
 #define PN533_CMD_RET_SUCCESS 0x00
 
+#define PN533_FRAME_DATALEN_ACK 0x00
+#define PN533_FRAME_DATALEN_ERROR 0x01
+#define PN533_FRAME_DATALEN_EXTENDED 0xFF
 
 enum  pn533_protocol_type {
        PN533_PROTO_REQ_ACK_RESP = 0,
@@ -207,21 +217,33 @@ struct pn533_phy_ops {
                          struct sk_buff *out);
        int (*send_ack)(struct pn533 *dev, gfp_t flags);
        void (*abort_cmd)(struct pn533 *priv, gfp_t flags);
+       /*
+        * dev_up and dev_down are optional.
+        * They are used to inform the phy layer that the nfc chip
+        * is going to be really used very soon. The phy layer can then
+        * bring up it's interface to the chip and have it suspended for power
+        * saving reasons otherwise.
+        */
+       void (*dev_up)(struct pn533 *priv);
+       void (*dev_down)(struct pn533 *priv);
 };
 
 
-struct pn533 *pn533_register_device(u32 device_type,
-                               u32 protocols,
+struct pn533 *pn53x_common_init(u32 device_type,
                                enum pn533_protocol_type protocol_type,
                                void *phy,
                                struct pn533_phy_ops *phy_ops,
                                struct pn533_frame_ops *fops,
-                               struct device *dev,
-                               struct device *parent);
+                               struct device *dev);
 
 int pn533_finalize_setup(struct pn533 *dev);
-void pn533_unregister_device(struct pn533 *priv);
+void pn53x_common_clean(struct pn533 *priv);
 void pn533_recv_frame(struct pn533 *dev, struct sk_buff *skb, int status);
+int pn532_i2c_nfc_alloc(struct pn533 *priv, u32 protocols,
+                       struct device *parent);
+int pn53x_register_nfc(struct pn533 *priv, u32 protocols,
+                       struct device *parent);
+void pn53x_unregister_nfc(struct pn533 *priv);
 
 bool pn533_rx_frame_is_cmd_response(struct pn533 *dev, void *frame);
 bool pn533_rx_frame_is_ack(void *_frame);
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
new file mode 100644 (file)
index 0000000..46e5ff1
--- /dev/null
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for NXP PN532 NFC Chip - UART transport layer
+ *
+ * Copyright (C) 2018 Lemonage Software GmbH
+ * Author: Lars Pöschel <poeschel@lemonage.de>
+ * All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/serdev.h>
+#include "pn533.h"
+
+#define PN532_UART_SKB_BUFF_LEN        (PN533_CMD_DATAEXCH_DATA_MAXLEN * 2)
+
+enum send_wakeup {
+       PN532_SEND_NO_WAKEUP = 0,
+       PN532_SEND_WAKEUP,
+       PN532_SEND_LAST_WAKEUP,
+};
+
+
+struct pn532_uart_phy {
+       struct serdev_device *serdev;
+       struct sk_buff *recv_skb;
+       struct pn533 *priv;
+       /*
+        * send_wakeup variable is used to control if we need to send a wakeup
+        * request to the pn532 chip prior to our actual command. There is a
+        * little probability of a race condition. We decided to not mutex the
+        * variable as the worst that could happen is, that we send a wakeup
+        * to the chip that is already awake. This does not hurt. It is a
+        * no-op to the chip.
+        */
+       enum send_wakeup send_wakeup;
+       struct timer_list cmd_timeout;
+       struct sk_buff *cur_out_buf;
+};
+
+static int pn532_uart_send_frame(struct pn533 *dev,
+                               struct sk_buff *out)
+{
+       /* wakeup sequence and dummy bytes for waiting time */
+       static const u8 wakeup[] = {
+               0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+       struct pn532_uart_phy *pn532 = dev->phy;
+       int err;
+
+       print_hex_dump_debug("PN532_uart TX: ", DUMP_PREFIX_NONE, 16, 1,
+                            out->data, out->len, false);
+
+       pn532->cur_out_buf = out;
+       if (pn532->send_wakeup) {
+               err = serdev_device_write(pn532->serdev,
+                               wakeup, sizeof(wakeup),
+                               MAX_SCHEDULE_TIMEOUT);
+               if (err < 0)
+                       return err;
+       }
+
+       if (pn532->send_wakeup == PN532_SEND_LAST_WAKEUP)
+               pn532->send_wakeup = PN532_SEND_NO_WAKEUP;
+
+       err = serdev_device_write(pn532->serdev, out->data, out->len,
+                       MAX_SCHEDULE_TIMEOUT);
+       if (err < 0)
+               return err;
+
+       mod_timer(&pn532->cmd_timeout, HZ / 40 + jiffies);
+       return 0;
+}
+
+static int pn532_uart_send_ack(struct pn533 *dev, gfp_t flags)
+{
+       /* spec 7.1.1.3:  Preamble, SoPC (2), ACK Code (2), Postamble */
+       static const u8 ack[PN533_STD_FRAME_ACK_SIZE] = {
+                       0x00, 0x00, 0xff, 0x00, 0xff, 0x00};
+       struct pn532_uart_phy *pn532 = dev->phy;
+       int err;
+
+       err = serdev_device_write(pn532->serdev, ack, sizeof(ack),
+                       MAX_SCHEDULE_TIMEOUT);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+static void pn532_uart_abort_cmd(struct pn533 *dev, gfp_t flags)
+{
+       /* An ack will cancel the last issued command */
+       pn532_uart_send_ack(dev, flags);
+       /* schedule cmd_complete_work to finish current command execution */
+       pn533_recv_frame(dev, NULL, -ENOENT);
+}
+
+static void pn532_dev_up(struct pn533 *dev)
+{
+       struct pn532_uart_phy *pn532 = dev->phy;
+
+       serdev_device_open(pn532->serdev);
+       pn532->send_wakeup = PN532_SEND_LAST_WAKEUP;
+}
+
+static void pn532_dev_down(struct pn533 *dev)
+{
+       struct pn532_uart_phy *pn532 = dev->phy;
+
+       serdev_device_close(pn532->serdev);
+       pn532->send_wakeup = PN532_SEND_WAKEUP;
+}
+
+static struct pn533_phy_ops uart_phy_ops = {
+       .send_frame = pn532_uart_send_frame,
+       .send_ack = pn532_uart_send_ack,
+       .abort_cmd = pn532_uart_abort_cmd,
+       .dev_up = pn532_dev_up,
+       .dev_down = pn532_dev_down,
+};
+
+static void pn532_cmd_timeout(struct timer_list *t)
+{
+       struct pn532_uart_phy *dev = from_timer(dev, t, cmd_timeout);
+
+       pn532_uart_send_frame(dev->priv, dev->cur_out_buf);
+}
+
+/*
+ * scans the buffer if it contains a pn532 frame. It is not checked if the
+ * frame is really valid. This is later done with pn533_rx_frame_is_valid.
+ * This is useful for malformed or erroneously transmitted frames. Adjusts the
+ * buffer position where the frame starts, since pn533_recv_frame expects a
+ * well formed frame.
+ */
+static int pn532_uart_rx_is_frame(struct sk_buff *skb)
+{
+       struct pn533_std_frame *std;
+       struct pn533_ext_frame *ext;
+       u16 frame_len;
+       int i;
+
+       for (i = 0; i + PN533_STD_FRAME_ACK_SIZE <= skb->len; i++) {
+               std = (struct pn533_std_frame *)&skb->data[i];
+               /* search start code */
+               if (std->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF))
+                       continue;
+
+               /* frame type */
+               switch (std->datalen) {
+               case PN533_FRAME_DATALEN_ACK:
+                       if (std->datalen_checksum == 0xff) {
+                               skb_pull(skb, i);
+                               return 1;
+                       }
+
+                       break;
+               case PN533_FRAME_DATALEN_ERROR:
+                       if ((std->datalen_checksum == 0xff) &&
+                                       (skb->len >=
+                                        PN533_STD_ERROR_FRAME_SIZE)) {
+                               skb_pull(skb, i);
+                               return 1;
+                       }
+
+                       break;
+               case PN533_FRAME_DATALEN_EXTENDED:
+                       ext = (struct pn533_ext_frame *)&skb->data[i];
+                       frame_len = be16_to_cpu(ext->datalen);
+                       if (skb->len >= frame_len +
+                                       sizeof(struct pn533_ext_frame) +
+                                       2 /* CKS + Postamble */) {
+                               skb_pull(skb, i);
+                               return 1;
+                       }
+
+                       break;
+               default: /* normal information frame */
+                       frame_len = std->datalen;
+                       if (skb->len >= frame_len +
+                                       sizeof(struct pn533_std_frame) +
+                                       2 /* CKS + Postamble */) {
+                               skb_pull(skb, i);
+                               return 1;
+                       }
+
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+static int pn532_receive_buf(struct serdev_device *serdev,
+               const unsigned char *data, size_t count)
+{
+       struct pn532_uart_phy *dev = serdev_device_get_drvdata(serdev);
+       size_t i;
+
+       del_timer(&dev->cmd_timeout);
+       for (i = 0; i < count; i++) {
+               skb_put_u8(dev->recv_skb, *data++);
+               if (!pn532_uart_rx_is_frame(dev->recv_skb))
+                       continue;
+
+               pn533_recv_frame(dev->priv, dev->recv_skb, 0);
+               dev->recv_skb = alloc_skb(PN532_UART_SKB_BUFF_LEN, GFP_KERNEL);
+               if (!dev->recv_skb)
+                       return 0;
+       }
+
+       return i;
+}
+
+static struct serdev_device_ops pn532_serdev_ops = {
+       .receive_buf = pn532_receive_buf,
+       .write_wakeup = serdev_device_write_wakeup,
+};
+
+static const struct of_device_id pn532_uart_of_match[] = {
+       { .compatible = "nxp,pn532", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, pn532_uart_of_match);
+
+static int pn532_uart_probe(struct serdev_device *serdev)
+{
+       struct pn532_uart_phy *pn532;
+       struct pn533 *priv;
+       int err;
+
+       err = -ENOMEM;
+       pn532 = kzalloc(sizeof(*pn532), GFP_KERNEL);
+       if (!pn532)
+               goto err_exit;
+
+       pn532->recv_skb = alloc_skb(PN532_UART_SKB_BUFF_LEN, GFP_KERNEL);
+       if (!pn532->recv_skb)
+               goto err_free;
+
+       pn532->serdev = serdev;
+       serdev_device_set_drvdata(serdev, pn532);
+       serdev_device_set_client_ops(serdev, &pn532_serdev_ops);
+       err = serdev_device_open(serdev);
+       if (err) {
+               dev_err(&serdev->dev, "Unable to open device\n");
+               goto err_skb;
+       }
+
+       err = serdev_device_set_baudrate(serdev, 115200);
+       if (err != 115200) {
+               err = -EINVAL;
+               goto err_serdev;
+       }
+
+       serdev_device_set_flow_control(serdev, false);
+       pn532->send_wakeup = PN532_SEND_WAKEUP;
+       timer_setup(&pn532->cmd_timeout, pn532_cmd_timeout, 0);
+       priv = pn53x_common_init(PN533_DEVICE_PN532_AUTOPOLL,
+                                    PN533_PROTO_REQ_ACK_RESP,
+                                    pn532, &uart_phy_ops, NULL,
+                                    &pn532->serdev->dev);
+       if (IS_ERR(priv)) {
+               err = PTR_ERR(priv);
+               goto err_serdev;
+       }
+
+       pn532->priv = priv;
+       err = pn533_finalize_setup(pn532->priv);
+       if (err)
+               goto err_clean;
+
+       serdev_device_close(serdev);
+       err = pn53x_register_nfc(priv, PN533_NO_TYPE_B_PROTOCOLS, &serdev->dev);
+       if (err) {
+               pn53x_common_clean(pn532->priv);
+               goto err_skb;
+       }
+
+       return err;
+
+err_clean:
+       pn53x_common_clean(pn532->priv);
+err_serdev:
+       serdev_device_close(serdev);
+err_skb:
+       kfree_skb(pn532->recv_skb);
+err_free:
+       kfree(pn532);
+err_exit:
+       return err;
+}
+
+static void pn532_uart_remove(struct serdev_device *serdev)
+{
+       struct pn532_uart_phy *pn532 = serdev_device_get_drvdata(serdev);
+
+       pn53x_unregister_nfc(pn532->priv);
+       serdev_device_close(serdev);
+       pn53x_common_clean(pn532->priv);
+       kfree_skb(pn532->recv_skb);
+       kfree(pn532);
+}
+
+static struct serdev_device_driver pn532_uart_driver = {
+       .probe = pn532_uart_probe,
+       .remove = pn532_uart_remove,
+       .driver = {
+               .name = "pn532_uart",
+               .of_match_table = of_match_ptr(pn532_uart_of_match),
+       },
+};
+
+module_serdev_device_driver(pn532_uart_driver);
+
+MODULE_AUTHOR("Lars Pöschel <poeschel@lemonage.de>");
+MODULE_DESCRIPTION("PN532 UART driver");
+MODULE_LICENSE("GPL");
index e897e4d..4590fbf 100644 (file)
@@ -534,9 +534,9 @@ static int pn533_usb_probe(struct usb_interface *interface,
                goto error;
        }
 
-       priv = pn533_register_device(id->driver_info, protocols, protocol_type,
+       priv = pn53x_common_init(id->driver_info, protocol_type,
                                        phy, &usb_phy_ops, fops,
-                                       &phy->udev->dev, &interface->dev);
+                                       &phy->udev->dev);
 
        if (IS_ERR(priv)) {
                rc = PTR_ERR(priv);
@@ -547,14 +547,17 @@ static int pn533_usb_probe(struct usb_interface *interface,
 
        rc = pn533_finalize_setup(priv);
        if (rc)
-               goto err_deregister;
+               goto err_clean;
 
        usb_set_intfdata(interface, phy);
+       rc = pn53x_register_nfc(priv, protocols, &interface->dev);
+       if (rc)
+               goto err_clean;
 
        return 0;
 
-err_deregister:
-       pn533_unregister_device(phy->priv);
+err_clean:
+       pn53x_common_clean(priv);
 error:
        usb_kill_urb(phy->in_urb);
        usb_kill_urb(phy->out_urb);
@@ -577,7 +580,8 @@ static void pn533_usb_disconnect(struct usb_interface *interface)
        if (!phy)
                return;
 
-       pn533_unregister_device(phy->priv);
+       pn53x_unregister_nfc(phy->priv);
+       pn53x_common_clean(phy->priv);
 
        usb_set_intfdata(interface, NULL);
 
index 30de7ef..fc99a40 100644 (file)
@@ -522,14 +522,13 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
        return 0;
 }
 
-static int nvme_read_ana_log(struct nvme_ctrl *ctrl, bool groups_only)
+static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
 {
        u32 nr_change_groups = 0;
        int error;
 
        mutex_lock(&ctrl->ana_lock);
-       error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA,
-                       groups_only ? NVME_ANA_LOG_RGO : 0,
+       error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0,
                        ctrl->ana_log_buf, ctrl->ana_log_size, 0);
        if (error) {
                dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
@@ -565,7 +564,7 @@ static void nvme_ana_work(struct work_struct *work)
 {
        struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
 
-       nvme_read_ana_log(ctrl, false);
+       nvme_read_ana_log(ctrl);
 }
 
 static void nvme_anatt_timeout(struct timer_list *t)
@@ -715,7 +714,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
                goto out;
        }
 
-       error = nvme_read_ana_log(ctrl, true);
+       error = nvme_read_ana_log(ctrl);
        if (error)
                goto out_free_ana_log_buf;
        return 0;
index 770dbcb..7544be8 100644 (file)
@@ -2219,7 +2219,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
        struct nvme_tcp_queue *queue = hctx->driver_data;
        struct sock *sk = queue->sock->sk;
 
-       if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue))
+       if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
                sk_busy_loop(sk, true);
        nvme_tcp_try_recv(queue);
        return queue->nr_cqe;
index 7989703..6bd610e 100644 (file)
@@ -324,8 +324,10 @@ int of_reserved_mem_device_init_by_idx(struct device *dev,
        if (!target)
                return -ENODEV;
 
-       if (!of_device_is_available(target))
+       if (!of_device_is_available(target)) {
+               of_node_put(target);
                return 0;
+       }
 
        rmem = __find_rmem(target);
        of_node_put(target);
index 480a21e..92e895d 100644 (file)
@@ -1207,6 +1207,7 @@ static int __init unittest_data_add(void)
        of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node);
        if (!unittest_data_node) {
                pr_warn("%s: No tree to attach; not running tests\n", __func__);
+               kfree(unittest_data);
                return -ENODATA;
        }
 
index 3b7ffd0..9ff0538 100644 (file)
@@ -1626,12 +1626,6 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
                        goto free_regulators;
                }
 
-               ret = regulator_enable(reg);
-               if (ret < 0) {
-                       regulator_put(reg);
-                       goto free_regulators;
-               }
-
                opp_table->regulators[i] = reg;
        }
 
@@ -1645,10 +1639,8 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
        return opp_table;
 
 free_regulators:
-       while (i--) {
-               regulator_disable(opp_table->regulators[i]);
-               regulator_put(opp_table->regulators[i]);
-       }
+       while (i != 0)
+               regulator_put(opp_table->regulators[--i]);
 
        kfree(opp_table->regulators);
        opp_table->regulators = NULL;
@@ -1674,10 +1666,8 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
        /* Make sure there are no concurrent readers while updating opp_table */
        WARN_ON(!list_empty(&opp_table->opp_list));
 
-       for (i = opp_table->regulator_count - 1; i >= 0; i--) {
-               regulator_disable(opp_table->regulators[i]);
+       for (i = opp_table->regulator_count - 1; i >= 0; i--)
                regulator_put(opp_table->regulators[i]);
-       }
 
        _free_set_opp_data(opp_table);
 
index 1813f5a..1cbb582 100644 (file)
@@ -77,8 +77,6 @@ static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
 {
        struct dev_pm_opp *opp;
 
-       lockdep_assert_held(&opp_table_lock);
-
        mutex_lock(&opp_table->lock);
 
        list_for_each_entry(opp, &opp_table->opp_list, node) {
@@ -665,6 +663,13 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
                return 0;
        }
 
+       /*
+        * Re-initialize list_kref every time we add static OPPs to the OPP
+        * table as the reference count may be 0 after the last time static OPPs
+        * were removed.
+        */
+       kref_init(&opp_table->list_kref);
+
        /* We have opp-table node now, iterate over it and add OPPs */
        for_each_available_child_of_node(opp_table->np, np) {
                opp = _opp_add_static_v2(opp_table, dev, np);
index 648ddb7..c6800d2 100644 (file)
@@ -87,7 +87,7 @@ FUNC_GROUP_DECL(MACLINK3, L23);
 
 #define K25 7
 SIG_EXPR_LIST_DECL_SESG(K25, MACLINK4, MACLINK4, SIG_DESC_SET(SCU410, 7));
-SIG_EXPR_LIST_DECL_SESG(K25, SDA14, SDA14, SIG_DESC_SET(SCU4B0, 7));
+SIG_EXPR_LIST_DECL_SESG(K25, SDA14, I2C14, SIG_DESC_SET(SCU4B0, 7));
 PIN_DECL_2(K25, GPIOA7, MACLINK4, SDA14);
 FUNC_GROUP_DECL(MACLINK4, K25);
 
@@ -1262,13 +1262,13 @@ GROUP_DECL(SPI1, AB11, AC11, AA11);
 #define AD11 206
 SIG_EXPR_LIST_DECL_SEMG(AD11, SPI1DQ2, QSPI1, SPI1, SIG_DESC_SET(SCU438, 14));
 SIG_EXPR_LIST_DECL_SEMG(AD11, TXD13, UART13G1, UART13,
-                       SIG_DESC_SET(SCU438, 14));
+                       SIG_DESC_CLEAR(SCU4B8, 2), SIG_DESC_SET(SCU4D8, 14));
 PIN_DECL_2(AD11, GPIOZ6, SPI1DQ2, TXD13);
 
 #define AF10 207
 SIG_EXPR_LIST_DECL_SEMG(AF10, SPI1DQ3, QSPI1, SPI1, SIG_DESC_SET(SCU438, 15));
 SIG_EXPR_LIST_DECL_SEMG(AF10, RXD13, UART13G1, UART13,
-                       SIG_DESC_SET(SCU438, 15));
+                       SIG_DESC_CLEAR(SCU4B8, 3), SIG_DESC_SET(SCU4D8, 15));
 PIN_DECL_2(AF10, GPIOZ7, SPI1DQ3, RXD13);
 
 GROUP_DECL(QSPI1, AB11, AC11, AA11, AD11, AF10);
@@ -1440,91 +1440,85 @@ FUNC_GROUP_DECL(RGMII2, D4, C2, C1, D3, E4, F5, D2, E3, D1, F4, E2, E1);
 FUNC_GROUP_DECL(RMII2, D4, C2, C1, D3, D2, D1, F4, E2, E1);
 
 #define AB4 232
-SIG_EXPR_LIST_DECL_SESG(AB4, SD3CLK, SD3, SIG_DESC_SET(SCU400, 24));
-PIN_DECL_1(AB4, GPIO18D0, SD3CLK);
+SIG_EXPR_LIST_DECL_SEMG(AB4, EMMCCLK, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 24));
+PIN_DECL_1(AB4, GPIO18D0, EMMCCLK);
 
 #define AA4 233
-SIG_EXPR_LIST_DECL_SESG(AA4, SD3CMD, SD3, SIG_DESC_SET(SCU400, 25));
-PIN_DECL_1(AA4, GPIO18D1, SD3CMD);
+SIG_EXPR_LIST_DECL_SEMG(AA4, EMMCCMD, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 25));
+PIN_DECL_1(AA4, GPIO18D1, EMMCCMD);
 
 #define AC4 234
-SIG_EXPR_LIST_DECL_SESG(AC4, SD3DAT0, SD3, SIG_DESC_SET(SCU400, 26));
-PIN_DECL_1(AC4, GPIO18D2, SD3DAT0);
+SIG_EXPR_LIST_DECL_SEMG(AC4, EMMCDAT0, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 26));
+PIN_DECL_1(AC4, GPIO18D2, EMMCDAT0);
 
 #define AA5 235
-SIG_EXPR_LIST_DECL_SESG(AA5, SD3DAT1, SD3, SIG_DESC_SET(SCU400, 27));
-PIN_DECL_1(AA5, GPIO18D3, SD3DAT1);
+SIG_EXPR_LIST_DECL_SEMG(AA5, EMMCDAT1, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 27));
+PIN_DECL_1(AA5, GPIO18D3, EMMCDAT1);
 
 #define Y5 236
-SIG_EXPR_LIST_DECL_SESG(Y5, SD3DAT2, SD3, SIG_DESC_SET(SCU400, 28));
-PIN_DECL_1(Y5, GPIO18D4, SD3DAT2);
+SIG_EXPR_LIST_DECL_SEMG(Y5, EMMCDAT2, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 28));
+PIN_DECL_1(Y5, GPIO18D4, EMMCDAT2);
 
 #define AB5 237
-SIG_EXPR_LIST_DECL_SESG(AB5, SD3DAT3, SD3, SIG_DESC_SET(SCU400, 29));
-PIN_DECL_1(AB5, GPIO18D5, SD3DAT3);
+SIG_EXPR_LIST_DECL_SEMG(AB5, EMMCDAT3, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 29));
+PIN_DECL_1(AB5, GPIO18D5, EMMCDAT3);
 
 #define AB6 238
-SIG_EXPR_LIST_DECL_SESG(AB6, SD3CD, SD3, SIG_DESC_SET(SCU400, 30));
-PIN_DECL_1(AB6, GPIO18D6, SD3CD);
+SIG_EXPR_LIST_DECL_SEMG(AB6, EMMCCD, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 30));
+PIN_DECL_1(AB6, GPIO18D6, EMMCCD);
 
 #define AC5 239
-SIG_EXPR_LIST_DECL_SESG(AC5, SD3WP, SD3, SIG_DESC_SET(SCU400, 31));
-PIN_DECL_1(AC5, GPIO18D7, SD3WP);
+SIG_EXPR_LIST_DECL_SEMG(AC5, EMMCWP, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 31));
+PIN_DECL_1(AC5, GPIO18D7, EMMCWP);
 
-FUNC_GROUP_DECL(SD3, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5);
+GROUP_DECL(EMMCG1, AB4, AA4, AC4, AB6, AC5);
+GROUP_DECL(EMMCG4, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5);
 
 #define Y1 240
 SIG_EXPR_LIST_DECL_SEMG(Y1, FWSPIDCS, FWSPID, FWSPID, SIG_DESC_SET(SCU500, 3));
 SIG_EXPR_LIST_DECL_SESG(Y1, VBCS, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y1, SD3DAT4, SD3DAT4, SIG_DESC_SET(SCU404, 0));
-PIN_DECL_3(Y1, GPIO18E0, FWSPIDCS, VBCS, SD3DAT4);
-FUNC_GROUP_DECL(SD3DAT4, Y1);
+SIG_EXPR_LIST_DECL_SEMG(Y1, EMMCDAT4, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 0));
+PIN_DECL_3(Y1, GPIO18E0, FWSPIDCS, VBCS, EMMCDAT4);
 
 #define Y2 241
 SIG_EXPR_LIST_DECL_SEMG(Y2, FWSPIDCK, FWSPID, FWSPID, SIG_DESC_SET(SCU500, 3));
 SIG_EXPR_LIST_DECL_SESG(Y2, VBCK, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y2, SD3DAT5, SD3DAT5, SIG_DESC_SET(SCU404, 1));
-PIN_DECL_3(Y2, GPIO18E1, FWSPIDCK, VBCK, SD3DAT5);
-FUNC_GROUP_DECL(SD3DAT5, Y2);
+SIG_EXPR_LIST_DECL_SEMG(Y2, EMMCDAT5, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 1));
+PIN_DECL_3(Y2, GPIO18E1, FWSPIDCK, VBCK, EMMCDAT5);
 
 #define Y3 242
 SIG_EXPR_LIST_DECL_SEMG(Y3, FWSPIDMOSI, FWSPID, FWSPID,
                        SIG_DESC_SET(SCU500, 3));
 SIG_EXPR_LIST_DECL_SESG(Y3, VBMOSI, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y3, SD3DAT6, SD3DAT6, SIG_DESC_SET(SCU404, 2));
-PIN_DECL_3(Y3, GPIO18E2, FWSPIDMOSI, VBMOSI, SD3DAT6);
-FUNC_GROUP_DECL(SD3DAT6, Y3);
+SIG_EXPR_LIST_DECL_SEMG(Y3, EMMCDAT6, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 2));
+PIN_DECL_3(Y3, GPIO18E2, FWSPIDMOSI, VBMOSI, EMMCDAT6);
 
 #define Y4 243
 SIG_EXPR_LIST_DECL_SEMG(Y4, FWSPIDMISO, FWSPID, FWSPID,
                        SIG_DESC_SET(SCU500, 3));
 SIG_EXPR_LIST_DECL_SESG(Y4, VBMISO, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y4, SD3DAT7, SD3DAT7, SIG_DESC_SET(SCU404, 3));
-PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, SD3DAT7);
-FUNC_GROUP_DECL(SD3DAT7, Y4);
+SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3));
+PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7);
 
 GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4);
 GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12);
+GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4);
 FUNC_DECL_2(FWSPID, FWSPID, FWQSPID);
 FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4);
-
+FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
 /*
  * FIXME: Confirm bits and priorities are the right way around for the
  * following 4 pins
  */
 #define AF25 244
-SIG_EXPR_LIST_DECL_SEMG(AF25, I3C3SCL, I3C3, I3C3, SIG_DESC_SET(SCU438, 20),
-                       SIG_DESC_SET(SCU4D8, 20));
-SIG_EXPR_LIST_DECL_SESG(AF25, FSI1CLK, FSI1, SIG_DESC_CLEAR(SCU438, 20),
-                       SIG_DESC_SET(SCU4D8, 20));
+SIG_EXPR_LIST_DECL_SEMG(AF25, I3C3SCL, I3C3, I3C3, SIG_DESC_SET(SCU438, 20));
+SIG_EXPR_LIST_DECL_SESG(AF25, FSI1CLK, FSI1, SIG_DESC_SET(SCU4D8, 20));
 PIN_DECL_(AF25, SIG_EXPR_LIST_PTR(AF25, I3C3SCL),
          SIG_EXPR_LIST_PTR(AF25, FSI1CLK));
 
 #define AE26 245
-SIG_EXPR_LIST_DECL_SEMG(AE26, I3C3SDA, I3C3, I3C3, SIG_DESC_SET(SCU438, 21),
-                       SIG_DESC_SET(SCU4D8, 21));
-SIG_EXPR_LIST_DECL_SESG(AE26, FSI1DATA, FSI1, SIG_DESC_CLEAR(SCU438, 21),
-                       SIG_DESC_SET(SCU4D8, 21));
+SIG_EXPR_LIST_DECL_SEMG(AE26, I3C3SDA, I3C3, I3C3, SIG_DESC_SET(SCU438, 21));
+SIG_EXPR_LIST_DECL_SESG(AE26, FSI1DATA, FSI1, SIG_DESC_SET(SCU4D8, 21));
 PIN_DECL_(AE26, SIG_EXPR_LIST_PTR(AE26, I3C3SDA),
          SIG_EXPR_LIST_PTR(AE26, FSI1DATA));
 
@@ -1533,18 +1527,14 @@ FUNC_DECL_2(I3C3, HVI3C3, I3C3);
 FUNC_GROUP_DECL(FSI1, AF25, AE26);
 
 #define AE25 246
-SIG_EXPR_LIST_DECL_SEMG(AE25, I3C4SCL, I3C4, I3C4, SIG_DESC_SET(SCU438, 22),
-                       SIG_DESC_SET(SCU4D8, 22));
-SIG_EXPR_LIST_DECL_SESG(AE25, FSI2CLK, FSI2, SIG_DESC_CLEAR(SCU438, 22),
-                       SIG_DESC_SET(SCU4D8, 22));
+SIG_EXPR_LIST_DECL_SEMG(AE25, I3C4SCL, I3C4, I3C4, SIG_DESC_SET(SCU438, 22));
+SIG_EXPR_LIST_DECL_SESG(AE25, FSI2CLK, FSI2, SIG_DESC_SET(SCU4D8, 22));
 PIN_DECL_(AE25, SIG_EXPR_LIST_PTR(AE25, I3C4SCL),
          SIG_EXPR_LIST_PTR(AE25, FSI2CLK));
 
 #define AF24 247
-SIG_EXPR_LIST_DECL_SEMG(AF24, I3C4SDA, I3C4, I3C4, SIG_DESC_SET(SCU438, 23),
-                       SIG_DESC_SET(SCU4D8, 23));
-SIG_EXPR_LIST_DECL_SESG(AF24, FSI2DATA, FSI2, SIG_DESC_CLEAR(SCU438, 23),
-                       SIG_DESC_SET(SCU4D8, 23));
+SIG_EXPR_LIST_DECL_SEMG(AF24, I3C4SDA, I3C4, I3C4, SIG_DESC_SET(SCU438, 23));
+SIG_EXPR_LIST_DECL_SESG(AF24, FSI2DATA, FSI2, SIG_DESC_SET(SCU4D8, 23));
 PIN_DECL_(AF24, SIG_EXPR_LIST_PTR(AF24, I3C4SDA),
          SIG_EXPR_LIST_PTR(AF24, FSI2DATA));
 
@@ -1574,6 +1564,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(A3),
        ASPEED_PINCTRL_PIN(AA11),
        ASPEED_PINCTRL_PIN(AA12),
+       ASPEED_PINCTRL_PIN(AA16),
+       ASPEED_PINCTRL_PIN(AA17),
        ASPEED_PINCTRL_PIN(AA23),
        ASPEED_PINCTRL_PIN(AA24),
        ASPEED_PINCTRL_PIN(AA25),
@@ -1585,6 +1577,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(AB11),
        ASPEED_PINCTRL_PIN(AB12),
        ASPEED_PINCTRL_PIN(AB15),
+       ASPEED_PINCTRL_PIN(AB16),
+       ASPEED_PINCTRL_PIN(AB17),
        ASPEED_PINCTRL_PIN(AB18),
        ASPEED_PINCTRL_PIN(AB19),
        ASPEED_PINCTRL_PIN(AB22),
@@ -1602,6 +1596,7 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(AC11),
        ASPEED_PINCTRL_PIN(AC12),
        ASPEED_PINCTRL_PIN(AC15),
+       ASPEED_PINCTRL_PIN(AC16),
        ASPEED_PINCTRL_PIN(AC17),
        ASPEED_PINCTRL_PIN(AC18),
        ASPEED_PINCTRL_PIN(AC19),
@@ -1619,6 +1614,7 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(AD12),
        ASPEED_PINCTRL_PIN(AD14),
        ASPEED_PINCTRL_PIN(AD15),
+       ASPEED_PINCTRL_PIN(AD16),
        ASPEED_PINCTRL_PIN(AD19),
        ASPEED_PINCTRL_PIN(AD20),
        ASPEED_PINCTRL_PIN(AD22),
@@ -1634,8 +1630,11 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(AE12),
        ASPEED_PINCTRL_PIN(AE14),
        ASPEED_PINCTRL_PIN(AE15),
+       ASPEED_PINCTRL_PIN(AE16),
        ASPEED_PINCTRL_PIN(AE18),
        ASPEED_PINCTRL_PIN(AE19),
+       ASPEED_PINCTRL_PIN(AE25),
+       ASPEED_PINCTRL_PIN(AE26),
        ASPEED_PINCTRL_PIN(AE7),
        ASPEED_PINCTRL_PIN(AE8),
        ASPEED_PINCTRL_PIN(AF10),
@@ -1643,6 +1642,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(AF12),
        ASPEED_PINCTRL_PIN(AF14),
        ASPEED_PINCTRL_PIN(AF15),
+       ASPEED_PINCTRL_PIN(AF24),
+       ASPEED_PINCTRL_PIN(AF25),
        ASPEED_PINCTRL_PIN(AF7),
        ASPEED_PINCTRL_PIN(AF8),
        ASPEED_PINCTRL_PIN(AF9),
@@ -1792,17 +1793,6 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(Y3),
        ASPEED_PINCTRL_PIN(Y4),
        ASPEED_PINCTRL_PIN(Y5),
-       ASPEED_PINCTRL_PIN(AB16),
-       ASPEED_PINCTRL_PIN(AA17),
-       ASPEED_PINCTRL_PIN(AB17),
-       ASPEED_PINCTRL_PIN(AE16),
-       ASPEED_PINCTRL_PIN(AC16),
-       ASPEED_PINCTRL_PIN(AA16),
-       ASPEED_PINCTRL_PIN(AD16),
-       ASPEED_PINCTRL_PIN(AF25),
-       ASPEED_PINCTRL_PIN(AE26),
-       ASPEED_PINCTRL_PIN(AE25),
-       ASPEED_PINCTRL_PIN(AF24),
 };
 
 static const struct aspeed_pin_group aspeed_g6_groups[] = {
@@ -1976,11 +1966,9 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
        ASPEED_PINCTRL_GROUP(SALT9G1),
        ASPEED_PINCTRL_GROUP(SD1),
        ASPEED_PINCTRL_GROUP(SD2),
-       ASPEED_PINCTRL_GROUP(SD3),
-       ASPEED_PINCTRL_GROUP(SD3DAT4),
-       ASPEED_PINCTRL_GROUP(SD3DAT5),
-       ASPEED_PINCTRL_GROUP(SD3DAT6),
-       ASPEED_PINCTRL_GROUP(SD3DAT7),
+       ASPEED_PINCTRL_GROUP(EMMCG1),
+       ASPEED_PINCTRL_GROUP(EMMCG4),
+       ASPEED_PINCTRL_GROUP(EMMCG8),
        ASPEED_PINCTRL_GROUP(SGPM1),
        ASPEED_PINCTRL_GROUP(SGPS1),
        ASPEED_PINCTRL_GROUP(SIOONCTRL),
@@ -2059,6 +2047,7 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
        ASPEED_PINCTRL_FUNC(ADC8),
        ASPEED_PINCTRL_FUNC(ADC9),
        ASPEED_PINCTRL_FUNC(BMCINT),
+       ASPEED_PINCTRL_FUNC(EMMC),
        ASPEED_PINCTRL_FUNC(ESPI),
        ASPEED_PINCTRL_FUNC(ESPIALT),
        ASPEED_PINCTRL_FUNC(FSI1),
@@ -2191,11 +2180,6 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
        ASPEED_PINCTRL_FUNC(SALT9),
        ASPEED_PINCTRL_FUNC(SD1),
        ASPEED_PINCTRL_FUNC(SD2),
-       ASPEED_PINCTRL_FUNC(SD3),
-       ASPEED_PINCTRL_FUNC(SD3DAT4),
-       ASPEED_PINCTRL_FUNC(SD3DAT5),
-       ASPEED_PINCTRL_FUNC(SD3DAT6),
-       ASPEED_PINCTRL_FUNC(SD3DAT7),
        ASPEED_PINCTRL_FUNC(SGPM1),
        ASPEED_PINCTRL_FUNC(SGPS1),
        ASPEED_PINCTRL_FUNC(SIOONCTRL),
index a2c0d52..140c5ce 100644 (file)
@@ -508,7 +508,7 @@ struct aspeed_pin_desc {
  * @idx: The bit index in the register
  */
 #define SIG_DESC_SET(reg, idx) SIG_DESC_IP_BIT(ASPEED_IP_SCU, reg, idx, 1)
-#define SIG_DESC_CLEAR(reg, idx) SIG_DESC_IP_BIT(ASPEED_IP_SCU, reg, idx, 0)
+#define SIG_DESC_CLEAR(reg, idx) { ASPEED_IP_SCU, reg, BIT_MASK(idx), 0, 0 }
 
 #define SIG_DESC_LIST_SYM(sig, group) sig_descs_ ## sig ## _ ## group
 #define SIG_DESC_LIST_DECL(sig, group, ...) \
@@ -738,6 +738,7 @@ struct aspeed_pin_desc {
        static const char *FUNC_SYM(func)[] = { __VA_ARGS__ }
 
 #define FUNC_DECL_2(func, one, two) FUNC_DECL_(func, #one, #two)
+#define FUNC_DECL_3(func, one, two, three) FUNC_DECL_(func, #one, #two, #three)
 
 #define FUNC_GROUP_DECL(func, ...) \
        GROUP_DECL(func, __VA_ARGS__); \
index 6f7d3a2..42f7ab3 100644 (file)
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2014-2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 /*
@@ -853,7 +845,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
 
        /* optional GPIO interrupt support */
        irq = platform_get_irq(pdev, 0);
-       if (irq) {
+       if (irq > 0) {
                struct irq_chip *irqc;
                struct gpio_irq_chip *girq;
 
index 2bf6af7..9fabc45 100644 (file)
@@ -640,8 +640,8 @@ static int ns2_pinmux_enable(struct pinctrl_dev *pctrl_dev,
        const struct ns2_pin_function *func;
        const struct ns2_pin_group *grp;
 
-       if (grp_select > pinctrl->num_groups ||
-               func_select > pinctrl->num_functions)
+       if (grp_select >= pinctrl->num_groups ||
+               func_select >= pinctrl->num_functions)
                return -EINVAL;
 
        func = &pinctrl->functions[func_select];
index 44f8ccd..9dfdc27 100644 (file)
@@ -43,7 +43,7 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = {
                        BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO5 */
                        BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO3 */
                        BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM5 */
-                       BERLIN_PINCTRL_FUNCTION(0x3, "spififib"), /* SPDIFIB */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "spdifib"), /* SPDIFIB */
                        BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
                        BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG5 */
        BERLIN_PINCTRL_GROUP("I2S1_MCLK", 0x0, 0x3, 0x12,
index aae51c5..c6251ea 100644 (file)
@@ -1513,7 +1513,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
                        DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {
@@ -1521,7 +1520,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {
@@ -1529,7 +1527,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {
@@ -1537,7 +1534,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {}
index 1f13bcd..bc01359 100644 (file)
@@ -96,6 +96,7 @@ struct intel_pinctrl_context {
  * @pctldesc: Pin controller description
  * @pctldev: Pointer to the pin controller device
  * @chip: GPIO chip in this pin controller
+ * @irqchip: IRQ chip in this pin controller
  * @soc: SoC/PCH specific pin configuration data
  * @communities: All communities in this pin controller
  * @ncommunities: Number of communities in this pin controller
@@ -108,6 +109,7 @@ struct intel_pinctrl {
        struct pinctrl_desc pctldesc;
        struct pinctrl_dev *pctldev;
        struct gpio_chip chip;
+       struct irq_chip irqchip;
        const struct intel_pinctrl_soc_data *soc;
        struct intel_community *communities;
        size_t ncommunities;
@@ -1139,16 +1141,6 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
        return ret;
 }
 
-static struct irq_chip intel_gpio_irqchip = {
-       .name = "intel-gpio",
-       .irq_ack = intel_gpio_irq_ack,
-       .irq_mask = intel_gpio_irq_mask,
-       .irq_unmask = intel_gpio_irq_unmask,
-       .irq_set_type = intel_gpio_irq_type,
-       .irq_set_wake = intel_gpio_irq_wake,
-       .flags = IRQCHIP_MASK_ON_SUSPEND,
-};
-
 static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl,
                                     const struct intel_community *community)
 {
@@ -1198,12 +1190,22 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
 
        pctrl->chip = intel_gpio_chip;
 
+       /* Setup GPIO chip */
        pctrl->chip.ngpio = intel_gpio_ngpio(pctrl);
        pctrl->chip.label = dev_name(pctrl->dev);
        pctrl->chip.parent = pctrl->dev;
        pctrl->chip.base = -1;
        pctrl->irq = irq;
 
+       /* Setup IRQ chip */
+       pctrl->irqchip.name = dev_name(pctrl->dev);
+       pctrl->irqchip.irq_ack = intel_gpio_irq_ack;
+       pctrl->irqchip.irq_mask = intel_gpio_irq_mask;
+       pctrl->irqchip.irq_unmask = intel_gpio_irq_unmask;
+       pctrl->irqchip.irq_set_type = intel_gpio_irq_type;
+       pctrl->irqchip.irq_set_wake = intel_gpio_irq_wake;
+       pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
        ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
        if (ret) {
                dev_err(pctrl->dev, "failed to register gpiochip\n");
@@ -1233,15 +1235,14 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
                return ret;
        }
 
-       ret = gpiochip_irqchip_add(&pctrl->chip, &intel_gpio_irqchip, 0,
+       ret = gpiochip_irqchip_add(&pctrl->chip, &pctrl->irqchip, 0,
                                   handle_bad_irq, IRQ_TYPE_NONE);
        if (ret) {
                dev_err(pctrl->dev, "failed to add irqchip\n");
                return ret;
        }
 
-       gpiochip_set_chained_irqchip(&pctrl->chip, &intel_gpio_irqchip, irq,
-                                    NULL);
+       gpiochip_set_chained_irqchip(&pctrl->chip, &pctrl->irqchip, irq, NULL);
        return 0;
 }
 
index 6462d3c..f2f5fcd 100644 (file)
@@ -183,10 +183,10 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
        PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
                      BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
                      18, 2, "gpio", "uart"),
-       PIN_GRP_GPIO("led0_od", 11, 1, BIT(20), "led"),
-       PIN_GRP_GPIO("led1_od", 12, 1, BIT(21), "led"),
-       PIN_GRP_GPIO("led2_od", 13, 1, BIT(22), "led"),
-       PIN_GRP_GPIO("led3_od", 14, 1, BIT(23), "led"),
+       PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
+       PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
+       PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
+       PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
 
 };
 
@@ -221,11 +221,11 @@ static const struct armada_37xx_pin_data armada_37xx_pin_sb = {
 };
 
 static inline void armada_37xx_update_reg(unsigned int *reg,
-                                         unsigned int offset)
+                                         unsigned int *offset)
 {
        /* We never have more than 2 registers */
-       if (offset >= GPIO_PER_REG) {
-               offset -= GPIO_PER_REG;
+       if (*offset >= GPIO_PER_REG) {
+               *offset -= GPIO_PER_REG;
                *reg += sizeof(u32);
        }
 }
@@ -376,7 +376,7 @@ static inline void armada_37xx_irq_update_reg(unsigned int *reg,
 {
        int offset = irqd_to_hwirq(d);
 
-       armada_37xx_update_reg(reg, offset);
+       armada_37xx_update_reg(reg, &offset);
 }
 
 static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
@@ -386,7 +386,7 @@ static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
        unsigned int reg = OUTPUT_EN;
        unsigned int mask;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
 
        return regmap_update_bits(info->regmap, reg, mask, 0);
@@ -399,7 +399,7 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip,
        unsigned int reg = OUTPUT_EN;
        unsigned int val, mask;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
        regmap_read(info->regmap, reg, &val);
 
@@ -413,7 +413,7 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
        unsigned int reg = OUTPUT_EN;
        unsigned int mask, val, ret;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
 
        ret = regmap_update_bits(info->regmap, reg, mask, mask);
@@ -434,7 +434,7 @@ static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
        unsigned int reg = INPUT_VAL;
        unsigned int val, mask;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
 
        regmap_read(info->regmap, reg, &val);
@@ -449,7 +449,7 @@ static void armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
        unsigned int reg = OUTPUT_VAL;
        unsigned int mask, val;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
        val = value ? mask : 0;
 
index 9749737..5646600 100644 (file)
@@ -705,7 +705,7 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
 
 static int stmfx_pinctrl_remove(struct platform_device *pdev)
 {
-       struct stmfx *stmfx = dev_get_platdata(&pdev->dev);
+       struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent);
 
        return stmfx_function_disable(stmfx,
                                      STMFX_FUNC_GPIO |
index afe9447..a46be22 100644 (file)
@@ -5053,6 +5053,19 @@ regulator_register(const struct regulator_desc *regulator_desc,
 
        init_data = regulator_of_get_init_data(dev, regulator_desc, config,
                                               &rdev->dev.of_node);
+
+       /*
+        * Sometimes not all resources are probed already so we need to take
+        * that into account. This happens most the time if the ena_gpiod comes
+        * from a gpio extender or something else.
+        */
+       if (PTR_ERR(init_data) == -EPROBE_DEFER) {
+               kfree(config);
+               kfree(rdev);
+               ret = -EPROBE_DEFER;
+               goto rinse;
+       }
+
        /*
         * We need to keep track of any GPIO descriptor coming from the
         * device tree until we have handled it over to the core. If the
index 56f3f72..710e670 100644 (file)
@@ -136,7 +136,6 @@ static int da9062_buck_set_mode(struct regulator_dev *rdev, unsigned mode)
 static unsigned da9062_buck_get_mode(struct regulator_dev *rdev)
 {
        struct da9062_regulator *regl = rdev_get_drvdata(rdev);
-       struct regmap_field *field;
        unsigned int val, mode = 0;
        int ret;
 
@@ -158,18 +157,7 @@ static unsigned da9062_buck_get_mode(struct regulator_dev *rdev)
                return REGULATOR_MODE_NORMAL;
        }
 
-       /* Detect current regulator state */
-       ret = regmap_field_read(regl->suspend, &val);
-       if (ret < 0)
-               return 0;
-
-       /* Read regulator mode from proper register, depending on state */
-       if (val)
-               field = regl->suspend_sleep;
-       else
-               field = regl->sleep;
-
-       ret = regmap_field_read(field, &val);
+       ret = regmap_field_read(regl->sleep, &val);
        if (ret < 0)
                return 0;
 
@@ -208,21 +196,9 @@ static int da9062_ldo_set_mode(struct regulator_dev *rdev, unsigned mode)
 static unsigned da9062_ldo_get_mode(struct regulator_dev *rdev)
 {
        struct da9062_regulator *regl = rdev_get_drvdata(rdev);
-       struct regmap_field *field;
        int ret, val;
 
-       /* Detect current regulator state */
-       ret = regmap_field_read(regl->suspend, &val);
-       if (ret < 0)
-               return 0;
-
-       /* Read regulator mode from proper register, depending on state */
-       if (val)
-               field = regl->suspend_sleep;
-       else
-               field = regl->sleep;
-
-       ret = regmap_field_read(field, &val);
+       ret = regmap_field_read(regl->sleep, &val);
        if (ret < 0)
                return 0;
 
@@ -408,10 +384,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK1_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK1_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK1_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK1_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK1_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK1_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK1_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9061_ID_BUCK2,
@@ -444,10 +420,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK3_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK3_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK3_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK3_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK3_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK3_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK3_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9061_ID_BUCK3,
@@ -480,10 +456,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK4_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK4_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK4_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK4_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK4_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK4_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK4_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9061_ID_LDO1,
@@ -509,10 +485,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO1_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO1_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO1_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO1_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO1_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO1_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO1_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO1_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -542,10 +518,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO2_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO2_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO2_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO2_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO2_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO2_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO2_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO2_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -575,10 +551,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO3_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO3_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO3_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO3_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO3_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO3_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO3_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO3_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -608,10 +584,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO4_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO4_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO4_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO4_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO4_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO4_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO4_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO4_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -652,10 +628,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK1_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK1_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK1_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK1_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK1_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK1_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK1_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9062_ID_BUCK2,
@@ -688,10 +664,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK2_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK2_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK2_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK2_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK2_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK2_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK2_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9062_ID_BUCK3,
@@ -724,10 +700,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK3_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK3_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK3_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK3_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK3_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK3_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK3_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9062_ID_BUCK4,
@@ -760,10 +736,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK4_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK4_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK4_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK4_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK4_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK4_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK4_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9062_ID_LDO1,
@@ -789,10 +765,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO1_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO1_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO1_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO1_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO1_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO1_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO1_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO1_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -822,10 +798,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO2_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO2_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO2_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO2_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO2_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO2_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO2_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO2_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -855,10 +831,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO3_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO3_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO3_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO3_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO3_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO3_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO3_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO3_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -888,10 +864,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO4_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO4_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO4_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO4_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO4_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO4_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO4_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO4_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
index d90a6fd..f815330 100644 (file)
@@ -144,8 +144,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct fixed_voltage_config *config;
        struct fixed_voltage_data *drvdata;
-       const struct fixed_dev_type *drvtype =
-               of_match_device(dev->driver->of_match_table, dev)->data;
+       const struct fixed_dev_type *drvtype = of_device_get_match_data(dev);
        struct regulator_config cfg = { };
        enum gpiod_flags gflags;
        int ret;
@@ -177,7 +176,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
        drvdata->desc.type = REGULATOR_VOLTAGE;
        drvdata->desc.owner = THIS_MODULE;
 
-       if (drvtype->has_enable_clock) {
+       if (drvtype && drvtype->has_enable_clock) {
                drvdata->desc.ops = &fixed_voltage_clkenabled_ops;
 
                drvdata->enable_clock = devm_clk_get(dev, NULL);
index ff97cc5..9b05e03 100644 (file)
@@ -210,6 +210,7 @@ static const struct regulator_desc lochnagar_regulators[] = {
 
                .enable_time = 3000,
                .ramp_delay = 1000,
+               .off_on_delay = 15000,
 
                .owner = THIS_MODULE,
        },
index afefb29..87637eb 100644 (file)
@@ -231,12 +231,12 @@ static int of_get_regulation_constraints(struct device *dev,
                                        "regulator-off-in-suspend"))
                        suspend_state->enabled = DISABLE_IN_SUSPEND;
 
-               if (!of_property_read_u32(np, "regulator-suspend-min-microvolt",
-                                         &pval))
+               if (!of_property_read_u32(suspend_np,
+                               "regulator-suspend-min-microvolt", &pval))
                        suspend_state->min_uV = pval;
 
-               if (!of_property_read_u32(np, "regulator-suspend-max-microvolt",
-                                         &pval))
+               if (!of_property_read_u32(suspend_np,
+                               "regulator-suspend-max-microvolt", &pval))
                        suspend_state->max_uV = pval;
 
                if (!of_property_read_u32(suspend_np,
@@ -445,11 +445,20 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
                goto error;
        }
 
-       if (desc->of_parse_cb && desc->of_parse_cb(child, desc, config)) {
-               dev_err(dev,
-                       "driver callback failed to parse DT for regulator %pOFn\n",
-                       child);
-               goto error;
+       if (desc->of_parse_cb) {
+               int ret;
+
+               ret = desc->of_parse_cb(child, desc, config);
+               if (ret) {
+                       if (ret == -EPROBE_DEFER) {
+                               of_node_put(child);
+                               return ERR_PTR(-EPROBE_DEFER);
+                       }
+                       dev_err(dev,
+                               "driver callback failed to parse DT for regulator %pOFn\n",
+                               child);
+                       goto error;
+               }
        }
 
        *node = child;
index df5df1c..6895379 100644 (file)
@@ -788,7 +788,13 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
 
                /* SW2~SW4 high bit check and modify the voltage value table */
                if (i >= sw_check_start && i <= sw_check_end) {
-                       regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
+                       ret = regmap_read(pfuze_chip->regmap,
+                                               desc->vsel_reg, &val);
+                       if (ret) {
+                               dev_err(&client->dev, "Fails to read from the register.\n");
+                               return ret;
+                       }
+
                        if (val & sw_hi) {
                                if (pfuze_chip->chip_id == PFUZE3000 ||
                                        pfuze_chip->chip_id == PFUZE3001) {
index db6c085..0246b6f 100644 (file)
@@ -735,8 +735,8 @@ static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
 static const struct rpmh_vreg_hw_data pmic5_bob = {
        .regulator_type = VRM,
        .ops = &rpmh_regulator_vrm_bypass_ops,
-       .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 135, 32000),
-       .n_voltages = 136,
+       .voltage_range = REGULATOR_LINEAR_RANGE(3000000, 0, 31, 32000),
+       .n_voltages = 32,
        .pmic_mode_map = pmic_mode_map_pmic5_bob,
        .of_map_mode = rpmh_regulator_pmic4_bob_of_map_mode,
 };
index cced1ff..89b9314 100644 (file)
@@ -173,19 +173,14 @@ static int ti_abb_wait_txdone(struct device *dev, struct ti_abb *abb)
        while (timeout++ <= abb->settling_time) {
                status = ti_abb_check_txdone(abb);
                if (status)
-                       break;
+                       return 0;
 
                udelay(1);
        }
 
-       if (timeout > abb->settling_time) {
-               dev_warn_ratelimited(dev,
-                                    "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
-                                    __func__, timeout, readl(abb->int_base));
-               return -ETIMEDOUT;
-       }
-
-       return 0;
+       dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
+                            __func__, timeout, readl(abb->int_base));
+       return -ETIMEDOUT;
 }
 
 /**
@@ -205,19 +200,14 @@ static int ti_abb_clear_all_txdone(struct device *dev, const struct ti_abb *abb)
 
                status = ti_abb_check_txdone(abb);
                if (!status)
-                       break;
+                       return 0;
 
                udelay(1);
        }
 
-       if (timeout > abb->settling_time) {
-               dev_warn_ratelimited(dev,
-                                    "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
-                                    __func__, timeout, readl(abb->int_base));
-               return -ETIMEDOUT;
-       }
-
-       return 0;
+       dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
+                            __func__, timeout, readl(abb->int_base));
+       return -ETIMEDOUT;
 }
 
 /**
index a58b45d..2a34a2a 100644 (file)
@@ -82,6 +82,7 @@ enum qdio_irq_states {
 #define QDIO_SIGA_WRITE                0x00
 #define QDIO_SIGA_READ         0x01
 #define QDIO_SIGA_SYNC         0x02
+#define QDIO_SIGA_WRITEM       0x03
 #define QDIO_SIGA_WRITEQ       0x04
 #define QDIO_SIGA_QEBSM_FLAG   0x80
 
index 5b63c50..7368407 100644 (file)
@@ -310,18 +310,19 @@ static inline int qdio_siga_sync_q(struct qdio_q *q)
                return qdio_siga_sync(q, q->mask, 0);
 }
 
-static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
-       unsigned long aob)
+static int qdio_siga_output(struct qdio_q *q, unsigned int count,
+                           unsigned int *busy_bit, unsigned long aob)
 {
        unsigned long schid = *((u32 *) &q->irq_ptr->schid);
        unsigned int fc = QDIO_SIGA_WRITE;
        u64 start_time = 0;
        int retries = 0, cc;
-       unsigned long laob = 0;
 
-       if (aob) {
-               fc = QDIO_SIGA_WRITEQ;
-               laob = aob;
+       if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
+               if (count > 1)
+                       fc = QDIO_SIGA_WRITEM;
+               else if (aob)
+                       fc = QDIO_SIGA_WRITEQ;
        }
 
        if (is_qebsm(q)) {
@@ -329,7 +330,7 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
                fc |= QDIO_SIGA_QEBSM_FLAG;
        }
 again:
-       cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
+       cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);
 
        /* hipersocket busy condition */
        if (unlikely(*busy_bit)) {
@@ -781,7 +782,8 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
        return count;
 }
 
-static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
+static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
+                               unsigned long aob)
 {
        int retries = 0, cc;
        unsigned int busy_bit;
@@ -793,7 +795,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
 retry:
        qperf_inc(q, siga_write);
 
-       cc = qdio_siga_output(q, &busy_bit, aob);
+       cc = qdio_siga_output(q, count, &busy_bit, aob);
        switch (cc) {
        case 0:
                break;
@@ -1526,7 +1528,7 @@ set:
  * @count: how many buffers are filled
  */
 static int handle_outbound(struct qdio_q *q, unsigned int callflags,
-                          int bufnr, int count)
+                          unsigned int bufnr, unsigned int count)
 {
        const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
        unsigned char state = 0;
@@ -1549,13 +1551,10 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
        if (queue_type(q) == QDIO_IQDIO_QFMT) {
                unsigned long phys_aob = 0;
 
-               /* One SIGA-W per buffer required for unicast HSI */
-               WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
-
-               if (q->u.out.use_cq)
+               if (q->u.out.use_cq && count == 1)
                        phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
 
-               rc = qdio_kick_outbound_q(q, phys_aob);
+               rc = qdio_kick_outbound_q(q, count, phys_aob);
        } else if (need_siga_sync(q)) {
                rc = qdio_siga_sync_q(q);
        } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
@@ -1564,7 +1563,7 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
                /* The previous buffer is not processed yet, tack on. */
                qperf_inc(q, fast_requeue);
        } else {
-               rc = qdio_kick_outbound_q(q, 0);
+               rc = qdio_kick_outbound_q(q, count, 0);
        }
 
        /* Let drivers implement their own completion scanning: */
index 45bdb47..9157e72 100644 (file)
@@ -522,8 +522,7 @@ static int zcrypt_release(struct inode *inode, struct file *filp)
        if (filp->f_inode->i_cdev == &zcrypt_cdev) {
                struct zcdn_device *zcdndev;
 
-               if (mutex_lock_interruptible(&ap_perms_mutex))
-                       return -ERESTARTSYS;
+               mutex_lock(&ap_perms_mutex);
                zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
                mutex_unlock(&ap_perms_mutex);
                if (zcdndev) {
index e4b55f9..d081545 100644 (file)
@@ -532,6 +532,8 @@ struct qeth_qdio_out_q {
        struct timer_list timer;
        struct qeth_hdr *prev_hdr;
        u8 bulk_start;
+       u8 bulk_count;
+       u8 bulk_max;
 };
 
 #define qeth_for_each_output_queue(card, q, i)         \
@@ -878,6 +880,13 @@ static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
        return txq;
 }
 
+static inline bool qeth_iqd_is_mcast_queue(struct qeth_card *card,
+                                          struct qeth_qdio_out_q *queue)
+{
+       return qeth_iqd_translate_txq(card->dev, queue->queue_no) ==
+              QETH_IQD_MCAST_TXQ;
+}
+
 static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
                                          unsigned int elements)
 {
index dda2743..9e8bd8e 100644 (file)
@@ -1513,7 +1513,6 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
        rc = qeth_clear_halt_card(card, use_halt);
        if (rc)
                QETH_CARD_TEXT_(card, 3, "2err%d", rc);
-       card->state = CARD_STATE_DOWN;
        return rc;
 }
 EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
@@ -2634,6 +2633,18 @@ static int qeth_init_input_buffer(struct qeth_card *card,
        return 0;
 }
 
+static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
+                                           struct qeth_qdio_out_q *queue)
+{
+       if (!IS_IQD(card) ||
+           qeth_iqd_is_mcast_queue(card, queue) ||
+           card->options.cq == QETH_CQ_ENABLED ||
+           qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
+               return 1;
+
+       return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
+}
+
 int qeth_init_qdio_queues(struct qeth_card *card)
 {
        unsigned int i;
@@ -2673,6 +2684,8 @@ int qeth_init_qdio_queues(struct qeth_card *card)
                queue->do_pack = 0;
                queue->prev_hdr = NULL;
                queue->bulk_start = 0;
+               queue->bulk_count = 0;
+               queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
                atomic_set(&queue->used_buffers, 0);
                atomic_set(&queue->set_pci_flags_count, 0);
                atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
@@ -3107,7 +3120,7 @@ static void qeth_queue_input_buffer(struct qeth_card *card, int index)
                for (i = queue->next_buf_to_init;
                     i < queue->next_buf_to_init + count; ++i) {
                        if (qeth_init_input_buffer(card,
-                               &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
+                               &queue->bufs[QDIO_BUFNR(i)])) {
                                break;
                        } else {
                                newcount++;
@@ -3149,8 +3162,8 @@ static void qeth_queue_input_buffer(struct qeth_card *card, int index)
                if (rc) {
                        QETH_CARD_TEXT(card, 2, "qinberr");
                }
-               queue->next_buf_to_init = (queue->next_buf_to_init + count) %
-                                         QDIO_MAX_BUFFERS_PER_Q;
+               queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
+                                                    count);
        }
 }
 
@@ -3198,7 +3211,7 @@ static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
                /* it's a packing buffer */
                atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
                queue->next_buf_to_fill =
-                       (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
+                       QDIO_BUFNR(queue->next_buf_to_fill + 1);
                return 1;
        }
        return 0;
@@ -3252,7 +3265,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
        unsigned int qdio_flags;
 
        for (i = index; i < index + count; ++i) {
-               int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+               unsigned int bidx = QDIO_BUFNR(i);
+
                buf = queue->bufs[bidx];
                buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
                                SBAL_EFLAGS_LAST_ENTRY;
@@ -3318,10 +3332,11 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 
 static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
 {
-       qeth_flush_buffers(queue, queue->bulk_start, 1);
+       qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
 
-       queue->bulk_start = QDIO_BUFNR(queue->bulk_start + 1);
+       queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
        queue->prev_hdr = NULL;
+       queue->bulk_count = 0;
 }
 
 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
@@ -3419,8 +3434,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
        }
 
        for (i = first_element; i < first_element + count; ++i) {
-               int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
-               struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
+               struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
                int e = 0;
 
                while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
@@ -3441,8 +3455,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
                        "QDIO reported an error, rc=%i\n", rc);
                QETH_CARD_TEXT(card, 2, "qcqherr");
        }
-       card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
-                                  + count) % QDIO_MAX_BUFFERS_PER_Q;
+
+       cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
 }
 
 static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
@@ -3468,7 +3482,6 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
 {
        struct qeth_card *card        = (struct qeth_card *) card_ptr;
        struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
-       struct qeth_qdio_out_buffer *buffer;
        struct net_device *dev = card->dev;
        struct netdev_queue *txq;
        int i;
@@ -3482,10 +3495,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
        }
 
        for (i = first_element; i < (first_element + count); ++i) {
-               int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
-               buffer = queue->bufs[bidx];
-               qeth_handle_send_error(card, buffer, qdio_error);
-               qeth_clear_output_buffer(queue, buffer, qdio_error, 0);
+               struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];
+
+               qeth_handle_send_error(card, buf, qdio_error);
+               qeth_clear_output_buffer(queue, buf, qdio_error, 0);
        }
 
        atomic_sub(count, &queue->used_buffers);
@@ -3680,10 +3693,10 @@ check_layout:
 }
 
 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
-                             struct qeth_qdio_out_buffer *buffer,
                              struct sk_buff *curr_skb,
                              struct qeth_hdr *curr_hdr)
 {
+       struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
        struct qeth_hdr *prev_hdr = queue->prev_hdr;
 
        if (!prev_hdr)
@@ -3803,13 +3816,14 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
                       struct qeth_hdr *hdr, unsigned int offset,
                       unsigned int hd_len)
 {
-       struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
        unsigned int bytes = qdisc_pkt_len(skb);
+       struct qeth_qdio_out_buffer *buffer;
        unsigned int next_element;
        struct netdev_queue *txq;
        bool stopped = false;
        bool flush;
 
+       buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
        txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
 
        /* Just a sanity check, the wake/stop logic should ensure that we always
@@ -3818,11 +3832,23 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
        if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
                return -EBUSY;
 
-       if ((buffer->next_element_to_fill + elements > queue->max_elements) ||
-           !qeth_iqd_may_bulk(queue, buffer, skb, hdr)) {
-               atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
-               qeth_flush_queue(queue);
-               buffer = queue->bufs[queue->bulk_start];
+       flush = !qeth_iqd_may_bulk(queue, skb, hdr);
+
+       if (flush ||
+           (buffer->next_element_to_fill + elements > queue->max_elements)) {
+               if (buffer->next_element_to_fill > 0) {
+                       atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+                       queue->bulk_count++;
+               }
+
+               if (queue->bulk_count >= queue->bulk_max)
+                       flush = true;
+
+               if (flush)
+                       qeth_flush_queue(queue);
+
+               buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
+                                               queue->bulk_count)];
 
                /* Sanity-check again: */
                if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
@@ -3848,7 +3874,13 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 
        if (flush || next_element >= queue->max_elements) {
                atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
-               qeth_flush_queue(queue);
+               queue->bulk_count++;
+
+               if (queue->bulk_count >= queue->bulk_max)
+                       flush = true;
+
+               if (flush)
+                       qeth_flush_queue(queue);
        }
 
        if (stopped && !qeth_out_queue_is_full(queue))
@@ -3898,8 +3930,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
                        atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
                        flush_count++;
                        queue->next_buf_to_fill =
-                               (queue->next_buf_to_fill + 1) %
-                               QDIO_MAX_BUFFERS_PER_Q;
+                               QDIO_BUFNR(queue->next_buf_to_fill + 1);
                        buffer = queue->bufs[queue->next_buf_to_fill];
 
                        /* We stepped forward, so sanity-check again: */
@@ -3932,8 +3963,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
        if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
                flush_count++;
                atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
-               queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
-                                         QDIO_MAX_BUFFERS_PER_Q;
+               queue->next_buf_to_fill =
+                               QDIO_BUFNR(queue->next_buf_to_fill + 1);
        }
 
        if (flush_count)
@@ -4261,7 +4292,6 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
        }
        return rc;
 }
-EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
 
 void qeth_tx_timeout(struct net_device *dev)
 {
@@ -4977,6 +5007,15 @@ retriable:
                        goto out;
                }
        }
+
+       if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
+           (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
+               card->info.hwtrap = 0;
+
+       rc = qeth_set_access_ctrl_online(card, 0);
+       if (rc)
+               goto out;
+
        return 0;
 out:
        dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
@@ -5165,8 +5204,7 @@ int qeth_poll(struct napi_struct *napi, int budget)
                                card->rx.b_count--;
                                if (card->rx.b_count) {
                                        card->rx.b_index =
-                                               (card->rx.b_index + 1) %
-                                               QDIO_MAX_BUFFERS_PER_Q;
+                                               QDIO_BUFNR(card->rx.b_index + 1);
                                        card->rx.b_element =
                                                &card->qdio.in_q
                                                ->bufs[card->rx.b_index]
@@ -5182,9 +5220,9 @@ int qeth_poll(struct napi_struct *napi, int budget)
                }
        }
 
-       napi_complete_done(napi, work_done);
-       if (qdio_start_irq(card->data.ccwdev, 0))
-               napi_schedule(&card->napi);
+       if (napi_complete_done(napi, work_done) &&
+           qdio_start_irq(CARD_DDEV(card), 0))
+               napi_schedule(napi);
 out:
        return work_done;
 }
index 6420b58..9ad0d6f 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <asm/qeth.h>
 #include <uapi/linux/if_ether.h>
+#include <uapi/linux/in6.h>
 
 #define IPA_PDU_HEADER_SIZE    0x40
 #define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e)
@@ -365,8 +366,7 @@ struct qeth_ipacmd_setdelip6 {
 struct qeth_ipacmd_setdelipm {
        __u8 mac[6];
        __u8 padding[2];
-       __u8 ip6[12];
-       __u8 ip4[4];
+       struct in6_addr ip;
 } __attribute__ ((packed));
 
 struct qeth_ipacmd_layer2setdelmac {
index bd8143e..8f3093d 100644 (file)
@@ -759,14 +759,6 @@ add_napi:
        return rc;
 }
 
-static int qeth_l2_start_ipassists(struct qeth_card *card)
-{
-       /* configure isolation level */
-       if (qeth_set_access_ctrl_online(card, 0))
-               return -ENODEV;
-       return 0;
-}
-
 static void qeth_l2_trace_features(struct qeth_card *card)
 {
        /* Set BridgePort features */
@@ -797,13 +789,6 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
                goto out_remove;
        }
 
-       if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
-               if (card->info.hwtrap &&
-                   qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
-                       card->info.hwtrap = 0;
-       } else
-               card->info.hwtrap = 0;
-
        qeth_bridgeport_query_support(card);
        if (card->options.sbp.supported_funcs)
                dev_info(&card->gdev->dev,
@@ -825,12 +810,6 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
        /* softsetup */
        QETH_CARD_TEXT(card, 2, "softsetp");
 
-       if (IS_OSD(card) || IS_OSX(card)) {
-               rc = qeth_l2_start_ipassists(card);
-               if (rc)
-                       goto out_remove;
-       }
-
        rc = qeth_init_qdio_queues(card);
        if (rc) {
                QETH_CARD_TEXT_(card, 2, "6err%d", rc);
index 87659cf..ba913d1 100644 (file)
@@ -24,7 +24,6 @@ enum qeth_ip_types {
 struct qeth_ipaddr {
        struct hlist_node hnode;
        enum qeth_ip_types type;
-       unsigned char mac[ETH_ALEN];
        u8 is_multicast:1;
        u8 in_progress:1;
        u8 disp_flag:2;
@@ -37,7 +36,7 @@ struct qeth_ipaddr {
        enum qeth_prot_versions proto;
        union {
                struct {
-                       unsigned int addr;
+                       __be32 addr;
                        unsigned int mask;
                } a4;
                struct {
@@ -74,12 +73,10 @@ static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
         * so 'proto' and 'addr' match for sure.
         *
         * For ucast:
-        * -    'mac' is always 0.
         * -    'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
         *      values are required to avoid mixups in takeover eligibility.
         *
         * For mcast,
-        * -    'mac' is mapped from the IP, and thus always matches.
         * -    'mask'/'pfxlen' is always 0.
         */
        if (a1->type != a2->type)
@@ -89,21 +86,12 @@ static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
        return a1->u.a4.mask == a2->u.a4.mask;
 }
 
-static inline  u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
+static inline u32 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
 {
-       u64  ret = 0;
-       u8 *point;
-
-       if (addr->proto == QETH_PROT_IPV6) {
-               point = (u8 *) &addr->u.a6.addr;
-               ret = get_unaligned((u64 *)point) ^
-                       get_unaligned((u64 *) (point + 8));
-       }
-       if (addr->proto == QETH_PROT_IPV4) {
-               point = (u8 *) &addr->u.a4.addr;
-               ret = get_unaligned((u32 *) point);
-       }
-       return ret;
+       if (addr->proto == QETH_PROT_IPV6)
+               return ipv6_addr_hash(&addr->u.a6.addr);
+       else
+               return ipv4_addr_hash(addr->u.a4.addr);
 }
 
 struct qeth_ipato_entry {
index d7bfc7a..70d4586 100644 (file)
@@ -76,7 +76,7 @@ static struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions prot)
 static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
                                                   struct qeth_ipaddr *query)
 {
-       u64 key = qeth_l3_ipaddr_hash(query);
+       u32 key = qeth_l3_ipaddr_hash(query);
        struct qeth_ipaddr *addr;
 
        if (query->is_multicast) {
@@ -381,12 +381,13 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
        if (!iob)
                return -ENOMEM;
        cmd = __ipa_cmd(iob);
-       ether_addr_copy(cmd->data.setdelipm.mac, addr->mac);
-       if (addr->proto == QETH_PROT_IPV6)
-               memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
-                      sizeof(struct in6_addr));
-       else
-               memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4);
+       if (addr->proto == QETH_PROT_IPV6) {
+               cmd->data.setdelipm.ip = addr->u.a6.addr;
+               ipv6_eth_mc_map(&addr->u.a6.addr, cmd->data.setdelipm.mac);
+       } else {
+               cmd->data.setdelipm.ip.s6_addr32[3] = addr->u.a4.addr;
+               ip_eth_mc_map(addr->u.a4.addr, cmd->data.setdelipm.mac);
+       }
 
        return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL);
 }
@@ -953,8 +954,6 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
 {
        QETH_CARD_TEXT(card, 3, "strtipas");
 
-       if (qeth_set_access_ctrl_online(card, 0))
-               return -EIO;
        qeth_l3_start_ipa_arp_processing(card); /* go on*/
        qeth_l3_start_ipa_source_mac(card);     /* go on*/
        qeth_l3_start_ipa_vlan(card);           /* go on*/
@@ -1129,8 +1128,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 
        for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
             im4 = rcu_dereference(im4->next_rcu)) {
-               ip_eth_mc_map(im4->multiaddr, tmp->mac);
-               tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
+               tmp->u.a4.addr = im4->multiaddr;
                tmp->is_multicast = 1;
 
                ipm = qeth_l3_find_addr_by_ip(card, tmp);
@@ -1141,8 +1139,8 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
                        ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
                        if (!ipm)
                                continue;
-                       ether_addr_copy(ipm->mac, tmp->mac);
-                       ipm->u.a4.addr = be32_to_cpu(im4->multiaddr);
+
+                       ipm->u.a4.addr = im4->multiaddr;
                        ipm->is_multicast = 1;
                        ipm->disp_flag = QETH_DISP_ADDR_ADD;
                        hash_add(card->ip_mc_htable,
@@ -1209,9 +1207,7 @@ static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
                return;
 
        for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
-               ipv6_eth_mc_map(&im6->mca_addr, tmp->mac);
-               memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr,
-                      sizeof(struct in6_addr));
+               tmp->u.a6.addr = im6->mca_addr;
                tmp->is_multicast = 1;
 
                ipm = qeth_l3_find_addr_by_ip(card, tmp);
@@ -1225,9 +1221,7 @@ static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
                if (!ipm)
                        continue;
 
-               ether_addr_copy(ipm->mac, tmp->mac);
-               memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
-                      sizeof(struct in6_addr));
+               ipm->u.a6.addr = im6->mca_addr;
                ipm->is_multicast = 1;
                ipm->disp_flag = QETH_DISP_ADDR_ADD;
                hash_add(card->ip_mc_htable,
@@ -2313,13 +2307,6 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
                goto out_remove;
        }
 
-       if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
-               if (card->info.hwtrap &&
-                   qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
-                       card->info.hwtrap = 0;
-       } else
-               card->info.hwtrap = 0;
-
        card->state = CARD_STATE_HARDSETUP;
        qeth_print_status_message(card);
 
@@ -2557,7 +2544,7 @@ static int qeth_l3_ip_event(struct notifier_block *this,
        QETH_CARD_TEXT(card, 3, "ipevent");
 
        qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
-       addr.u.a4.addr = be32_to_cpu(ifa->ifa_address);
+       addr.u.a4.addr = ifa->ifa_address;
        addr.u.a4.mask = be32_to_cpu(ifa->ifa_mask);
 
        return qeth_l3_handle_ip_event(card, &addr, event);
index 1b92f3c..90cf469 100644 (file)
@@ -898,7 +898,7 @@ config SCSI_SNI_53C710
 
 config 53C700_LE_ON_BE
        bool
-       depends on SCSI_LASI700
+       depends on SCSI_LASI700 || SCSI_SNI_53C710
        default y
 
 config SCSI_STEX
index 5f8153c..76751d6 100644 (file)
@@ -579,7 +579,6 @@ ch_release(struct inode *inode, struct file *file)
        scsi_changer *ch = file->private_data;
 
        scsi_device_put(ch->device);
-       ch->device = NULL;
        file->private_data = NULL;
        kref_put(&ch->ref, ch_destroy);
        return 0;
index 4971104..f32da0c 100644 (file)
@@ -512,6 +512,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
        unsigned int tpg_desc_tbl_off;
        unsigned char orig_transition_tmo;
        unsigned long flags;
+       bool transitioning_sense = false;
 
        if (!pg->expiry) {
                unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;
@@ -572,13 +573,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
                        goto retry;
                }
                /*
-                * Retry on ALUA state transition or if any
-                * UNIT ATTENTION occurred.
+                * If the array returns with 'ALUA state transition'
+                * sense code here it cannot return RTPG data during
+                * transition. So set the state to 'transitioning' directly.
                 */
                if (sense_hdr.sense_key == NOT_READY &&
-                   sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
-                       err = SCSI_DH_RETRY;
-               else if (sense_hdr.sense_key == UNIT_ATTENTION)
+                   sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) {
+                       transitioning_sense = true;
+                       goto skip_rtpg;
+               }
+               /*
+                * Retry on any other UNIT ATTENTION occurred.
+                */
+               if (sense_hdr.sense_key == UNIT_ATTENTION)
                        err = SCSI_DH_RETRY;
                if (err == SCSI_DH_RETRY &&
                    pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
@@ -666,7 +673,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
                off = 8 + (desc[7] * 4);
        }
 
+ skip_rtpg:
        spin_lock_irqsave(&pg->lock, flags);
+       if (transitioning_sense)
+               pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
+
        sdev_printk(KERN_INFO, sdev,
                    "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
                    ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
index ac39ed7..216e557 100644 (file)
@@ -5477,6 +5477,8 @@ static int hpsa_ciss_submit(struct ctlr_info *h,
                return SCSI_MLQUEUE_HOST_BUSY;
        }
 
+       c->device = dev;
+
        enqueue_cmd_and_start_io(h, c);
        /* the cmd'll come back via intr handler in complete_scsi_command()  */
        return 0;
@@ -5548,6 +5550,7 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h,
                hpsa_cmd_init(h, c->cmdindex, c);
                c->cmd_type = CMD_SCSI;
                c->scsi_cmd = cmd;
+               c->device = dev;
                rc = hpsa_scsi_ioaccel_raid_map(h, c);
                if (rc < 0)     /* scsi_dma_map failed. */
                        rc = SCSI_MLQUEUE_HOST_BUSY;
@@ -5555,6 +5558,7 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h,
                hpsa_cmd_init(h, c->cmdindex, c);
                c->cmd_type = CMD_SCSI;
                c->scsi_cmd = cmd;
+               c->device = dev;
                rc = hpsa_scsi_ioaccel_direct_map(h, c);
                if (rc < 0)     /* scsi_dma_map failed. */
                        rc = SCSI_MLQUEUE_HOST_BUSY;
index e91377a..e8813d2 100644 (file)
@@ -9055,7 +9055,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                }
        }
 
-#if defined(BUILD_NVME)
        /* Clear NVME stats */
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
@@ -9063,7 +9062,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                               sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
                }
        }
-#endif
 
        /* Clear SCSI stats */
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
index fe10976..6822cd9 100644 (file)
@@ -528,7 +528,6 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
                        list_del_init(&psb->list);
                        psb->exch_busy = 0;
                        psb->status = IOSTAT_SUCCESS;
-#ifdef BUILD_NVME
                        if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
                                qp->abts_nvme_io_bufs--;
                                spin_unlock(&qp->abts_io_buf_list_lock);
@@ -536,7 +535,6 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
                                lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
                                return;
                        }
-#endif
                        qp->abts_scsi_io_bufs--;
                        spin_unlock(&qp->abts_io_buf_list_lock);
 
index 3568031..bcb1e85 100644 (file)
@@ -3224,6 +3224,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
 
        ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+       if (unlikely(!ha->wq)) {
+               ret = -ENOMEM;
+               goto probe_failed;
+       }
 
        if (ha->isp_ops->initialize_adapter(base_vha)) {
                ql_log(ql_log_fatal, base_vha, 0x00d6,
index 64c96c7..6d7362e 100644 (file)
@@ -730,6 +730,14 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
                  const char *buf, size_t count)
 {
        struct kernfs_node *kn;
+       struct scsi_device *sdev = to_scsi_device(dev);
+
+       /*
+        * We need to try to get module, avoiding the module been removed
+        * during delete.
+        */
+       if (scsi_device_get(sdev))
+               return -ENODEV;
 
        kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
        WARN_ON_ONCE(!kn);
@@ -744,9 +752,10 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
         * state into SDEV_DEL.
         */
        device_remove_file(dev, attr);
-       scsi_remove_device(to_scsi_device(dev));
+       scsi_remove_device(sdev);
        if (kn)
                sysfs_unbreak_active_protection(kn);
+       scsi_device_put(sdev);
        return count;
 };
 static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
index aef4881..a85d52b 100644 (file)
@@ -66,10 +66,8 @@ static int snirm710_probe(struct platform_device *dev)
 
        base = res->start;
        hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
-       if (!hostdata) {
-               dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
+       if (!hostdata)
                return -ENOMEM;
-       }
 
        hostdata->dev = &dev->dev;
        dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
index bf68d86..1e164e0 100644 (file)
@@ -1749,6 +1749,13 @@ struct qman_portal *qman_get_affine_portal(int cpu)
 }
 EXPORT_SYMBOL(qman_get_affine_portal);
 
+int qman_start_using_portal(struct qman_portal *p, struct device *dev)
+{
+       return (!device_link_add(dev, p->config->dev,
+                                DL_FLAG_AUTOREMOVE_CONSUMER)) ? -EINVAL : 0;
+}
+EXPORT_SYMBOL(qman_start_using_portal);
+
 int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
 {
        return __poll_portal_fast(p, limit);
index 50831eb..c68882e 100644 (file)
@@ -46,7 +46,7 @@ static ssize_t soc_uid_show(struct device *dev,
        hdr->func = IMX_SC_MISC_FUNC_UNIQUE_ID;
        hdr->size = 1;
 
-       ret = imx_scu_call_rpc(soc_ipc_handle, &msg, false);
+       ret = imx_scu_call_rpc(soc_ipc_handle, &msg, true);
        if (ret) {
                pr_err("%s: get soc uid failed, ret %d\n", __func__, ret);
                return ret;
index 6f1fa4c..333308f 100644 (file)
@@ -125,4 +125,6 @@ source "drivers/staging/exfat/Kconfig"
 
 source "drivers/staging/qlge/Kconfig"
 
+source "drivers/staging/hp/Kconfig"
+
 endif # STAGING
index a90f9b3..e4943cd 100644 (file)
@@ -53,3 +53,4 @@ obj-$(CONFIG_UWB)             += uwb/
 obj-$(CONFIG_USB_WUSB)         += wusbcore/
 obj-$(CONFIG_EXFAT_FS)         += exfat/
 obj-$(CONFIG_QLGE)             += qlge/
+obj-$(CONFIG_NET_VENDOR_HP)    += hp/
diff --git a/drivers/staging/hp/Kconfig b/drivers/staging/hp/Kconfig
new file mode 100644 (file)
index 0000000..fb395cf
--- /dev/null
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# HP network device configuration
+#
+
+config NET_VENDOR_HP
+       bool "HP devices"
+       default y
+       depends on ISA || EISA || PCI
+       ---help---
+         If you have a network (Ethernet) card belonging to this class, say Y.
+
+         Note that the answer to this question doesn't directly affect the
+         kernel: saying N will just cause the configurator to skip all
+         the questions about HP cards. If you say Y, you will be asked for
+         your specific card in the following questions.
+
+if NET_VENDOR_HP
+
+config HP100
+       tristate "HP 10/100VG PCLAN (ISA, EISA, PCI) support"
+       depends on (ISA || EISA || PCI)
+       ---help---
+         If you have a network (Ethernet) card of this type, say Y here.
+
+         To compile this driver as a module, choose M here. The module
+         will be called hp100.
+
+endif # NET_VENDOR_HP
diff --git a/drivers/staging/hp/Makefile b/drivers/staging/hp/Makefile
new file mode 100644 (file)
index 0000000..5ed723b
--- /dev/null
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the HP network device drivers.
+#
+
+obj-$(CONFIG_HP100) += hp100.o
diff --git a/drivers/staging/hp/hp100.c b/drivers/staging/hp/hp100.c
new file mode 100644 (file)
index 0000000..6ec78f5
--- /dev/null
@@ -0,0 +1,3037 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+** hp100.c
+** HP CASCADE Architecture Driver for 100VG-AnyLan Network Adapters
+**
+** $Id: hp100.c,v 1.58 2001/09/24 18:03:01 perex Exp perex $
+**
+** Based on the HP100 driver written by Jaroslav Kysela <perex@jcu.cz>
+** Extended for new busmaster capable chipsets by
+** Siegfried "Frieder" Loeffler (dg1sek) <floeff@mathematik.uni-stuttgart.de>
+**
+** Maintained by: Jaroslav Kysela <perex@perex.cz>
+**
+** This driver has only been tested with
+** -- HP J2585B 10/100 Mbit/s PCI Busmaster
+** -- HP J2585A 10/100 Mbit/s PCI
+** -- HP J2970A 10 Mbit/s PCI Combo 10base-T/BNC
+** -- HP J2973A 10 Mbit/s PCI 10base-T
+** -- HP J2573  10/100 ISA
+** -- Compex ReadyLink ENET100-VG4  10/100 Mbit/s PCI / EISA
+** -- Compex FreedomLine 100/VG  10/100 Mbit/s ISA / EISA / PCI
+**
+** but it should also work with the other CASCADE based adapters.
+**
+** TODO:
+**       -  J2573 seems to hang sometimes when in shared memory mode.
+**       -  Mode for Priority TX
+**       -  Check PCI registers, performance might be improved?
+**       -  To reduce interrupt load in busmaster, one could switch off
+**          the interrupts that are used to refill the queues whenever the
+**          queues are filled up to more than a certain threshold.
+**       -  some updates for EISA version of card
+**
+**
+**
+** 1.57c -> 1.58
+**   - used indent to change coding-style
+**   - added KTI DP-200 EISA ID
+**   - ioremap is also used for low (<1MB) memory (multi-architecture support)
+**
+** 1.57b -> 1.57c - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+**   - release resources on failure in init_module
+**
+** 1.57 -> 1.57b - Jean II
+**   - fix spinlocks, SMP is now working !
+**
+** 1.56 -> 1.57
+**   - updates for new PCI interface for 2.1 kernels
+**
+** 1.55 -> 1.56
+**   - removed printk in misc. interrupt and update statistics to allow
+**     monitoring of card status
+**   - timing changes in xmit routines, relogin to 100VG hub added when
+**     driver does reset
+**   - included fix for Compex FreedomLine PCI adapter
+**
+** 1.54 -> 1.55
+**   - fixed bad initialization in init_module
+**   - added Compex FreedomLine adapter
+**   - some fixes in card initialization
+**
+** 1.53 -> 1.54
+**   - added hardware multicast filter support (doesn't work)
+**   - little changes in hp100_sense_lan routine
+**     - added support for Coax and AUI (J2970)
+**   - fix for multiple cards and hp100_mode parameter (insmod)
+**   - fix for shared IRQ
+**
+** 1.52 -> 1.53
+**   - fixed bug in multicast support
+**
+*/
+
+#define HP100_DEFAULT_PRIORITY_TX 0
+
+#undef HP100_DEBUG
+#undef HP100_DEBUG_B           /* Trace  */
+#undef HP100_DEBUG_BM          /* Debug busmaster code (PDL stuff) */
+
+#undef HP100_DEBUG_TRAINING    /* Debug login-to-hub procedure */
+#undef HP100_DEBUG_TX
+#undef HP100_DEBUG_IRQ
+#undef HP100_DEBUG_RX
+
+#undef HP100_MULTICAST_FILTER  /* Need to be debugged... */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/eisa.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+
+#include <asm/io.h>
+
+#include "hp100.h"
+
+/*
+ *  defines
+ */
+
+#define HP100_BUS_ISA     0
+#define HP100_BUS_EISA    1
+#define HP100_BUS_PCI     2
+
+#define HP100_REGION_SIZE      0x20    /* for ioports */
+#define HP100_SIG_LEN          8       /* same as EISA_SIG_LEN */
+
+#define HP100_MAX_PACKET_SIZE  (1536+4)
+#define HP100_MIN_PACKET_SIZE  60
+
+#ifndef HP100_DEFAULT_RX_RATIO
+/* default - 75% onboard memory on the card are used for RX packets */
+#define HP100_DEFAULT_RX_RATIO 75
+#endif
+
+#ifndef HP100_DEFAULT_PRIORITY_TX
+/* default - don't enable transmit outgoing packets as priority */
+#define HP100_DEFAULT_PRIORITY_TX 0
+#endif
+
+/*
+ *  structures
+ */
+
+struct hp100_private {
+       spinlock_t lock;
+       char id[HP100_SIG_LEN];
+       u_short chip;
+       u_short soft_model;
+       u_int memory_size;
+       u_int virt_memory_size;
+       u_short rx_ratio;       /* 1 - 99 */
+       u_short priority_tx;    /* != 0 - priority tx */
+       u_short mode;           /* PIO, Shared Mem or Busmaster */
+       u_char bus;
+       struct pci_dev *pci_dev;
+       short mem_mapped;       /* memory mapped access */
+       void __iomem *mem_ptr_virt;     /* virtual memory mapped area, maybe NULL */
+       unsigned long mem_ptr_phys;     /* physical memory mapped area */
+       short lan_type;         /* 10Mb/s, 100Mb/s or -1 (error) */
+       int hub_status;         /* was login to hub successful? */
+       u_char mac1_mode;
+       u_char mac2_mode;
+       u_char hash_bytes[8];
+
+       /* Rings for busmaster mode: */
+       hp100_ring_t *rxrhead;  /* Head (oldest) index into rxring */
+       hp100_ring_t *rxrtail;  /* Tail (newest) index into rxring */
+       hp100_ring_t *txrhead;  /* Head (oldest) index into txring */
+       hp100_ring_t *txrtail;  /* Tail (newest) index into txring */
+
+       hp100_ring_t rxring[MAX_RX_PDL];
+       hp100_ring_t txring[MAX_TX_PDL];
+
+       u_int *page_vaddr_algn; /* Aligned virtual address of allocated page */
+       u_long whatever_offset; /* Offset to bus/phys/dma address */
+       int rxrcommit;          /* # Rx PDLs committed to adapter */
+       int txrcommit;          /* # Tx PDLs committed to adapter */
+};
+
+/*
+ *  variables
+ */
+#ifdef CONFIG_ISA
+static const char *hp100_isa_tbl[] = {
+       "HWPF150", /* HP J2573 rev A */
+       "HWP1950", /* HP J2573 */
+};
+#endif
+
+static const struct eisa_device_id hp100_eisa_tbl[] = {
+       { "HWPF180" }, /* HP J2577 rev A */
+       { "HWP1920" }, /* HP 27248B */
+       { "HWP1940" }, /* HP J2577 */
+       { "HWP1990" }, /* HP J2577 */
+       { "CPX0301" }, /* ReadyLink ENET100-VG4 */
+       { "CPX0401" }, /* FreedomLine 100/VG */
+       { "" }         /* Mandatory final entry ! */
+};
+MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
+
+static const struct pci_device_id hp100_pci_tbl[] = {
+       {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
+       {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
+       {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2970A, PCI_ANY_ID, PCI_ANY_ID,},
+       {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2973A, PCI_ANY_ID, PCI_ANY_ID,},
+       {PCI_VENDOR_ID_COMPEX, PCI_DEVICE_ID_COMPEX_ENET100VG4, PCI_ANY_ID, PCI_ANY_ID,},
+       {PCI_VENDOR_ID_COMPEX2, PCI_DEVICE_ID_COMPEX2_100VG, PCI_ANY_ID, PCI_ANY_ID,},
+/*     {PCI_VENDOR_ID_KTI, PCI_DEVICE_ID_KTI_DP200, PCI_ANY_ID, PCI_ANY_ID }, */
+       {}                      /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, hp100_pci_tbl);
+
+static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO;
+static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX;
+static int hp100_mode = 1;
+
+module_param(hp100_rx_ratio, int, 0);
+module_param(hp100_priority_tx, int, 0);
+module_param(hp100_mode, int, 0);
+
+/*
+ *  prototypes
+ */
+
+static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus,
+                       struct pci_dev *pci_dev);
+
+
+static int hp100_open(struct net_device *dev);
+static int hp100_close(struct net_device *dev);
+static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
+                                   struct net_device *dev);
+static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
+                                      struct net_device *dev);
+static void hp100_rx(struct net_device *dev);
+static struct net_device_stats *hp100_get_stats(struct net_device *dev);
+static void hp100_misc_interrupt(struct net_device *dev);
+static void hp100_update_stats(struct net_device *dev);
+static void hp100_clear_stats(struct hp100_private *lp, int ioaddr);
+static void hp100_set_multicast_list(struct net_device *dev);
+static irqreturn_t hp100_interrupt(int irq, void *dev_id);
+static void hp100_start_interface(struct net_device *dev);
+static void hp100_stop_interface(struct net_device *dev);
+static void hp100_load_eeprom(struct net_device *dev, u_short ioaddr);
+static int hp100_sense_lan(struct net_device *dev);
+static int hp100_login_to_vg_hub(struct net_device *dev,
+                                u_short force_relogin);
+static int hp100_down_vg_link(struct net_device *dev);
+static void hp100_cascade_reset(struct net_device *dev, u_short enable);
+static void hp100_BM_shutdown(struct net_device *dev);
+static void hp100_mmuinit(struct net_device *dev);
+static void hp100_init_pdls(struct net_device *dev);
+static int hp100_init_rxpdl(struct net_device *dev,
+                           register hp100_ring_t * ringptr,
+                           register u_int * pdlptr);
+static int hp100_init_txpdl(struct net_device *dev,
+                           register hp100_ring_t * ringptr,
+                           register u_int * pdlptr);
+static void hp100_rxfill(struct net_device *dev);
+static void hp100_hwinit(struct net_device *dev);
+static void hp100_clean_txring(struct net_device *dev);
+#ifdef HP100_DEBUG
+static void hp100_RegisterDump(struct net_device *dev);
+#endif
+
+/* Conversion to new PCI API :
+ * Convert an address in a kernel buffer to a bus/phys/dma address.
+ * This works *only* for memory fragments that are part of lp->page_vaddr,
+ * because it was properly DMA allocated via pci_alloc_consistent(),
+ * so we just need to "retrieve" the original mapping to bus/phys/dma
+ * address - Jean II */
+static inline dma_addr_t virt_to_whatever(struct net_device *dev, u32 * ptr)
+{
+       struct hp100_private *lp = netdev_priv(dev);
+       return ((u_long) ptr) + lp->whatever_offset;
+}
+
+static inline u_int pdl_map_data(struct hp100_private *lp, void *data)
+{
+       return pci_map_single(lp->pci_dev, data,
+                             MAX_ETHER_SIZE, PCI_DMA_FROMDEVICE);
+}
+
+/* TODO: This function should not really be needed in a good design... */
+static void wait(void)
+{
+       mdelay(1);
+}
+
+/*
+ *  probe functions
+ *  These functions should - if possible - avoid doing write operations
+ *  since this could cause problems when the card is not installed.
+ */
+
+/*
+ * Read board id and convert to string.
+ * Effectively same code as decode_eisa_sig
+ */
+static const char *hp100_read_id(int ioaddr)
+{
+       int i;
+       static char str[HP100_SIG_LEN];
+       unsigned char sig[4], sum;
+        unsigned short rev;
+
+       hp100_page(ID_MAC_ADDR);
+       sum = 0;
+       for (i = 0; i < 4; i++) {
+               sig[i] = hp100_inb(BOARD_ID + i);
+               sum += sig[i];
+       }
+
+       sum += hp100_inb(BOARD_ID + i);
+       if (sum != 0xff)
+               return NULL;    /* bad checksum */
+
+        str[0] = ((sig[0] >> 2) & 0x1f) + ('A' - 1);
+        str[1] = (((sig[0] & 3) << 3) | (sig[1] >> 5)) + ('A' - 1);
+        str[2] = (sig[1] & 0x1f) + ('A' - 1);
+        rev = (sig[2] << 8) | sig[3];
+        sprintf(str + 3, "%04X", rev);
+
+       return str;
+}
+
+#ifdef CONFIG_ISA
+static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr)
+{
+       const char *sig;
+       int i;
+
+       if (!request_region(ioaddr, HP100_REGION_SIZE, "hp100"))
+               goto err;
+
+       if (hp100_inw(HW_ID) != HP100_HW_ID_CASCADE) {
+               release_region(ioaddr, HP100_REGION_SIZE);
+               goto err;
+       }
+
+       sig = hp100_read_id(ioaddr);
+       release_region(ioaddr, HP100_REGION_SIZE);
+
+       if (sig == NULL)
+               goto err;
+
+       for (i = 0; i < ARRAY_SIZE(hp100_isa_tbl); i++) {
+               if (!strcmp(hp100_isa_tbl[i], sig))
+                       break;
+
+       }
+
+       if (i < ARRAY_SIZE(hp100_isa_tbl))
+               return hp100_probe1(dev, ioaddr, HP100_BUS_ISA, NULL);
+ err:
+       return -ENODEV;
+
+}
+/*
+ * Probe for ISA board.
+ * EISA and PCI are handled by device infrastructure.
+ */
+
+static int  __init hp100_isa_probe(struct net_device *dev, int addr)
+{
+       int err = -ENODEV;
+
+       /* Probe for a specific ISA address */
+       if (addr > 0xff && addr < 0x400)
+               err = hp100_isa_probe1(dev, addr);
+
+       else if (addr != 0)
+               err = -ENXIO;
+
+       else {
+               /* Probe all ISA possible port regions */
+               for (addr = 0x100; addr < 0x400; addr += 0x20) {
+                       err = hp100_isa_probe1(dev, addr);
+                       if (!err)
+                               break;
+               }
+       }
+       return err;
+}
+#endif /* CONFIG_ISA */
+
+#if !defined(MODULE) && defined(CONFIG_ISA)
+struct net_device * __init hp100_probe(int unit)
+{
+       struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
+       int err;
+
+       if (!dev)
+               return ERR_PTR(-ENODEV);
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4200, TRACE);
+       printk("hp100: %s: probe\n", dev->name);
+#endif
+
+       if (unit >= 0) {
+               sprintf(dev->name, "eth%d", unit);
+               netdev_boot_setup_check(dev);
+       }
+
+       err = hp100_isa_probe(dev, dev->base_addr);
+       if (err)
+               goto out;
+
+       return dev;
+ out:
+       free_netdev(dev);
+       return ERR_PTR(err);
+}
+#endif /* !MODULE && CONFIG_ISA */
+
+static const struct net_device_ops hp100_bm_netdev_ops = {
+       .ndo_open               = hp100_open,
+       .ndo_stop               = hp100_close,
+       .ndo_start_xmit         = hp100_start_xmit_bm,
+       .ndo_get_stats          = hp100_get_stats,
+       .ndo_set_rx_mode        = hp100_set_multicast_list,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+};
+
+static const struct net_device_ops hp100_netdev_ops = {
+       .ndo_open               = hp100_open,
+       .ndo_stop               = hp100_close,
+       .ndo_start_xmit         = hp100_start_xmit,
+       .ndo_get_stats          = hp100_get_stats,
+       .ndo_set_rx_mode        = hp100_set_multicast_list,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+};
+
+static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus,
+                       struct pci_dev *pci_dev)
+{
+       int i;
+       int err = -ENODEV;
+       const char *eid;
+       u_int chip;
+       u_char uc;
+       u_int memory_size = 0, virt_memory_size = 0;
+       u_short local_mode, lsw;
+       short mem_mapped;
+       unsigned long mem_ptr_phys;
+       void __iomem *mem_ptr_virt;
+       struct hp100_private *lp;
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4201, TRACE);
+       printk("hp100: %s: probe1\n", dev->name);
+#endif
+
+       /* memory region for programmed i/o */
+       if (!request_region(ioaddr, HP100_REGION_SIZE, "hp100"))
+               goto out1;
+
+       if (hp100_inw(HW_ID) != HP100_HW_ID_CASCADE)
+               goto out2;
+
+       chip = hp100_inw(PAGING) & HP100_CHIPID_MASK;
+#ifdef HP100_DEBUG
+       if (chip == HP100_CHIPID_SHASTA)
+               printk("hp100: %s: Shasta Chip detected. (This is a pre 802.12 chip)\n", dev->name);
+       else if (chip == HP100_CHIPID_RAINIER)
+               printk("hp100: %s: Rainier Chip detected. (This is a pre 802.12 chip)\n", dev->name);
+       else if (chip == HP100_CHIPID_LASSEN)
+               printk("hp100: %s: Lassen Chip detected.\n", dev->name);
+       else
+               printk("hp100: %s: Warning: Unknown CASCADE chip (id=0x%.4x).\n", dev->name, chip);
+#endif
+
+       dev->base_addr = ioaddr;
+
+       eid = hp100_read_id(ioaddr);
+       if (eid == NULL) {      /* bad checksum? */
+               printk(KERN_WARNING "%s: bad ID checksum at base port 0x%x\n",
+                      __func__, ioaddr);
+               goto out2;
+       }
+
+       hp100_page(ID_MAC_ADDR);
+       for (i = uc = 0; i < 7; i++)
+               uc += hp100_inb(LAN_ADDR + i);
+       if (uc != 0xff) {
+               printk(KERN_WARNING
+                      "%s: bad lan address checksum at port 0x%x)\n",
+                      __func__, ioaddr);
+               err = -EIO;
+               goto out2;
+       }
+
+       /* Make sure, that all registers are correctly updated... */
+
+       hp100_load_eeprom(dev, ioaddr);
+       wait();
+
+       /*
+        * Determine driver operation mode
+        *
+        * Use the variable "hp100_mode" upon insmod or as kernel parameter to
+        * force driver modes:
+        * hp100_mode=1 -> default, use busmaster mode if configured.
+        * hp100_mode=2 -> enable shared memory mode
+        * hp100_mode=3 -> force use of i/o mapped mode.
+        * hp100_mode=4 -> same as 1, but re-set the enable bit on the card.
+        */
+
+       /*
+        * LSW values:
+        *   0x2278 -> J2585B, PnP shared memory mode
+        *   0x2270 -> J2585B, shared memory mode, 0xdc000
+        *   0xa23c -> J2585B, I/O mapped mode
+        *   0x2240 -> EISA COMPEX, BusMaster (Shasta Chip)
+        *   0x2220 -> EISA HP, I/O (Shasta Chip)
+        *   0x2260 -> EISA HP, BusMaster (Shasta Chip)
+        */
+
+#if 0
+       local_mode = 0x2270;
+       hp100_outw(0xfefe, OPTION_LSW);
+       hp100_outw(local_mode | HP100_SET_LB | HP100_SET_HB, OPTION_LSW);
+#endif
+
+       /* hp100_mode value may be used in the future by another card */
+       local_mode = hp100_mode;
+       if (local_mode < 1 || local_mode > 4)
+               local_mode = 1; /* default */
+#ifdef HP100_DEBUG
+       printk("hp100: %s: original LSW = 0x%x\n", dev->name,
+              hp100_inw(OPTION_LSW));
+#endif
+
+       if (local_mode == 3) {
+               hp100_outw(HP100_MEM_EN | HP100_RESET_LB, OPTION_LSW);
+               hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
+               hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_RESET_HB, OPTION_LSW);
+               printk("hp100: IO mapped mode forced.\n");
+       } else if (local_mode == 2) {
+               hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW);
+               hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
+               hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_RESET_HB, OPTION_LSW);
+               printk("hp100: Shared memory mode requested.\n");
+       } else if (local_mode == 4) {
+               if (chip == HP100_CHIPID_LASSEN) {
+                       hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_SET_HB, OPTION_LSW);
+                       hp100_outw(HP100_IO_EN | HP100_MEM_EN | HP100_RESET_LB, OPTION_LSW);
+                       printk("hp100: Busmaster mode requested.\n");
+               }
+               local_mode = 1;
+       }
+
+       if (local_mode == 1) {  /* default behaviour */
+               lsw = hp100_inw(OPTION_LSW);
+
+               if ((lsw & HP100_IO_EN) && (~lsw & HP100_MEM_EN) &&
+                   (~lsw & (HP100_BM_WRITE | HP100_BM_READ))) {
+#ifdef HP100_DEBUG
+                       printk("hp100: %s: IO_EN bit is set on card.\n", dev->name);
+#endif
+                       local_mode = 3;
+               } else if (chip == HP100_CHIPID_LASSEN &&
+                          (lsw & (HP100_BM_WRITE | HP100_BM_READ)) == (HP100_BM_WRITE | HP100_BM_READ)) {
+                       /* Conversion to new PCI API :
+                        * I don't have the doc, but I assume that the card
+                        * can map the full 32bit address space.
+                        * Also, we can have EISA Busmaster cards (not tested),
+                        * so beware !!! - Jean II */
+                       if((bus == HP100_BUS_PCI) &&
+                          (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)))) {
+                               /* Gracefully fallback to shared memory */
+                               goto busmasterfail;
+                       }
+                       printk("hp100: Busmaster mode enabled.\n");
+                       hp100_outw(HP100_MEM_EN | HP100_IO_EN | HP100_RESET_LB, OPTION_LSW);
+               } else {
+               busmasterfail:
+#ifdef HP100_DEBUG
+                       printk("hp100: %s: Card not configured for BM or BM not supported with this card.\n", dev->name);
+                       printk("hp100: %s: Trying shared memory mode.\n", dev->name);
+#endif
+                       /* In this case, try shared memory mode */
+                       local_mode = 2;
+                       hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW);
+                       /* hp100_outw(HP100_IO_EN|HP100_RESET_LB, OPTION_LSW); */
+               }
+       }
+#ifdef HP100_DEBUG
+       printk("hp100: %s: new LSW = 0x%x\n", dev->name, hp100_inw(OPTION_LSW));
+#endif
+
+       /* Check for shared memory on the card, eventually remap it */
+       hp100_page(HW_MAP);
+       mem_mapped = ((hp100_inw(OPTION_LSW) & (HP100_MEM_EN)) != 0);
+       mem_ptr_phys = 0UL;
+       mem_ptr_virt = NULL;
+       memory_size = (8192 << ((hp100_inb(SRAM) >> 5) & 0x07));
+       virt_memory_size = 0;
+
+       /* For memory mapped or busmaster mode, we want the memory address */
+       if (mem_mapped || (local_mode == 1)) {
+               mem_ptr_phys = (hp100_inw(MEM_MAP_LSW) | (hp100_inw(MEM_MAP_MSW) << 16));
+               mem_ptr_phys &= ~0x1fff;        /* 8k alignment */
+
+               if (bus == HP100_BUS_ISA && (mem_ptr_phys & ~0xfffff) != 0) {
+                       printk("hp100: Can only use programmed i/o mode.\n");
+                       mem_ptr_phys = 0;
+                       mem_mapped = 0;
+                       local_mode = 3; /* Use programmed i/o */
+               }
+
+               /* We do not need access to shared memory in busmaster mode */
+               /* However in slave mode we need to remap high (>1GB) card memory  */
+               if (local_mode != 1) {  /* = not busmaster */
+                       /* We try with smaller memory sizes, if ioremap fails */
+                       for (virt_memory_size = memory_size; virt_memory_size > 16383; virt_memory_size >>= 1) {
+                               if ((mem_ptr_virt = ioremap((u_long) mem_ptr_phys, virt_memory_size)) == NULL) {
+#ifdef HP100_DEBUG
+                                       printk("hp100: %s: ioremap for 0x%x bytes high PCI memory at 0x%lx failed\n", dev->name, virt_memory_size, mem_ptr_phys);
+#endif
+                               } else {
+#ifdef HP100_DEBUG
+                                       printk("hp100: %s: remapped 0x%x bytes high PCI memory at 0x%lx to %p.\n", dev->name, virt_memory_size, mem_ptr_phys, mem_ptr_virt);
+#endif
+                                       break;
+                               }
+                       }
+
+                       if (mem_ptr_virt == NULL) {     /* all ioremap tries failed */
+                               printk("hp100: Failed to ioremap the PCI card memory. Will have to use i/o mapped mode.\n");
+                               local_mode = 3;
+                               virt_memory_size = 0;
+                       }
+               }
+       }
+
+       if (local_mode == 3) {  /* io mapped forced */
+               mem_mapped = 0;
+               mem_ptr_phys = 0;
+               mem_ptr_virt = NULL;
+               printk("hp100: Using (slow) programmed i/o mode.\n");
+       }
+
+       /* Initialise the "private" data structure for this card. */
+       lp = netdev_priv(dev);
+
+       spin_lock_init(&lp->lock);
+       strlcpy(lp->id, eid, HP100_SIG_LEN);
+       lp->chip = chip;
+       lp->mode = local_mode;
+       lp->bus = bus;
+       lp->pci_dev = pci_dev;
+       lp->priority_tx = hp100_priority_tx;
+       lp->rx_ratio = hp100_rx_ratio;
+       lp->mem_ptr_phys = mem_ptr_phys;
+       lp->mem_ptr_virt = mem_ptr_virt;
+       hp100_page(ID_MAC_ADDR);
+       lp->soft_model = hp100_inb(SOFT_MODEL);
+       lp->mac1_mode = HP100_MAC1MODE3;
+       lp->mac2_mode = HP100_MAC2MODE3;
+       memset(&lp->hash_bytes, 0x00, 8);
+
+       dev->base_addr = ioaddr;
+
+       lp->memory_size = memory_size;
+       lp->virt_memory_size = virt_memory_size;
+       lp->rx_ratio = hp100_rx_ratio;  /* can be conf'd with insmod */
+
+       if (lp->mode == 1)      /* busmaster */
+               dev->netdev_ops = &hp100_bm_netdev_ops;
+       else
+               dev->netdev_ops = &hp100_netdev_ops;
+
+       /* Ask the card for which IRQ line it is configured */
+       if (bus == HP100_BUS_PCI) {
+               dev->irq = pci_dev->irq;
+       } else {
+               hp100_page(HW_MAP);
+               dev->irq = hp100_inb(IRQ_CHANNEL) & HP100_IRQMASK;
+               if (dev->irq == 2)
+                       dev->irq = 9;
+       }
+
+       if (lp->mode == 1)      /* busmaster */
+               dev->dma = 4;
+
+       /* Ask the card for its MAC address and store it for later use. */
+       hp100_page(ID_MAC_ADDR);
+       for (i = uc = 0; i < 6; i++)
+               dev->dev_addr[i] = hp100_inb(LAN_ADDR + i);
+
+       /* Reset statistics (counters) */
+       hp100_clear_stats(lp, ioaddr);
+
+       /* If busmaster mode is wanted, a dma-capable memory area is needed for
+        * the rx and tx PDLs
+        * PCI cards can access the whole PC memory. Therefore GFP_DMA is not
+        * needed for the allocation of the memory area.
+        */
+
+       /* TODO: We do not need this with old cards, where PDLs are stored
+        * in the cards shared memory area. But currently, busmaster has been
+        * implemented/tested only with the lassen chip anyway... */
+       if (lp->mode == 1) {    /* busmaster */
+               dma_addr_t page_baddr;
+               /* Get physically continuous memory for TX & RX PDLs    */
+               /* Conversion to new PCI API :
+                * Pages are always aligned and zeroed, no need to do it ourselves.
+                * Doc says should be OK for EISA bus as well - Jean II */
+               lp->page_vaddr_algn = pci_alloc_consistent(lp->pci_dev, MAX_RINGSIZE, &page_baddr);
+               if (!lp->page_vaddr_algn) {
+                       err = -ENOMEM;
+                       goto out_mem_ptr;
+               }
+               lp->whatever_offset = ((u_long) page_baddr) - ((u_long) lp->page_vaddr_algn);
+
+#ifdef HP100_DEBUG_BM
+               printk("hp100: %s: Reserved DMA memory from 0x%x to 0x%x\n", dev->name, (u_int) lp->page_vaddr_algn, (u_int) lp->page_vaddr_algn + MAX_RINGSIZE);
+#endif
+               lp->rxrcommit = lp->txrcommit = 0;
+               lp->rxrhead = lp->rxrtail = &(lp->rxring[0]);
+               lp->txrhead = lp->txrtail = &(lp->txring[0]);
+       }
+
+       /* Initialise the card. */
+       /* (I'm not really sure if it's a good idea to do this during probing, but
+        * like this it's assured that the lan connection type can be sensed
+        * correctly)
+        */
+       hp100_hwinit(dev);
+
+       /* Try to find out which kind of LAN the card is connected to. */
+       lp->lan_type = hp100_sense_lan(dev);
+
+       /* Print out a message what about what we think we have probed. */
+       printk("hp100: at 0x%x, IRQ %d, ", ioaddr, dev->irq);
+       switch (bus) {
+       case HP100_BUS_EISA:
+               printk("EISA");
+               break;
+       case HP100_BUS_PCI:
+               printk("PCI");
+               break;
+       default:
+               printk("ISA");
+               break;
+       }
+       printk(" bus, %dk SRAM (rx/tx %d%%).\n", lp->memory_size >> 10, lp->rx_ratio);
+
+       if (lp->mode == 2) {    /* memory mapped */
+               printk("hp100: Memory area at 0x%lx-0x%lx", mem_ptr_phys,
+                               (mem_ptr_phys + (mem_ptr_phys > 0x100000 ? (u_long) lp->memory_size : 16 * 1024)) - 1);
+               if (mem_ptr_virt)
+                       printk(" (virtual base %p)", mem_ptr_virt);
+               printk(".\n");
+
+               /* Set for info when doing ifconfig */
+               dev->mem_start = mem_ptr_phys;
+               dev->mem_end = mem_ptr_phys + lp->memory_size;
+       }
+
+       printk("hp100: ");
+       if (lp->lan_type != HP100_LAN_ERR)
+               printk("Adapter is attached to ");
+       switch (lp->lan_type) {
+       case HP100_LAN_100:
+               printk("100Mb/s Voice Grade AnyLAN network.\n");
+               break;
+       case HP100_LAN_10:
+               printk("10Mb/s network (10baseT).\n");
+               break;
+       case HP100_LAN_COAX:
+               printk("10Mb/s network (coax).\n");
+               break;
+       default:
+               printk("Warning! Link down.\n");
+       }
+
+       err = register_netdev(dev);
+       if (err)
+               goto out3;
+
+       return 0;
+out3:
+       if (local_mode == 1)
+               pci_free_consistent(lp->pci_dev, MAX_RINGSIZE + 0x0f,
+                                   lp->page_vaddr_algn,
+                                   virt_to_whatever(dev, lp->page_vaddr_algn));
+out_mem_ptr:
+       if (mem_ptr_virt)
+               iounmap(mem_ptr_virt);
+out2:
+       release_region(ioaddr, HP100_REGION_SIZE);
+out1:
+       return err;
+}
+
+/*
+ * hp100_hwinit - put the card into a stable init state.
+ * @dev: device to (re)initialise.
+ *
+ * Masks and acks all interrupts, shuts down bus-master mode first when
+ * lp->mode == 1, reloads the EEPROM, reprograms the option registers and
+ * the MMU, then takes the chip out of reset.  Interrupts stay disabled
+ * here; they are enabled later by hp100_start_interface().
+ */
+static void hp100_hwinit(struct net_device *dev)
+{
+       int ioaddr = dev->base_addr;
+       struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4202, TRACE);
+       printk("hp100: %s: hwinit\n", dev->name);
+#endif
+
+       /* Initialise the card. -------------------------------------------- */
+
+       /* Clear all pending Ints and disable Ints */
+       hp100_page(PERFORMANCE);
+       hp100_outw(0xfefe, IRQ_MASK);   /* mask off all ints */
+       hp100_outw(0xffff, IRQ_STATUS); /* clear all pending ints */
+
+       hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);
+       hp100_outw(HP100_TRI_INT | HP100_SET_HB, OPTION_LSW);
+
+       if (lp->mode == 1) {
+               hp100_BM_shutdown(dev); /* disables BM, puts cascade in reset */
+               wait();
+       } else {
+               hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);
+               hp100_cascade_reset(dev, 1);
+               hp100_page(MAC_CTRL);
+               hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1);
+       }
+
+       /* Initiate EEPROM reload */
+       hp100_load_eeprom(dev, 0);
+
+       /* NOTE(review): presumably gives the EEPROM reload time to finish —
+        * confirm against the wait() implementation. */
+       wait();
+
+       /* Go into reset again. */
+       hp100_cascade_reset(dev, 1);
+
+       /* Set Option Registers to a safe state  */
+       hp100_outw(HP100_DEBUG_EN |
+                  HP100_RX_HDR |
+                  HP100_EE_EN |
+                  HP100_BM_WRITE |
+                  HP100_BM_READ | HP100_RESET_HB |
+                  HP100_FAKE_INT |
+                  HP100_INT_EN |
+                  HP100_MEM_EN |
+                  HP100_IO_EN | HP100_RESET_LB, OPTION_LSW);
+
+       hp100_outw(HP100_TRI_INT |
+                  HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW);
+
+       hp100_outb(HP100_PRIORITY_TX |
+                  HP100_ADV_NXT_PKT |
+                  HP100_TX_CMD | HP100_RESET_LB, OPTION_MSW);
+
+       /* TODO: Configure MMU for Ram Test. */
+       /* TODO: Ram Test. */
+
+       /* Re-check if adapter is still at same i/o location      */
+       /* (If the base i/o in eeprom has been changed but the    */
+       /* registers had not been changed, a reload of the eeprom */
+       /* would move the adapter to the address stored in eeprom */
+
+       /* TODO: Code to implement. */
+
+       /* Until here it was code from HWdiscover procedure. */
+       /* Next comes code from mmuinit procedure of SCO BM driver which is
+        * called from HWconfigure in the SCO driver.  */
+
+       /* Initialise MMU, eventually switch on Busmaster Mode, initialise
+        * multicast filter...
+        */
+       hp100_mmuinit(dev);
+
+       /* We don't turn the interrupts on here - this is done by start_interface. */
+       wait();                 /* TODO: Do we really need this? */
+
+       /* Enable Hardware (e.g. unreset) */
+       hp100_cascade_reset(dev, 0);
+
+       /* ------- initialisation complete ----------- */
+
+       /* Finally try to log in the Hub if there may be a VG connection. */
+       if ((lp->lan_type == HP100_LAN_100) || (lp->lan_type == HP100_LAN_ERR))
+               hp100_login_to_vg_hub(dev, 0);  /* relogin */
+
+}
+
+
+/*
+ * mmuinit - Reinitialise Cascade MMU and MAC settings.
+ * @dev: device whose chip is reprogrammed.
+ *
+ * Note: Must already be in reset and leaves card in reset.
+ * lp->mode selects the host access method used throughout this driver:
+ * 1 = busmaster, 2 = memory mapped, 3 = programmed I/O (see branches below).
+ */
+static void hp100_mmuinit(struct net_device *dev)
+{
+       int ioaddr = dev->base_addr;
+       struct hp100_private *lp = netdev_priv(dev);
+       int i;
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4203, TRACE);
+       printk("hp100: %s: mmuinit\n", dev->name);
+#endif
+
+#ifdef HP100_DEBUG
+       if (0 != (hp100_inw(OPTION_LSW) & HP100_HW_RST)) {
+               printk("hp100: %s: Not in reset when entering mmuinit. Fix me.\n", dev->name);
+               return;
+       }
+#endif
+
+       /* Make sure IRQs are masked off and ack'ed. */
+       hp100_page(PERFORMANCE);
+       hp100_outw(0xfefe, IRQ_MASK);   /* mask off all ints */
+       hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */
+
+       /*
+        * Enable Hardware
+        * - Clear Debug En, Rx Hdr Pipe, EE En, I/O En, Fake Int and Intr En
+        * - Set Tri-State Int, Bus Master Rd/Wr, and Mem Map Disable
+        * - Clear Priority, Advance Pkt and Xmit Cmd
+        */
+
+       hp100_outw(HP100_DEBUG_EN |
+                  HP100_RX_HDR |
+                  HP100_EE_EN | HP100_RESET_HB |
+                  HP100_IO_EN |
+                  HP100_FAKE_INT |
+                  HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);
+
+       hp100_outw(HP100_TRI_INT | HP100_SET_HB, OPTION_LSW);
+
+       if (lp->mode == 1) {    /* busmaster */
+               hp100_outw(HP100_BM_WRITE |
+                          HP100_BM_READ |
+                          HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW);
+       } else if (lp->mode == 2) {     /* memory mapped */
+               hp100_outw(HP100_BM_WRITE |
+                          HP100_BM_READ | HP100_RESET_HB, OPTION_LSW);
+               hp100_outw(HP100_MMAP_DIS | HP100_RESET_HB, OPTION_LSW);
+               hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW);
+               hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
+       } else if (lp->mode == 3) {     /* i/o mapped mode */
+               hp100_outw(HP100_MMAP_DIS | HP100_SET_HB |
+                          HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
+       }
+
+       hp100_page(HW_MAP);
+       hp100_outb(0, EARLYRXCFG);
+       hp100_outw(0, EARLYTXCFG);
+
+       /*
+        * Enable Bus Master mode
+        */
+       if (lp->mode == 1) {    /* busmaster */
+               /* Experimental: Set some PCI configuration bits */
+               hp100_page(HW_MAP);
+               hp100_andb(~HP100_PDL_USE3, MODECTRL1); /* BM engine read maximum */
+               hp100_andb(~HP100_TX_DUALQ, MODECTRL1); /* No Queue for Priority TX */
+
+               /* PCI Bus failures should result in a Misc. Interrupt */
+               hp100_orb(HP100_EN_BUS_FAIL, MODECTRL2);
+
+               hp100_outw(HP100_BM_READ | HP100_BM_WRITE | HP100_SET_HB, OPTION_LSW);
+               hp100_page(HW_MAP);
+               /* Use Burst Mode and switch on PAGE_CK */
+               hp100_orb(HP100_BM_BURST_RD | HP100_BM_BURST_WR, BM);
+               if ((lp->chip == HP100_CHIPID_RAINIER) || (lp->chip == HP100_CHIPID_SHASTA))
+                       hp100_orb(HP100_BM_PAGE_CK, BM);
+               hp100_orb(HP100_BM_MASTER, BM);
+       } else {                /* not busmaster */
+
+               hp100_page(HW_MAP);
+               hp100_andb(~HP100_BM_MASTER, BM);
+       }
+
+       /*
+        * Divide card memory into regions for Rx, Tx and, if non-ETR chip, PDLs
+        */
+       hp100_page(MMU_CFG);
+       if (lp->mode == 1) {    /* only needed for Busmaster */
+               int xmit_stop, recv_stop;
+
+               if ((lp->chip == HP100_CHIPID_RAINIER) ||
+                   (lp->chip == HP100_CHIPID_SHASTA)) {
+                       int pdl_stop;
+
+                       /*
+                        * Each pdl is 508 bytes long. (63 frags * 4 bytes for address and
+                        * 4 bytes for header). We will leave NUM_RXPDLS * 508 (rounded
+                        * to the next higher 1k boundary) bytes for the rx-pdl's
+                        * Note: For non-etr chips the transmit stop register must be
+                        * programmed on a 1k boundary, i.e. bits 9:0 must be zero.
+                        */
+                       pdl_stop = lp->memory_size;
+                       xmit_stop = (pdl_stop - 508 * (MAX_RX_PDL) - 16) & ~(0x03ff);
+                       recv_stop = (xmit_stop * (lp->rx_ratio) / 100) & ~(0x03ff);
+                       hp100_outw((pdl_stop >> 4) - 1, PDL_MEM_STOP);
+#ifdef HP100_DEBUG_BM
+                       printk("hp100: %s: PDL_STOP = 0x%x\n", dev->name, pdl_stop);
+#endif
+               } else {
+                       /* ETR chip (Lassen) in busmaster mode */
+                       xmit_stop = (lp->memory_size) - 1;
+                       recv_stop = ((lp->memory_size * lp->rx_ratio) / 100) & ~(0x03ff);
+               }
+
+               /* Stop registers are programmed in units of 16 bytes (>> 4). */
+               hp100_outw(xmit_stop >> 4, TX_MEM_STOP);
+               hp100_outw(recv_stop >> 4, RX_MEM_STOP);
+#ifdef HP100_DEBUG_BM
+               printk("hp100: %s: TX_STOP  = 0x%x\n", dev->name, xmit_stop >> 4);
+               printk("hp100: %s: RX_STOP  = 0x%x\n", dev->name, recv_stop >> 4);
+#endif
+       } else {
+               /* Slave modes (memory mapped and programmed io)  */
+               hp100_outw((((lp->memory_size * lp->rx_ratio) / 100) >> 4), RX_MEM_STOP);
+               hp100_outw(((lp->memory_size - 1) >> 4), TX_MEM_STOP);
+#ifdef HP100_DEBUG
+               printk("hp100: %s: TX_MEM_STOP: 0x%x\n", dev->name, hp100_inw(TX_MEM_STOP));
+               printk("hp100: %s: RX_MEM_STOP: 0x%x\n", dev->name, hp100_inw(RX_MEM_STOP));
+#endif
+       }
+
+       /* Write MAC address into page 1 */
+       hp100_page(MAC_ADDRESS);
+       for (i = 0; i < 6; i++)
+               hp100_outb(dev->dev_addr[i], MAC_ADDR + i);
+
+       /* Zero the multicast hash registers */
+       for (i = 0; i < 8; i++)
+               hp100_outb(0x0, HASH_BYTE0 + i);
+
+       /* Set up MAC defaults */
+       hp100_page(MAC_CTRL);
+
+       /* Go to LAN Page and zero all filter bits */
+       /* Zero accept error, accept multicast, accept broadcast and accept */
+       /* all directed packet bits */
+       hp100_andb(~(HP100_RX_EN |
+                    HP100_TX_EN |
+                    HP100_ACC_ERRORED |
+                    HP100_ACC_MC |
+                    HP100_ACC_BC | HP100_ACC_PHY), MAC_CFG_1);
+
+       hp100_outb(0x00, MAC_CFG_2);
+
+       /* Zero the frame format bit. This works around a training bug in the */
+       /* new hubs. */
+       hp100_outb(0x00, VG_LAN_CFG_2); /* (use 802.3) */
+
+       if (lp->priority_tx)
+               hp100_outb(HP100_PRIORITY_TX | HP100_SET_LB, OPTION_MSW);
+       else
+               hp100_outb(HP100_PRIORITY_TX | HP100_RESET_LB, OPTION_MSW);
+
+       hp100_outb(HP100_ADV_NXT_PKT |
+                  HP100_TX_CMD | HP100_RESET_LB, OPTION_MSW);
+
+       /* If busmaster, initialize the PDLs */
+       if (lp->mode == 1)
+               hp100_init_pdls(dev);
+
+       /* Go to performance page and initialize isr and imr registers */
+       hp100_page(PERFORMANCE);
+       hp100_outw(0xfefe, IRQ_MASK);   /* mask off all ints */
+       hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */
+}
+
+/*
+ *  open/close functions
+ */
+
+/*
+ * hp100_open - bring the interface up (ndo_open).
+ * @dev: device being opened.
+ *
+ * Requests the IRQ (shared when the bus is PCI or EISA), senses the LAN
+ * type, re-initialises the hardware and starts the interface (which sets
+ * the MAC modes and enables interrupts).
+ * Returns 0 on success or -EAGAIN if the IRQ cannot be obtained.
+ */
+static int hp100_open(struct net_device *dev)
+{
+       struct hp100_private *lp = netdev_priv(dev);
+#ifdef HP100_DEBUG_B
+       int ioaddr = dev->base_addr;
+#endif
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4204, TRACE);
+       printk("hp100: %s: open\n", dev->name);
+#endif
+
+       /* New: if bus is PCI or EISA, interrupts might be shared interrupts */
+       if (request_irq(dev->irq, hp100_interrupt,
+                       lp->bus == HP100_BUS_PCI || lp->bus ==
+                       HP100_BUS_EISA ? IRQF_SHARED : 0,
+                       dev->name, dev)) {
+               printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
+               return -EAGAIN;
+       }
+
+       netif_trans_update(dev); /* prevent tx timeout */
+       netif_start_queue(dev);
+
+       lp->lan_type = hp100_sense_lan(dev);
+       lp->mac1_mode = HP100_MAC1MODE3;
+       lp->mac2_mode = HP100_MAC2MODE3;
+       memset(&lp->hash_bytes, 0x00, 8);
+
+       hp100_stop_interface(dev);
+
+       hp100_hwinit(dev);
+
+       hp100_start_interface(dev);     /* sets mac modes, enables interrupts */
+
+       return 0;
+}
+
+/*
+ * hp100_close - bring the interface down (ndo_stop).
+ * @dev: device being closed.
+ *
+ * Masks all IRQs, stops the interface, relogs into the VG hub on a
+ * 100Mb/s link, stops the queue and releases the interrupt line.
+ * Always returns 0.
+ */
+static int hp100_close(struct net_device *dev)
+{
+       int ioaddr = dev->base_addr;
+       struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4205, TRACE);
+       printk("hp100: %s: close\n", dev->name);
+#endif
+
+       hp100_page(PERFORMANCE);
+       hp100_outw(0xfefe, IRQ_MASK);   /* mask off all IRQs */
+
+       hp100_stop_interface(dev);
+
+       if (lp->lan_type == HP100_LAN_100)
+               lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+
+       netif_stop_queue(dev);
+
+       free_irq(dev->irq, dev);
+
+#ifdef HP100_DEBUG
+       printk("hp100: %s: close LSW = 0x%x\n", dev->name,
+              hp100_inw(OPTION_LSW));
+#endif
+
+       return 0;
+}
+
+
+/*
+ * hp100_init_pdls - configure the Rx and Tx PDL rings (busmaster mode).
+ * @dev: device whose rings are set up.
+ *
+ * Zeroes the DMA-accessible page and carves it into MAX_RX_PDL receive
+ * and MAX_TX_PDL transmit packet descriptor lists, linking each ring
+ * circularly through the ->next pointers (built back-to-front so the
+ * last element points at element 0).
+ */
+static void hp100_init_pdls(struct net_device *dev)
+{
+       struct hp100_private *lp = netdev_priv(dev);
+       hp100_ring_t *ringptr;
+       u_int *pageptr;         /* Warning : increment by 4 - Jean II */
+       int i;
+
+#ifdef HP100_DEBUG_B
+       int ioaddr = dev->base_addr;
+#endif
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4206, TRACE);
+       printk("hp100: %s: init pdls\n", dev->name);
+#endif
+
+       if (!lp->page_vaddr_algn)
+               printk("hp100: %s: Warning: lp->page_vaddr_algn not initialised!\n", dev->name);
+       else {
+               /* pageptr shall point into the DMA accessible memory region  */
+               /* we use this pointer to status the upper limit of allocated */
+               /* memory in the allocated page. */
+               /* note: align the pointers to the pci cache line size */
+               memset(lp->page_vaddr_algn, 0, MAX_RINGSIZE);   /* Zero  Rx/Tx ring page */
+               pageptr = lp->page_vaddr_algn;
+
+               lp->rxrcommit = 0;
+               ringptr = lp->rxrhead = lp->rxrtail = &(lp->rxring[0]);
+
+               /* Initialise Rx Ring */
+               for (i = MAX_RX_PDL - 1; i >= 0; i--) {
+                       lp->rxring[i].next = ringptr;
+                       ringptr = &(lp->rxring[i]);
+                       pageptr += hp100_init_rxpdl(dev, ringptr, pageptr);
+               }
+
+               /* Initialise Tx Ring */
+               lp->txrcommit = 0;
+               ringptr = lp->txrhead = lp->txrtail = &(lp->txring[0]);
+               for (i = MAX_TX_PDL - 1; i >= 0; i--) {
+                       lp->txring[i].next = ringptr;
+                       ringptr = &(lp->txring[i]);
+                       pageptr += hp100_init_txpdl(dev, ringptr, pageptr);
+               }
+       }
+}
+
+
+/* These functions "format" the entries in the pdl structure   */
+/* They return how much memory the fragments need.            */
+/*
+ * hp100_init_rxpdl - set up one Rx PDL inside the DMA page.
+ * Returns the number of u32 words consumed from the page (the caller
+ * advances its u_int page pointer by this amount).
+ */
+static int hp100_init_rxpdl(struct net_device *dev,
+                           register hp100_ring_t * ringptr,
+                           register u32 * pdlptr)
+{
+       /* pdlptr is starting address for this pdl */
+
+       if (0 != (((unsigned long) pdlptr) & 0xf))
+               printk("hp100: %s: Init rxpdl: Unaligned pdlptr 0x%lx.\n",
+                      dev->name, (unsigned long) pdlptr);
+
+       ringptr->pdl = pdlptr + 1;
+       ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr + 1);
+       ringptr->skb = NULL;
+
+       /*
+        * Write address and length of first PDL Fragment (which is used for
+        * storing the RX-Header
+        * We use the 4 bytes _before_ the PDH in the pdl memory area to
+        * store this information. (PDH is at offset 0x04)
+        */
+       /* Note that pdlptr+1 and not pdlptr is the pointer to the PDH */
+
+       *(pdlptr + 2) = (u_int) virt_to_whatever(dev, pdlptr);  /* Address Frag 1 */
+       *(pdlptr + 3) = 4;      /* Length  Frag 1 */
+
+       return roundup(MAX_RX_FRAG * 2 + 2, 4);
+}
+
+
+/*
+ * hp100_init_txpdl - set up one Tx PDL inside the DMA page.
+ * Unlike the Rx variant no header fragment is pre-filled; only the
+ * virtual and bus addresses of the PDL are recorded.  Returns the
+ * number of u32 words consumed from the page.
+ */
+static int hp100_init_txpdl(struct net_device *dev,
+                           register hp100_ring_t * ringptr,
+                           register u32 * pdlptr)
+{
+       if (0 != (((unsigned long) pdlptr) & 0xf))
+               printk("hp100: %s: Init txpdl: Unaligned pdlptr 0x%lx.\n", dev->name, (unsigned long) pdlptr);
+
+       ringptr->pdl = pdlptr;  /* +1; */
+       ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr);     /* +1 */
+       ringptr->skb = NULL;
+
+       return roundup(MAX_TX_FRAG * 2 + 2, 4);
+}
+
+/*
+ * hp100_build_rx_pdl allocates an skb_buff of maximum size plus two bytes
+ * for possible odd word alignment rounding up to next dword and set PDL
+ * address for fragment#2
+ * Returns: 0 if unable to allocate skb_buff
+ *          1 if successful
+ */
+static int hp100_build_rx_pdl(hp100_ring_t * ringptr,
+                             struct net_device *dev)
+{
+#ifdef HP100_DEBUG_B
+       int ioaddr = dev->base_addr;
+#endif
+#ifdef HP100_DEBUG_BM
+       u_int *p;
+#endif
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4207, TRACE);
+       printk("hp100: %s: build rx pdl\n", dev->name);
+#endif
+
+       /* Allocate skb buffer of maximum size */
+       /* Note: This depends on the alloc_skb functions allocating more
+        * space than requested, i.e. aligning to 16bytes */
+
+       ringptr->skb = netdev_alloc_skb(dev, roundup(MAX_ETHER_SIZE + 2, 4));
+
+       if (NULL != ringptr->skb) {
+               /*
+                * Reserve 2 bytes at the head of the buffer to land the IP header
+                * on a long word boundary (According to the Network Driver section
+                * in the Linux KHG, this should help to increase performance.)
+                */
+               skb_reserve(ringptr->skb, 2);
+
+               /* NOTE(review): skb_put() should return the pre-extension tail,
+                * which equals skb->data here, so this assignment looks
+                * redundant — confirm before relying on it. */
+               ringptr->skb->data = skb_put(ringptr->skb, MAX_ETHER_SIZE);
+
+               /* ringptr->pdl points to the beginning of the PDL, i.e. the PDH */
+               /* Note: 1st Fragment is used for the 4 byte packet status
+                * (receive header). Its PDL entries are set up by init_rxpdl. So
+                * here we only have to set up the PDL fragment entries for the data
+                * part. Those 4 bytes will be stored in the DMA memory region
+                * directly before the PDL.
+                */
+#ifdef HP100_DEBUG_BM
+               printk("hp100: %s: build_rx_pdl: PDH@0x%x, skb->data (len %d) at 0x%x\n",
+                                    dev->name, (u_int) ringptr->pdl,
+                                    roundup(MAX_ETHER_SIZE + 2, 4),
+                                    (unsigned int) ringptr->skb->data);
+#endif
+
+               /* Conversion to new PCI API : map skbuf data to PCI bus.
+                * Doc says it's OK for EISA as well - Jean II */
+               ringptr->pdl[0] = 0x00020000;   /* Write PDH */
+               ringptr->pdl[3] = pdl_map_data(netdev_priv(dev),
+                                              ringptr->skb->data);
+               ringptr->pdl[4] = MAX_ETHER_SIZE;       /* Length of Data */
+
+#ifdef HP100_DEBUG_BM
+               for (p = (ringptr->pdl); p < (ringptr->pdl + 5); p++)
+                       printk("hp100: %s: Adr 0x%.8x = 0x%.8x\n", dev->name, (u_int) p, (u_int) * p);
+#endif
+               return 1;
+       }
+       /* else: */
+       /* alloc_skb failed (no memory) -> still can receive the header
+        * fragment into PDL memory. make PDL safe by clearing msgptr and
+        * making the PDL only 1 fragment (i.e. the 4 byte packet status)
+        */
+#ifdef HP100_DEBUG_BM
+       printk("hp100: %s: build_rx_pdl: PDH@0x%x, No space for skb.\n", dev->name, (u_int) ringptr->pdl);
+#endif
+
+       ringptr->pdl[0] = 0x00010000;   /* PDH: Count=1 Fragment */
+
+       return 0;
+}
+
+/*
+ *  hp100_rxfill - attempt to fill the Rx Ring will empty skb's
+ *  @dev: device whose Rx ring is replenished.
+ *
+ * Makes assumption that skb's are always contiguous memory areas and
+ * therefore PDLs contain only 2 physical fragments.
+ * -  While the number of Rx PDLs with buffers is less than maximum
+ *      a.  Get a maximum packet size skb
+ *      b.  Put the physical address of the buffer into the PDL.
+ *      c.  Output physical address of PDL to adapter.
+ *
+ * Stops early (without error) as soon as an skb allocation fails.
+ */
+static void hp100_rxfill(struct net_device *dev)
+{
+       int ioaddr = dev->base_addr;
+
+       struct hp100_private *lp = netdev_priv(dev);
+       hp100_ring_t *ringptr;
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4208, TRACE);
+       printk("hp100: %s: rxfill\n", dev->name);
+#endif
+
+       hp100_page(PERFORMANCE);
+
+       while (lp->rxrcommit < MAX_RX_PDL) {
+               /*
+                  ** Attempt to get a buffer and build a Rx PDL.
+                */
+               ringptr = lp->rxrtail;
+               if (0 == hp100_build_rx_pdl(ringptr, dev)) {
+                       return; /* None available, return */
+               }
+
+               /* Hand this PDL over to the card */
+               /* Note: This needs performance page selected! */
+#ifdef HP100_DEBUG_BM
+               printk("hp100: %s: rxfill: Hand to card: pdl #%d @0x%x phys:0x%x, buffer: 0x%x\n",
+                                    dev->name, lp->rxrcommit, (u_int) ringptr->pdl,
+                                    (u_int) ringptr->pdl_paddr, (u_int) ringptr->pdl[3]);
+#endif
+
+               hp100_outl((u32) ringptr->pdl_paddr, RX_PDA);
+
+               lp->rxrcommit += 1;
+               lp->rxrtail = ringptr->next;
+       }
+}
+
+/*
+ * BM_shutdown - shutdown bus mastering and leave chip in reset state
+ * @dev: device whose bus-master engine is stopped.
+ *
+ * Waits for Rx/Tx to go idle, then uses the chip-generation specific
+ * procedure (Lassen/ETR vs. Shasta/Rainier) to drain DMA activity
+ * before disabling HP100_BM_MASTER and asserting cascade reset.
+ */
+
+static void hp100_BM_shutdown(struct net_device *dev)
+{
+       int ioaddr = dev->base_addr;
+       struct hp100_private *lp = netdev_priv(dev);
+       unsigned long time;
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4209, TRACE);
+       printk("hp100: %s: bm shutdown\n", dev->name);
+#endif
+
+       hp100_page(PERFORMANCE);
+       hp100_outw(0xfefe, IRQ_MASK);   /* mask off all ints */
+       hp100_outw(0xffff, IRQ_STATUS); /* Ack all ints */
+
+       /* Ensure Interrupts are off */
+       hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);
+
+       /* Disable all MAC activity */
+       hp100_page(MAC_CTRL);
+       hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1);    /* stop rx/tx */
+
+       /* If cascade MMU is not already in reset */
+       if (0 != (hp100_inw(OPTION_LSW) & HP100_HW_RST)) {
+               /* Wait 1.3ms (10Mb max packet time) to ensure MAC is idle so
+                * MMU pointers will not be reset out from underneath
+                */
+               hp100_page(MAC_CTRL);
+               for (time = 0; time < 5000; time++) {
+                       if ((hp100_inb(MAC_CFG_1) & (HP100_TX_IDLE | HP100_RX_IDLE)) == (HP100_TX_IDLE | HP100_RX_IDLE))
+                               break;
+               }
+
+               /* Shutdown algorithm depends on the generation of Cascade */
+               if (lp->chip == HP100_CHIPID_LASSEN) {  /* ETR shutdown/reset */
+                       /* Disable Busmaster mode and wait for bit to go to zero. */
+                       hp100_page(HW_MAP);
+                       hp100_andb(~HP100_BM_MASTER, BM);
+                       /* 100 ms timeout */
+                       for (time = 0; time < 32000; time++) {
+                               if (0 == (hp100_inb(BM) & HP100_BM_MASTER))
+                                       break;
+                       }
+               } else {        /* Shasta or Rainier Shutdown/Reset */
+                       /* To ensure all bus master inloading activity has ceased,
+                        * wait for no Rx PDAs or no Rx packets on card.
+                        */
+                       hp100_page(PERFORMANCE);
+                       /* 100 ms timeout */
+                       for (time = 0; time < 10000; time++) {
+                               /* RX_PDL: PDLs not executed. */
+                               /* RX_PKT_CNT: RX'd packets on card. */
+                               if ((hp100_inb(RX_PDL) == 0) && (hp100_inb(RX_PKT_CNT) == 0))
+                                       break;
+                       }
+
+                       if (time >= 10000)
+                               printk("hp100: %s: BM shutdown error.\n", dev->name);
+
+                       /* To ensure all bus master outloading activity has ceased,
+                        * wait until the Tx PDA count goes to zero or no more Tx space
+                        * available in the Tx region of the card.
+                        */
+                       /* 100 ms timeout */
+                       for (time = 0; time < 10000; time++) {
+                               if ((0 == hp100_inb(TX_PKT_CNT)) &&
+                                   (0 != (hp100_inb(TX_MEM_FREE) & HP100_AUTO_COMPARE)))
+                                       break;
+                       }
+
+                       /* Disable Busmaster mode */
+                       hp100_page(HW_MAP);
+                       hp100_andb(~HP100_BM_MASTER, BM);
+               }       /* end of shutdown procedure for non-etr parts */
+
+               hp100_cascade_reset(dev, 1);
+       }
+       hp100_page(PERFORMANCE);
+       /* hp100_outw( HP100_BM_READ | HP100_BM_WRITE | HP100_RESET_HB, OPTION_LSW ); */
+       /* Busmaster mode should be shut down now. */
+}
+
+/*
+ * hp100_check_lan - make sure a LAN type has been detected.
+ * @dev: device to check.
+ *
+ * If no LAN type is known yet (lp->lan_type < 0), stops the interface,
+ * senses the cable and, for a 100Mb/s VG link, relogs into the hub
+ * before restarting the interface.
+ * Returns 0 on success or -EIO if no connection is found.
+ */
+static int hp100_check_lan(struct net_device *dev)
+{
+       struct hp100_private *lp = netdev_priv(dev);
+
+       if (lp->lan_type < 0) { /* no LAN type detected yet? */
+               hp100_stop_interface(dev);
+               if ((lp->lan_type = hp100_sense_lan(dev)) < 0) {
+                       printk("hp100: %s: no connection found - check wire\n", dev->name);
+                       hp100_start_interface(dev);     /* 10Mb/s RX packets maybe handled */
+                       return -EIO;
+               }
+               if (lp->lan_type == HP100_LAN_100)
+                       lp->hub_status = hp100_login_to_vg_hub(dev, 0); /* relogin */
+               hp100_start_interface(dev);
+       }
+       return 0;
+}
+
+/*
+ *  transmit functions
+ */
+
+/* tx function for busmaster mode */
+/*
+ * Maps one skb for DMA, queues it on the low-priority Tx PDL ring and
+ * hands its bus address to the card.  When no free PDL is available the
+ * link is re-sensed / the interface reset, and the packet is dropped —
+ * NETDEV_TX_OK is still returned so the stack does not requeue it.
+ */
+static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
+                                      struct net_device *dev)
+{
+       unsigned long flags;
+       int i, ok_flag;
+       int ioaddr = dev->base_addr;
+       struct hp100_private *lp = netdev_priv(dev);
+       hp100_ring_t *ringptr;
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4210, TRACE);
+       printk("hp100: %s: start_xmit_bm\n", dev->name);
+#endif
+       if (skb->len <= 0)
+               goto drop;
+
+       if (lp->chip == HP100_CHIPID_SHASTA && skb_padto(skb, ETH_ZLEN))
+               return NETDEV_TX_OK;
+
+       /* Get Tx ring tail pointer */
+       if (lp->txrtail->next == lp->txrhead) {
+               /* No memory. */
+#ifdef HP100_DEBUG
+               printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name);
+#endif
+               /* not waited long enough since last tx? */
+               if (time_before(jiffies, dev_trans_start(dev) + HZ))
+                       goto drop;
+
+               if (hp100_check_lan(dev))
+                       goto drop;
+
+               if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) {
+                       /* we have a 100Mb/s adapter but it isn't connected to hub */
+                       printk("hp100: %s: login to 100Mb/s hub retry\n", dev->name);
+                       hp100_stop_interface(dev);
+                       lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+                       hp100_start_interface(dev);
+               } else {
+                       spin_lock_irqsave(&lp->lock, flags);
+                       hp100_ints_off();       /* Useful ? Jean II */
+                       i = hp100_sense_lan(dev);
+                       hp100_ints_on();
+                       spin_unlock_irqrestore(&lp->lock, flags);
+                       if (i == HP100_LAN_ERR)
+                               printk("hp100: %s: link down detected\n", dev->name);
+                       else if (lp->lan_type != i) {   /* cable change! */
+                               /* it's very hard - all network settings must be changed!!! */
+                               printk("hp100: %s: cable change 10Mb/s <-> 100Mb/s detected\n", dev->name);
+                               lp->lan_type = i;
+                               hp100_stop_interface(dev);
+                               if (lp->lan_type == HP100_LAN_100)
+                                       lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+                               hp100_start_interface(dev);
+                       } else {
+                               printk("hp100: %s: interface reset\n", dev->name);
+                               hp100_stop_interface(dev);
+                               if (lp->lan_type == HP100_LAN_100)
+                                       lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+                               hp100_start_interface(dev);
+                       }
+               }
+
+               goto drop;
+       }
+
+       /*
+        * we have to turn int's off before modifying this, otherwise
+        * a tx_pdl_cleanup could occur at the same time
+        */
+       spin_lock_irqsave(&lp->lock, flags);
+       ringptr = lp->txrtail;
+       lp->txrtail = ringptr->next;
+
+       /* Check whether packet has minimal packet size */
+       ok_flag = skb->len >= HP100_MIN_PACKET_SIZE;
+       i = ok_flag ? skb->len : HP100_MIN_PACKET_SIZE;
+
+       ringptr->skb = skb;
+       ringptr->pdl[0] = ((1 << 16) | i);      /* PDH: 1 Fragment & length */
+       if (lp->chip == HP100_CHIPID_SHASTA) {
+               /* TODO:Could someone who has the EISA card please check if this works? */
+               ringptr->pdl[2] = i;
+       } else {                /* Lassen */
+               /* In the PDL, don't use the padded size but the real packet size: */
+               ringptr->pdl[2] = skb->len;     /* 1st Frag: Length of frag */
+       }
+       /* Conversion to new PCI API : map skbuf data to PCI bus.
+        * Doc says it's OK for EISA as well - Jean II */
+       ringptr->pdl[1] = ((u32) pci_map_single(lp->pci_dev, skb->data, ringptr->pdl[2], PCI_DMA_TODEVICE));    /* 1st Frag: Adr. of data */
+
+       /* Hand this PDL to the card. */
+       hp100_outl(ringptr->pdl_paddr, TX_PDA_L);       /* Low Prio. Queue */
+
+       lp->txrcommit++;
+
+       dev->stats.tx_packets++;
+       dev->stats.tx_bytes += skb->len;
+
+       spin_unlock_irqrestore(&lp->lock, flags);
+
+       return NETDEV_TX_OK;
+
+drop:
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
+}
+
+
+/* clean_txring checks if packets have been sent by the card by reading
+ * the TX_PDL register from the performance page and comparing it to the
+ * number of committed packets. It then frees the skb's of the packets that
+ * obviously have been sent to the network.
+ *
+ * @dev: device whose Tx ring is reaped.
+ *
+ * Needs the PERFORMANCE page selected.
+ */
+static void hp100_clean_txring(struct net_device *dev)
+{
+       struct hp100_private *lp = netdev_priv(dev);
+       int ioaddr = dev->base_addr;
+       int donecount;
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4211, TRACE);
+       printk("hp100: %s: clean txring\n", dev->name);
+#endif
+
+       /* How many PDLs have been transmitted? */
+       donecount = (lp->txrcommit) - hp100_inb(TX_PDL);
+
+#ifdef HP100_DEBUG
+       if (donecount > MAX_TX_PDL)
+               printk("hp100: %s: Warning: More PDLs transmitted than committed to card???\n", dev->name);
+#endif
+
+       for (; 0 != donecount; donecount--) {
+#ifdef HP100_DEBUG_BM
+               printk("hp100: %s: Free skb: data @0x%.8x txrcommit=0x%x TXPDL=0x%x, done=0x%x\n",
+                               dev->name, (u_int) lp->txrhead->skb->data,
+                               lp->txrcommit, hp100_inb(TX_PDL), donecount);
+#endif
+               /* Conversion to new PCI API : NOP */
+               /* Undo the DMA mapping made in start_xmit_bm before freeing. */
+               pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE);
+               dev_consume_skb_any(lp->txrhead->skb);
+               lp->txrhead->skb = NULL;
+               lp->txrhead = lp->txrhead->next;
+               lp->txrcommit--;
+       }
+}
+
+/*
+ * tx function for slave modes (i.e. NOT busmaster): the whole frame is
+ * pushed through the card's data register window (programmed I/O) or the
+ * memory-mapped window, padded with zeroes up to HP100_MIN_PACKET_SIZE
+ * if necessary, and the card is then commanded to send it.
+ * Always returns NETDEV_TX_OK; on any failure the skb is dropped
+ * instead of being requeued.
+ */
+static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
+                                   struct net_device *dev)
+{
+       unsigned long flags;
+       int i, ok_flag;
+       int ioaddr = dev->base_addr;
+       u_short val;
+       struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4212, TRACE);
+       printk("hp100: %s: start_xmit\n", dev->name);
+#endif
+       if (skb->len <= 0)
+               goto drop;
+
+       if (hp100_check_lan(dev))
+               goto drop;
+
+       /* If there is not enough free memory on the card... */
+       i = hp100_inl(TX_MEM_FREE) & 0x7fffffff;
+       if (!(((i / 2) - 539) > (skb->len + 16) && (hp100_inb(TX_PKT_CNT) < 255))) {
+#ifdef HP100_DEBUG
+               printk("hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i);
+#endif
+               /* not waited long enough since last failed tx try? */
+               if (time_before(jiffies, dev_trans_start(dev) + HZ)) {
+#ifdef HP100_DEBUG
+                       printk("hp100: %s: trans_start timing problem\n",
+                              dev->name);
+#endif
+                       goto drop;
+               }
+               if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) {
+                       /* we have a 100Mb/s adapter but it isn't connected to hub */
+                       printk("hp100: %s: login to 100Mb/s hub retry\n", dev->name);
+                       hp100_stop_interface(dev);
+                       lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+                       hp100_start_interface(dev);
+               } else {
+                       /* Re-probe the link: maybe it dropped or the cable type
+                        * changed since we last looked. */
+                       spin_lock_irqsave(&lp->lock, flags);
+                       hp100_ints_off();       /* Useful ? Jean II */
+                       i = hp100_sense_lan(dev);
+                       hp100_ints_on();
+                       spin_unlock_irqrestore(&lp->lock, flags);
+                       if (i == HP100_LAN_ERR)
+                               printk("hp100: %s: link down detected\n", dev->name);
+                       else if (lp->lan_type != i) {   /* cable change! */
+                               /* it's very hard - all network setting must be changed!!! */
+                               printk("hp100: %s: cable change 10Mb/s <-> 100Mb/s detected\n", dev->name);
+                               lp->lan_type = i;
+                               hp100_stop_interface(dev);
+                               if (lp->lan_type == HP100_LAN_100)
+                                       lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+                               hp100_start_interface(dev);
+                       } else {
+                               printk("hp100: %s: interface reset\n", dev->name);
+                               hp100_stop_interface(dev);
+                               if (lp->lan_type == HP100_LAN_100)
+                                       lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+                               hp100_start_interface(dev);
+                               mdelay(1);
+                       }
+               }
+               goto drop;
+       }
+
+       /* Busy-wait (bounded) for a previous transmit command to clear. */
+       for (i = 0; i < 6000 && (hp100_inb(OPTION_MSW) & HP100_TX_CMD); i++) {
+#ifdef HP100_DEBUG_TX
+               printk("hp100: %s: start_xmit: busy\n", dev->name);
+#endif
+       }
+
+       spin_lock_irqsave(&lp->lock, flags);
+       hp100_ints_off();
+       val = hp100_inw(IRQ_STATUS);
+       /* Ack / clear the interrupt TX_COMPLETE interrupt - this interrupt is set
+        * when the current packet being transmitted on the wire is completed. */
+       hp100_outw(HP100_TX_COMPLETE, IRQ_STATUS);
+#ifdef HP100_DEBUG_TX
+       printk("hp100: %s: start_xmit: irq_status=0x%.4x, irqmask=0x%.4x, len=%d\n",
+                       dev->name, val, hp100_inw(IRQ_MASK), (int) skb->len);
+#endif
+
+       /* ok_flag set: frame already meets the minimum length, so no padding
+        * is needed; i is the on-wire length we announce to the card. */
+       ok_flag = skb->len >= HP100_MIN_PACKET_SIZE;
+       i = ok_flag ? skb->len : HP100_MIN_PACKET_SIZE;
+
+       hp100_outw(i, DATA32);  /* tell card the total packet length */
+       hp100_outw(i, FRAGMENT_LEN);    /* and first/only fragment length    */
+
+       if (lp->mode == 2) {    /* memory mapped */
+               /* Note: The J2585B needs alignment to 32bits here!  */
+               memcpy_toio(lp->mem_ptr_virt, skb->data, (skb->len + 3) & ~3);
+               if (!ok_flag)
+                       memset_io(lp->mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb->len);
+       } else {                /* programmed i/o */
+               outsl(ioaddr + HP100_REG_DATA32, skb->data,
+                     (skb->len + 3) >> 2);
+               if (!ok_flag)
+                       for (i = (skb->len + 3) & ~3; i < HP100_MIN_PACKET_SIZE; i += 4)
+                               hp100_outl(0, DATA32);
+       }
+
+       hp100_outb(HP100_TX_CMD | HP100_SET_LB, OPTION_MSW);    /* send packet */
+
+       dev->stats.tx_packets++;
+       dev->stats.tx_bytes += skb->len;
+       hp100_ints_on();
+       spin_unlock_irqrestore(&lp->lock, flags);
+
+       dev_consume_skb_any(skb);
+
+#ifdef HP100_DEBUG_TX
+       printk("hp100: %s: start_xmit: end\n", dev->name);
+#endif
+
+       return NETDEV_TX_OK;
+
+drop:
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
+
+}
+
+
+/*
+ * Receive Function (Non-Busmaster mode)
+ * Called when an "Receive Packet" interrupt occurs, i.e. the receive
+ * packet counter is non-zero.
+ * For non-busmaster, this function does the whole work of transferring
+ * the packet to the host memory and then up to higher layers via skb
+ * and netif_rx.
+ */
+
+static void hp100_rx(struct net_device *dev)
+{
+       int packets, pkt_len;
+       int ioaddr = dev->base_addr;
+       struct hp100_private *lp = netdev_priv(dev);
+       u_int header;
+       struct sk_buff *skb;
+
+/* Fix: was "#ifdef DEBUG_B", which no other function uses, so this trace
+ * was dead even with HP100_DEBUG_B defined.  Use the driver-wide macro. */
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4213, TRACE);
+       printk("hp100: %s: rx\n", dev->name);
+#endif
+
+       /* First get indication of received lan packet */
+       /* RX_PKT_CND indicates the number of packets which have been fully */
+       /* received onto the card but have not been fully transferred of the card */
+       packets = hp100_inb(RX_PKT_CNT);
+#ifdef HP100_DEBUG_RX
+       if (packets > 1)
+               printk("hp100: %s: rx: waiting packets = %d\n", dev->name, packets);
+#endif
+
+       while (packets-- > 0) {
+               /* If ADV_NXT_PKT is still set, we have to wait until the card has */
+               /* really advanced to the next packet. */
+               for (pkt_len = 0; pkt_len < 6000 && (hp100_inb(OPTION_MSW) & HP100_ADV_NXT_PKT); pkt_len++) {
+#ifdef HP100_DEBUG_RX
+                       printk ("hp100: %s: rx: busy, remaining packets = %d\n", dev->name, packets);
+#endif
+               }
+
+               /* First we get the header, which contains information about the */
+               /* actual length of the received packet. */
+               if (lp->mode == 2) {    /* memory mapped mode */
+                       header = readl(lp->mem_ptr_virt);
+               } else          /* programmed i/o */
+                       header = hp100_inl(DATA32);
+
+               /* Round the length up to a 32-bit multiple: the data window is
+                * always read in whole 32-bit words below. */
+               pkt_len = ((header & HP100_PKT_LEN_MASK) + 3) & ~3;
+
+#ifdef HP100_DEBUG_RX
+               printk("hp100: %s: rx: new packet - length=%d, errors=0x%x, dest=0x%x\n",
+                                    dev->name, header & HP100_PKT_LEN_MASK,
+                                    (header >> 16) & 0xfff8, (header >> 16) & 7);
+#endif
+
+               /* Now we allocate the skb and transfer the data into it. */
+               skb = netdev_alloc_skb(dev, pkt_len + 2);
+               if (skb == NULL) {      /* Not enough memory->drop packet */
+#ifdef HP100_DEBUG
+                       printk("hp100: %s: rx: couldn't allocate a sk_buff of size %d\n",
+                                            dev->name, pkt_len);
+#endif
+                       dev->stats.rx_dropped++;
+               } else {        /* skb successfully allocated */
+
+                       u_char *ptr;
+
+                       /* Align the IP header on a 16-byte boundary. */
+                       skb_reserve(skb,2);
+
+                       /* ptr to start of the sk_buff data area */
+                       skb_put(skb, pkt_len);
+                       ptr = skb->data;
+
+                       /* Now transfer the data from the card into that area */
+                       if (lp->mode == 2)
+                               memcpy_fromio(ptr, lp->mem_ptr_virt,pkt_len);
+                       else    /* io mapped */
+                               insl(ioaddr + HP100_REG_DATA32, ptr, pkt_len >> 2);
+
+                       skb->protocol = eth_type_trans(skb, dev);
+
+#ifdef HP100_DEBUG_RX
+                       printk("hp100: %s: rx: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+                                       dev->name, ptr[0], ptr[1], ptr[2], ptr[3],
+                                       ptr[4], ptr[5], ptr[6], ptr[7], ptr[8],
+                                       ptr[9], ptr[10], ptr[11]);
+#endif
+                       netif_rx(skb);
+                       dev->stats.rx_packets++;
+                       dev->stats.rx_bytes += pkt_len;
+               }
+
+               /* Indicate the card that we have got the packet */
+               hp100_outb(HP100_ADV_NXT_PKT | HP100_SET_LB, OPTION_MSW);
+
+               /* Count multicast frames (hashed or exact-match) for stats. */
+               switch (header & 0x00070000) {
+               case (HP100_MULTI_ADDR_HASH << 16):
+               case (HP100_MULTI_ADDR_NO_HASH << 16):
+                       dev->stats.multicast++;
+                       break;
+               }
+       }                       /* end of while(there are packets) loop */
+#ifdef HP100_DEBUG_RX
+       printk("hp100_rx: %s: end\n", dev->name);
+#endif
+}
+
+/*
+ * Receive Function for Busmaster Mode
+ *
+ * Walks the rx PDL ring from lp->rxrhead while the card reports fewer
+ * outstanding PDLs than we have committed (i.e. at least one PDL has
+ * completed), hands each good frame to the stack via netif_rx(), and
+ * refills the ring at lp->rxrtail so that lp->rxrcommit stays constant.
+ */
+static void hp100_rx_bm(struct net_device *dev)
+{
+       int ioaddr = dev->base_addr;
+       struct hp100_private *lp = netdev_priv(dev);
+       hp100_ring_t *ptr;
+       u_int header;
+       int pkt_len;
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4214, TRACE);
+       printk("hp100: %s: rx_bm\n", dev->name);
+#endif
+
+#ifdef HP100_DEBUG
+       /* Sanity checks: we should never be called with an empty ring, nor
+        * with more completed packets than PDLs committed to the card. */
+       if (0 == lp->rxrcommit) {
+               printk("hp100: %s: rx_bm called although no PDLs were committed to adapter?\n", dev->name);
+               return;
+       } else
+               /* RX_PKT_CNT states how many PDLs are currently formatted and available to
+                * the cards BM engine */
+       if ((hp100_inw(RX_PKT_CNT) & 0x00ff) >= lp->rxrcommit) {
+               printk("hp100: %s: More packets received than committed? RX_PKT_CNT=0x%x, commit=0x%x\n",
+                                    dev->name, hp100_inw(RX_PKT_CNT) & 0x00ff,
+                                    lp->rxrcommit);
+               return;
+       }
+#endif
+
+       while ((lp->rxrcommit > hp100_inb(RX_PDL))) {
+               /*
+                * The packet was received into the pdl pointed to by lp->rxrhead (
+                * the oldest pdl in the ring
+                */
+
+               /* First we get the header, which contains information about the */
+               /* actual length of the received packet. */
+
+               ptr = lp->rxrhead;
+
+               header = *(ptr->pdl - 1);
+               pkt_len = (header & HP100_PKT_LEN_MASK);
+
+               /* Conversion to new PCI API : NOP */
+               pci_unmap_single(lp->pci_dev, (dma_addr_t) ptr->pdl[3], MAX_ETHER_SIZE, PCI_DMA_FROMDEVICE);
+
+#ifdef HP100_DEBUG_BM
+               printk("hp100: %s: rx_bm: header@0x%x=0x%x length=%d, errors=0x%x, dest=0x%x\n",
+                               dev->name, (u_int) (ptr->pdl - 1), (u_int) header,
+                               pkt_len, (header >> 16) & 0xfff8, (header >> 16) & 7);
+               printk("hp100: %s: RX_PDL_COUNT:0x%x TX_PDL_COUNT:0x%x, RX_PKT_CNT=0x%x PDH=0x%x, Data@0x%x len=0x%x\n",
+                               dev->name, hp100_inb(RX_PDL), hp100_inb(TX_PDL),
+                               hp100_inb(RX_PKT_CNT), (u_int) * (ptr->pdl),
+                               (u_int) * (ptr->pdl + 3), (u_int) * (ptr->pdl + 4));
+#endif
+
+               /* Sanity-check the reported length before handing the skb up. */
+               if ((pkt_len >= MIN_ETHER_SIZE) &&
+                   (pkt_len <= MAX_ETHER_SIZE)) {
+                       if (ptr->skb == NULL) {
+                               printk("hp100: %s: rx_bm: skb null\n", dev->name);
+                               /* can happen if we only allocated room for the pdh due to memory shortage. */
+                               dev->stats.rx_dropped++;
+                       } else {
+                               skb_trim(ptr->skb, pkt_len);    /* Shorten it */
+                               ptr->skb->protocol =
+                                   eth_type_trans(ptr->skb, dev);
+
+                               netif_rx(ptr->skb);     /* Up and away... */
+
+                               dev->stats.rx_packets++;
+                               dev->stats.rx_bytes += pkt_len;
+                       }
+
+                       /* Count multicast frames (hashed or exact-match). */
+                       switch (header & 0x00070000) {
+                       case (HP100_MULTI_ADDR_HASH << 16):
+                       case (HP100_MULTI_ADDR_NO_HASH << 16):
+                               dev->stats.multicast++;
+                               break;
+                       }
+               } else {
+#ifdef HP100_DEBUG
+                       printk("hp100: %s: rx_bm: Received bad packet (length=%d)\n", dev->name, pkt_len);
+#endif
+                       if (ptr->skb != NULL)
+                               dev_kfree_skb_any(ptr->skb);
+                       dev->stats.rx_errors++;
+               }
+
+               lp->rxrhead = lp->rxrhead->next;
+
+               /* Allocate a new rx PDL (so lp->rxrcommit stays the same) */
+               if (0 == hp100_build_rx_pdl(lp->rxrtail, dev)) {
+                       /* No space for skb, header can still be received. */
+#ifdef HP100_DEBUG
+                       printk("hp100: %s: rx_bm: No space for new PDL.\n", dev->name);
+#endif
+                       return;
+               } else {        /* successfully allocated new PDL - put it in ringlist at tail. */
+                       hp100_outl((u32) lp->rxrtail->pdl_paddr, RX_PDA);
+                       lp->rxrtail = lp->rxrtail->next;
+               }
+
+       }
+}
+
+/*
+ *  statistics
+ */
+
+/* Refresh dev->stats from the hardware counters and return it.  The
+ * refresh runs under the driver lock with card interrupts masked. */
+static struct net_device_stats *hp100_get_stats(struct net_device *dev)
+{
+       struct hp100_private *lp = netdev_priv(dev);
+       int ioaddr = dev->base_addr;    /* used by the hp100_* register macros */
+       unsigned long flags;
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4215, TRACE);
+#endif
+
+       spin_lock_irqsave(&lp->lock, flags);
+       hp100_ints_off();       /* Useful ? Jean II */
+       hp100_update_stats(dev);
+       hp100_ints_on();
+       spin_unlock_irqrestore(&lp->lock, flags);
+
+       return &dev->stats;
+}
+
+/*
+ * Fold the card's error counters into dev->stats.  The counters clear
+ * on read, so each value is added exactly once.  Selects the MAC_CTRL
+ * register page and restores the PERFORMANCE page before returning.
+ * Called with lp->lock held (see hp100_get_stats / hp100_interrupt).
+ */
+static void hp100_update_stats(struct net_device *dev)
+{
+       int ioaddr = dev->base_addr;
+       u_short val;
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4216, TRACE);
+       printk("hp100: %s: update-stats\n", dev->name);
+#endif
+
+       /* Note: Statistics counters clear when read. */
+       hp100_page(MAC_CTRL);
+       val = hp100_inw(DROPPED) & 0x0fff;
+       dev->stats.rx_errors += val;
+       dev->stats.rx_over_errors += val;
+       val = hp100_inb(CRC);
+       dev->stats.rx_errors += val;
+       dev->stats.rx_crc_errors += val;
+       val = hp100_inb(ABORT);
+       dev->stats.tx_errors += val;
+       dev->stats.tx_aborted_errors += val;
+       hp100_page(PERFORMANCE);
+}
+
+/*
+ * Handle a MISC_ERROR interrupt (LAN link down or bus error): count it
+ * as one rx and one tx error.
+ *
+ * Fix: ioaddr was declared twice in the same scope (two separate
+ * "#ifdef HP100_DEBUG_B" blocks each declaring it), which is a
+ * redeclaration compile error whenever HP100_DEBUG_B is defined.
+ * Merged into a single debug block.
+ */
+static void hp100_misc_interrupt(struct net_device *dev)
+{
+#ifdef HP100_DEBUG_B
+       int ioaddr = dev->base_addr;
+
+       hp100_outw(0x4216, TRACE);
+       printk("hp100: %s: misc_interrupt\n", dev->name);
+#endif
+
+       /* Note: Statistics counters clear when read. */
+       dev->stats.rx_errors++;
+       dev->stats.tx_errors++;
+}
+
+/*
+ * Discard the card's pending statistics by reading (and ignoring) the
+ * clear-on-read counters.  Restores the PERFORMANCE register page.
+ *
+ * Fix: the debug printk referenced dev->name, but this function has no
+ * "dev" in scope (parameters are lp and ioaddr), so the debug build did
+ * not compile.  Print without the device name instead.
+ */
+static void hp100_clear_stats(struct hp100_private *lp, int ioaddr)
+{
+       unsigned long flags;
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4217, TRACE);
+       printk("hp100: clear_stats\n");
+#endif
+
+       spin_lock_irqsave(&lp->lock, flags);
+       hp100_page(MAC_CTRL);   /* get all statistics bytes */
+       hp100_inw(DROPPED);
+       hp100_inb(CRC);
+       hp100_inb(ABORT);
+       hp100_page(PERFORMANCE);
+       spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+
+/*
+ *  multicast setup
+ */
+
+/*
+ *  Set or clear the multicast filter for this adapter.
+ *
+ *  Chooses MAC rx mode (promiscuous / multicast / normal) from
+ *  dev->flags, programs the 64-bit hash filter, and - if the MAC mode
+ *  or hash bytes actually changed on a 100VG link - forces a relogin
+ *  to the hub.  Rx/tx are disabled while the MAC is reprogrammed and
+ *  re-enabled at the end.
+ *
+ *  Fix: the printk(":%02x:") inside the hash-computation loop was NOT
+ *  under "#ifdef HP100_DEBUG" like the surrounding debug prints, so it
+ *  would log unconditionally; now guarded.
+ */
+
+static void hp100_set_multicast_list(struct net_device *dev)
+{
+       unsigned long flags;
+       int ioaddr = dev->base_addr;
+       struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4218, TRACE);
+       printk("hp100: %s: set_mc_list\n", dev->name);
+#endif
+
+       spin_lock_irqsave(&lp->lock, flags);
+       hp100_ints_off();
+       hp100_page(MAC_CTRL);
+       hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1);    /* stop rx/tx */
+
+       if (dev->flags & IFF_PROMISC) {
+               lp->mac2_mode = HP100_MAC2MODE6;        /* promiscuous mode = get all good */
+               lp->mac1_mode = HP100_MAC1MODE6;        /* packets on the net */
+               memset(&lp->hash_bytes, 0xff, 8);
+       } else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) {
+               lp->mac2_mode = HP100_MAC2MODE5;        /* multicast mode = get packets for */
+               lp->mac1_mode = HP100_MAC1MODE5;        /* me, broadcasts and all multicasts */
+#ifdef HP100_MULTICAST_FILTER  /* doesn't work!!! */
+               if (dev->flags & IFF_ALLMULTI) {
+                       /* set hash filter to receive all multicast packets */
+                       memset(&lp->hash_bytes, 0xff, 8);
+               } else {
+                       int i, idx;
+                       u_char *addrs;
+                       struct netdev_hw_addr *ha;
+
+                       memset(&lp->hash_bytes, 0x00, 8);
+#ifdef HP100_DEBUG
+                       printk("hp100: %s: computing hash filter - mc_count = %i\n",
+                              dev->name, netdev_mc_count(dev));
+#endif
+                       netdev_for_each_mc_addr(ha, dev) {
+                               addrs = ha->addr;
+#ifdef HP100_DEBUG
+                               printk("hp100: %s: multicast = %pM, ",
+                                            dev->name, addrs);
+#endif
+                               /* Fold the 6 address bytes into a 6-bit hash
+                                * index and set the matching filter bit. */
+                               for (i = idx = 0; i < 6; i++) {
+                                       idx ^= *addrs++ & 0x3f;
+#ifdef HP100_DEBUG
+                                       printk(":%02x:", idx);
+#endif
+                               }
+#ifdef HP100_DEBUG
+                               printk("idx = %i\n", idx);
+#endif
+                               lp->hash_bytes[idx >> 3] |= (1 << (idx & 7));
+                       }
+               }
+#else
+               memset(&lp->hash_bytes, 0xff, 8);
+#endif
+       } else {
+               lp->mac2_mode = HP100_MAC2MODE3;        /* normal mode = get packets for me */
+               lp->mac1_mode = HP100_MAC1MODE3;        /* and broadcasts */
+               memset(&lp->hash_bytes, 0x00, 8);
+       }
+
+       /* Only touch the MAC config registers if the mode really changed. */
+       if (((hp100_inb(MAC_CFG_1) & 0x0f) != lp->mac1_mode) ||
+           (hp100_inb(MAC_CFG_2) != lp->mac2_mode)) {
+               int i;
+
+               hp100_outb(lp->mac2_mode, MAC_CFG_2);
+               hp100_andb(HP100_MAC1MODEMASK, MAC_CFG_1);      /* clear mac1 mode bits */
+               hp100_orb(lp->mac1_mode, MAC_CFG_1);    /* and set the new mode */
+
+               hp100_page(MAC_ADDRESS);
+               for (i = 0; i < 8; i++)
+                       hp100_outb(lp->hash_bytes[i], HASH_BYTE0 + i);
+#ifdef HP100_DEBUG
+               printk("hp100: %s: mac1 = 0x%x, mac2 = 0x%x, multicast hash = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+                                    dev->name, lp->mac1_mode, lp->mac2_mode,
+                                    lp->hash_bytes[0], lp->hash_bytes[1],
+                                    lp->hash_bytes[2], lp->hash_bytes[3],
+                                    lp->hash_bytes[4], lp->hash_bytes[5],
+                                    lp->hash_bytes[6], lp->hash_bytes[7]);
+#endif
+
+               if (lp->lan_type == HP100_LAN_100) {
+#ifdef HP100_DEBUG
+                       printk("hp100: %s: 100VG MAC settings have changed - relogin.\n", dev->name);
+#endif
+                       lp->hub_status = hp100_login_to_vg_hub(dev, 1); /* force a relogin to the hub */
+               }
+       } else {
+               int i;
+               u_char old_hash_bytes[8];
+
+               /* MAC mode unchanged; rewrite the hash only if it differs. */
+               hp100_page(MAC_ADDRESS);
+               for (i = 0; i < 8; i++)
+                       old_hash_bytes[i] = hp100_inb(HASH_BYTE0 + i);
+               if (memcmp(old_hash_bytes, &lp->hash_bytes, 8)) {
+                       for (i = 0; i < 8; i++)
+                               hp100_outb(lp->hash_bytes[i], HASH_BYTE0 + i);
+#ifdef HP100_DEBUG
+                       printk("hp100: %s: multicast hash = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+                                       dev->name, lp->hash_bytes[0],
+                                       lp->hash_bytes[1], lp->hash_bytes[2],
+                                       lp->hash_bytes[3], lp->hash_bytes[4],
+                                       lp->hash_bytes[5], lp->hash_bytes[6],
+                                       lp->hash_bytes[7]);
+#endif
+
+                       if (lp->lan_type == HP100_LAN_100) {
+#ifdef HP100_DEBUG
+                               printk("hp100: %s: 100VG MAC settings have changed - relogin.\n", dev->name);
+#endif
+                               lp->hub_status = hp100_login_to_vg_hub(dev, 1); /* force a relogin to the hub */
+                       }
+               }
+       }
+
+       hp100_page(MAC_CTRL);
+       hp100_orb(HP100_RX_EN | HP100_RX_IDLE | /* enable rx */
+                 HP100_TX_EN | HP100_TX_IDLE, MAC_CFG_1);      /* enable tx */
+
+       hp100_page(PERFORMANCE);
+       hp100_ints_on();
+       spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+/*
+ *  hardware interrupt handling
+ */
+
+/*
+ * Main interrupt service routine.  Reads IRQ_STATUS once, dispatches to
+ * the rx/tx handlers according to the bits set and the operating mode
+ * (lp->mode == 1 means busmaster), then acks the handled bits.  Returns
+ * IRQ_NONE when the status word is zero, so a shared interrupt line can
+ * be passed on to the other handler.
+ */
+static irqreturn_t hp100_interrupt(int irq, void *dev_id)
+{
+       struct net_device *dev = (struct net_device *) dev_id;
+       struct hp100_private *lp = netdev_priv(dev);
+
+       int ioaddr;
+       u_int val;
+
+       if (dev == NULL)
+               return IRQ_NONE;
+       ioaddr = dev->base_addr;
+
+       spin_lock(&lp->lock);
+
+       hp100_ints_off();
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4219, TRACE);
+#endif
+
+       /*  hp100_page( PERFORMANCE ); */
+       val = hp100_inw(IRQ_STATUS);
+#ifdef HP100_DEBUG_IRQ
+       printk("hp100: %s: mode=%x,IRQ_STAT=0x%.4x,RXPKTCNT=0x%.2x RXPDL=0x%.2x TXPKTCNT=0x%.2x TXPDL=0x%.2x\n",
+                            dev->name, lp->mode, (u_int) val, hp100_inb(RX_PKT_CNT),
+                            hp100_inb(RX_PDL), hp100_inb(TX_PKT_CNT), hp100_inb(TX_PDL));
+#endif
+
+       if (val == 0) {         /* might be a shared interrupt */
+               spin_unlock(&lp->lock);
+               hp100_ints_on();
+               return IRQ_NONE;
+       }
+       /* We're only interested in those interrupts we really enabled. */
+       /* val &= hp100_inw( IRQ_MASK ); */
+
+       /*
+        * RX_PDL_FILL_COMPL is set whenever a RX_PDL has been executed. A RX_PDL
+        * is considered executed whenever the RX_PDL data structure is no longer
+        * needed.
+        */
+       if (val & HP100_RX_PDL_FILL_COMPL) {
+               if (lp->mode == 1)
+                       hp100_rx_bm(dev);
+               else {
+                       printk("hp100: %s: rx_pdl_fill_compl interrupt although not busmaster?\n", dev->name);
+               }
+       }
+
+       /*
+        * The RX_PACKET interrupt is set, when the receive packet counter is
+        * non zero. We use this interrupt for receiving in slave mode. In
+        * busmaster mode, we use it to make sure we did not miss any rx_pdl_fill
+        * interrupts. If rx_pdl_fill_compl is not set and rx_packet is set, then
+        * we somehow have missed a rx_pdl_fill_compl interrupt.
+        */
+
+       if (val & HP100_RX_PACKET) {    /* Receive Packet Counter is non zero */
+               if (lp->mode != 1)      /* non busmaster */
+                       hp100_rx(dev);
+               else if (!(val & HP100_RX_PDL_FILL_COMPL)) {
+                       /* Shouldn't happen - maybe we missed a RX_PDL_FILL Interrupt?  */
+                       hp100_rx_bm(dev);
+               }
+       }
+
+       /*
+        * Ack. that we have noticed the interrupt and thereby allow next one.
+        * Note that this is now done after the slave rx function, since first
+        * acknowledging and then setting ADV_NXT_PKT caused an extra interrupt
+        * on the J2573.
+        */
+       hp100_outw(val, IRQ_STATUS);
+
+       /*
+        * RX_ERROR is set when a packet is dropped due to no memory resources on
+        * the card or when a RCV_ERR occurs.
+        * TX_ERROR is set when a TX_ABORT condition occurs in the MAC->exists
+        * only in the 802.3 MAC and happens when 16 collisions occur during a TX
+        */
+       if (val & (HP100_TX_ERROR | HP100_RX_ERROR)) {
+#ifdef HP100_DEBUG_IRQ
+               printk("hp100: %s: TX/RX Error IRQ\n", dev->name);
+#endif
+               hp100_update_stats(dev);
+               if (lp->mode == 1) {
+                       hp100_rxfill(dev);
+                       hp100_clean_txring(dev);
+               }
+       }
+
+       /*
+        * RX_PDA_ZERO is set when the PDA count goes from non-zero to zero.
+        */
+       if ((lp->mode == 1) && (val & (HP100_RX_PDA_ZERO)))
+               hp100_rxfill(dev);
+
+       /*
+        * HP100_TX_COMPLETE interrupt occurs when packet transmitted on wire
+        * is completed
+        */
+       if ((lp->mode == 1) && (val & (HP100_TX_COMPLETE)))
+               hp100_clean_txring(dev);
+
+       /*
+        * MISC_ERROR is set when either the LAN link goes down or a detected
+        * bus error occurs.
+        */
+       if (val & HP100_MISC_ERROR) {   /* New for J2585B */
+#ifdef HP100_DEBUG_IRQ
+               printk
+                   ("hp100: %s: Misc. Error Interrupt - Check cabling.\n",
+                    dev->name);
+#endif
+               if (lp->mode == 1) {
+                       hp100_clean_txring(dev);
+                       hp100_rxfill(dev);
+               }
+               hp100_misc_interrupt(dev);
+       }
+
+       spin_unlock(&lp->lock);
+       hp100_ints_on();
+       return IRQ_HANDLED;
+}
+
+/*
+ *  some misc functions
+ */
+
+/*
+ * Bring the adapter to an operational state: un-tristate the IRQ line
+ * with all card interrupts masked and acked, enable busmastering
+ * (lp->mode == 1) or memory mapping (lp->mode == 2), unmask the
+ * interrupts this driver services, and finally enable the MAC via
+ * hp100_set_multicast_list() - which is called after the lock is
+ * released because it takes lp->lock itself.
+ */
+static void hp100_start_interface(struct net_device *dev)
+{
+       unsigned long flags;
+       int ioaddr = dev->base_addr;
+       struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4220, TRACE);
+       printk("hp100: %s: hp100_start_interface\n", dev->name);
+#endif
+
+       spin_lock_irqsave(&lp->lock, flags);
+
+       /* Ensure the adapter does not want to request an interrupt when */
+       /* enabling the IRQ line to be active on the bus (i.e. not tri-stated) */
+       hp100_page(PERFORMANCE);
+       hp100_outw(0xfefe, IRQ_MASK);   /* mask off all ints */
+       hp100_outw(0xffff, IRQ_STATUS); /* ack all IRQs */
+       hp100_outw(HP100_FAKE_INT | HP100_INT_EN | HP100_RESET_LB,
+                  OPTION_LSW);
+       /* Un Tri-state int. TODO: Check if shared interrupts can be realised? */
+       hp100_outw(HP100_TRI_INT | HP100_RESET_HB, OPTION_LSW);
+
+       if (lp->mode == 1) {
+               /* Make sure BM bit is set... */
+               hp100_page(HW_MAP);
+               hp100_orb(HP100_BM_MASTER, BM);
+               hp100_rxfill(dev);
+       } else if (lp->mode == 2) {
+               /* Enable memory mapping. Note: Don't do this when busmaster. */
+               hp100_outw(HP100_MMAP_DIS | HP100_RESET_HB, OPTION_LSW);
+       }
+
+       hp100_page(PERFORMANCE);
+       hp100_outw(0xfefe, IRQ_MASK);   /* mask off all ints */
+       hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */
+
+       /* enable a few interrupts: */
+       if (lp->mode == 1) {    /* busmaster mode */
+               hp100_outw(HP100_RX_PDL_FILL_COMPL |
+                          HP100_RX_PDA_ZERO | HP100_RX_ERROR |
+                          /* HP100_RX_PACKET    | */
+                          /* HP100_RX_EARLY_INT |  */ HP100_SET_HB |
+                          /* HP100_TX_PDA_ZERO  |  */
+                          HP100_TX_COMPLETE |
+                          /* HP100_MISC_ERROR   |  */
+                          HP100_TX_ERROR | HP100_SET_LB, IRQ_MASK);
+       } else {
+               hp100_outw(HP100_RX_PACKET |
+                          HP100_RX_ERROR | HP100_SET_HB |
+                          HP100_TX_ERROR | HP100_SET_LB, IRQ_MASK);
+       }
+
+       /* Note : before hp100_set_multicast_list(), because it will play with
+        * spinlock itself... Jean II */
+       spin_unlock_irqrestore(&lp->lock, flags);
+
+       /* Enable MAC Tx and RX, set MAC modes, ... */
+       hp100_set_multicast_list(dev);
+}
+
+/*
+ * Quiesce the adapter.  In busmaster mode this is delegated to
+ * hp100_BM_shutdown(); otherwise the IRQ line is tri-stated, memory
+ * mapping disabled and the MAC rx/tx paths turned off, then we
+ * busy-wait (bounded) until the MAC reports both tx and rx idle.
+ */
+static void hp100_stop_interface(struct net_device *dev)
+{
+       struct hp100_private *lp = netdev_priv(dev);
+       int ioaddr = dev->base_addr;
+       u_int val;
+
+#ifdef HP100_DEBUG_B
+       printk("hp100: %s: hp100_stop_interface\n", dev->name);
+       hp100_outw(0x4221, TRACE);
+#endif
+
+       if (lp->mode == 1)
+               hp100_BM_shutdown(dev);
+       else {
+               /* Note: MMAP_DIS will be reenabled by start_interface */
+               hp100_outw(HP100_INT_EN | HP100_RESET_LB |
+                          HP100_TRI_INT | HP100_MMAP_DIS | HP100_SET_HB,
+                          OPTION_LSW);
+               val = hp100_inw(OPTION_LSW);
+
+               hp100_page(MAC_CTRL);
+               hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1);
+
+               if (!(val & HP100_HW_RST))
+                       return; /* If reset, imm. return ... */
+               /* ... else: busy wait until idle */
+               for (val = 0; val < 6000; val++)
+                       if ((hp100_inb(MAC_CFG_1) & (HP100_TX_IDLE | HP100_RX_IDLE)) == (HP100_TX_IDLE | HP100_RX_IDLE)) {
+                               hp100_page(PERFORMANCE);
+                               return;
+                       }
+               printk("hp100: %s: hp100_stop_interface - timeout\n", dev->name);
+               hp100_page(PERFORMANCE);
+       }
+}
+
+/* Trigger a reload of the card's registers from EEPROM and busy-wait
+ * (bounded) for the card to report completion.  probe_ioaddr overrides
+ * dev->base_addr when non-zero (used at probe time). */
+static void hp100_load_eeprom(struct net_device *dev, u_short probe_ioaddr)
+{
+       int ioaddr = probe_ioaddr > 0 ? probe_ioaddr : dev->base_addr;
+       int attempt;
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4222, TRACE);
+#endif
+
+       /* Pulse the EEPROM-load bit: clear, then set, to start the reload. */
+       hp100_page(EEPROM_CTRL);
+       hp100_andw(~HP100_EEPROM_LOAD, EEPROM_CTRL);
+       hp100_orw(HP100_EEPROM_LOAD, EEPROM_CTRL);
+
+       for (attempt = 0; attempt < 10000; attempt++) {
+               if (!(hp100_inb(OPTION_MSW) & HP100_EE_LOAD))
+                       return;
+       }
+       printk("hp100: %s: hp100_load_eeprom - timeout\n", dev->name);
+}
+
+/*  Sense connection status.
+ *  return values: LAN_10  - Connected to 10Mbit/s network
+ *                 LAN_100 - Connected to 100Mbit/s network
+ *                 LAN_ERR - not connected or 100Mbit/s Hub down
+ *  (May also return HP100_LAN_COAX when an AUI/BNC transceiver is
+ *  detected - see below.)
+ */
+static int hp100_sense_lan(struct net_device *dev)
+{
+       int ioaddr = dev->base_addr;
+       u_short val_VG, val_10;
+       struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4223, TRACE);
+#endif
+
+       /* Sample both link-status registers, then restore the normal page. */
+       hp100_page(MAC_CTRL);
+       val_10 = hp100_inb(10_LAN_CFG_1);
+       val_VG = hp100_inb(VG_LAN_CFG_1);
+       hp100_page(PERFORMANCE);
+#ifdef HP100_DEBUG
+       printk("hp100: %s: sense_lan: val_VG = 0x%04x, val_10 = 0x%04x\n",
+              dev->name, val_VG, val_10);
+#endif
+
+       if (val_10 & HP100_LINK_BEAT_ST)        /* 10Mb connection is active */
+               return HP100_LAN_10;
+
+       if (val_10 & HP100_AUI_ST) {    /* have we BNC or AUI onboard? */
+               /*
+                * This can be overridden by dos utility, so if this has no effect,
+                * perhaps you need to download that utility from HP and set card
+                * back to "auto detect".
+                */
+               val_10 |= HP100_AUI_SEL | HP100_LOW_TH;
+               hp100_page(MAC_CTRL);
+               hp100_outb(val_10, 10_LAN_CFG_1);
+               hp100_page(PERFORMANCE);
+               return HP100_LAN_COAX;
+       }
+
+       /* Those cards don't have a 100 Mbit connector */
+       /* NOTE(review): PCI_VENDOR_ID is the PCI config-space *offset* macro
+        * (0x00) from <linux/pci_regs.h>, so this comparison looks like it
+        * was meant to be PCI_VENDOR_ID_HP - verify against hp100.h before
+        * changing. */
+       if ( !strcmp(lp->id, "HWP1920")  ||
+            (lp->pci_dev &&
+             lp->pci_dev->vendor == PCI_VENDOR_ID &&
+             (lp->pci_dev->device == PCI_DEVICE_ID_HP_J2970A ||
+              lp->pci_dev->device == PCI_DEVICE_ID_HP_J2973A)))
+               return HP100_LAN_ERR;
+
+       if (val_VG & HP100_LINK_CABLE_ST)       /* Can hear the HUBs tone. */
+               return HP100_LAN_100;
+       return HP100_LAN_ERR;
+}
+
+static int hp100_down_vg_link(struct net_device *dev)
+{
+       struct hp100_private *lp = netdev_priv(dev);
+       int ioaddr = dev->base_addr;
+       unsigned long time;
+       long savelan, newlan;
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4224, TRACE);
+       printk("hp100: %s: down_vg_link\n", dev->name);
+#endif
+
+       hp100_page(MAC_CTRL);
+       time = jiffies + (HZ / 4);
+       do {
+               if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST)
+                       break;
+               if (!in_interrupt())
+                       schedule_timeout_interruptible(1);
+       } while (time_after(time, jiffies));
+
+       if (time_after_eq(jiffies, time))       /* no signal->no logout */
+               return 0;
+
+       /* Drop the VG Link by clearing the link up cmd and load addr. */
+
+       hp100_andb(~(HP100_LOAD_ADDR | HP100_LINK_CMD), VG_LAN_CFG_1);
+       hp100_orb(HP100_VG_SEL, VG_LAN_CFG_1);
+
+       /* Conditionally stall for >250ms on Link-Up Status (to go down) */
+       time = jiffies + (HZ / 2);
+       do {
+               if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
+                       break;
+               if (!in_interrupt())
+                       schedule_timeout_interruptible(1);
+       } while (time_after(time, jiffies));
+
+#ifdef HP100_DEBUG
+       if (time_after_eq(jiffies, time))
+               printk("hp100: %s: down_vg_link: Link does not go down?\n", dev->name);
+#endif
+
+       /* To prevent condition where Rev 1 VG MAC and old hubs do not complete */
+       /* logout under traffic (even though all the status bits are cleared),  */
+       /* do this workaround to get the Rev 1 MAC in its idle state */
+       if (lp->chip == HP100_CHIPID_LASSEN) {
+               /* Reset VG MAC to ensure it leaves the logoff state even if */
+               /* the Hub is still emitting tones */
+               hp100_andb(~HP100_VG_RESET, VG_LAN_CFG_1);
+               udelay(1500);   /* wait for >1ms */
+               hp100_orb(HP100_VG_RESET, VG_LAN_CFG_1);        /* Release Reset */
+               udelay(1500);
+       }
+
+       /* New: For lassen, switch to 10 Mbps mac briefly to clear training ACK */
+       /* to get the VG mac to full reset. This is not required with later chips */
+       /* Note: It will take between 1 and 2 seconds for the VG mac to be */
+       /* selected again! This will be left to the connect hub function to */
+       /* perform if desired.  */
+       if (lp->chip == HP100_CHIPID_LASSEN) {
+               /* Have to write to 10 and 100VG control registers simultaneously */
+               savelan = newlan = hp100_inl(10_LAN_CFG_1);     /* read 10+100 LAN_CFG regs */
+               newlan &= ~(HP100_VG_SEL << 16);
+               newlan |= (HP100_DOT3_MAC) << 8;
+               hp100_andb(~HP100_AUTO_MODE, MAC_CFG_3);        /* Autosel off */
+               hp100_outl(newlan, 10_LAN_CFG_1);
+
+               /* Conditionally stall for 5sec on VG selected. */
+               time = jiffies + (HZ * 5);
+               do {
+                       if (!(hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST))
+                               break;
+                       if (!in_interrupt())
+                               schedule_timeout_interruptible(1);
+               } while (time_after(time, jiffies));
+
+               hp100_orb(HP100_AUTO_MODE, MAC_CFG_3);  /* Autosel back on */
+               hp100_outl(savelan, 10_LAN_CFG_1);
+       }
+
+       time = jiffies + (3 * HZ);      /* Timeout 3s */
+       do {
+               if ((hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) == 0)
+                       break;
+               if (!in_interrupt())
+                       schedule_timeout_interruptible(1);
+       } while (time_after(time, jiffies));
+
+       if (time_before_eq(time, jiffies)) {
+#ifdef HP100_DEBUG
+               printk("hp100: %s: down_vg_link: timeout\n", dev->name);
+#endif
+               return -EIO;
+       }
+
+       time = jiffies + (2 * HZ);      /* This seems to take a while.... */
+       do {
+               if (!in_interrupt())
+                       schedule_timeout_interruptible(1);
+       } while (time_after(time, jiffies));
+
+       return 0;
+}
+
+static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
+{
+       int ioaddr = dev->base_addr;
+       struct hp100_private *lp = netdev_priv(dev);
+       u_short val = 0;
+       unsigned long time;
+       int startst;
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4225, TRACE);
+       printk("hp100: %s: login_to_vg_hub\n", dev->name);
+#endif
+
+       /* Initiate a login sequence iff VG MAC is enabled and either Load Address
+        * bit is zero or the force relogin flag is set (e.g. due to MAC address or
+        * promiscuous mode change)
+        */
+       hp100_page(MAC_CTRL);
+       startst = hp100_inb(VG_LAN_CFG_1);
+       if ((force_relogin == 1) || (hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST)) {
+#ifdef HP100_DEBUG_TRAINING
+               printk("hp100: %s: Start training\n", dev->name);
+#endif
+
+               /* Ensure VG Reset bit is 1 (i.e., do not reset) */
+               hp100_orb(HP100_VG_RESET, VG_LAN_CFG_1);
+
+               /* If Lassen AND auto-select-mode AND VG tones were sensed on */
+               /* entry then temporarily put them into force 100Mbit mode */
+               if ((lp->chip == HP100_CHIPID_LASSEN) && (startst & HP100_LINK_CABLE_ST))
+                       hp100_andb(~HP100_DOT3_MAC, 10_LAN_CFG_2);
+
+               /* Drop the VG link by zeroing Link Up Command and Load Address  */
+               hp100_andb(~(HP100_LINK_CMD /* |HP100_LOAD_ADDR */ ), VG_LAN_CFG_1);
+
+#ifdef HP100_DEBUG_TRAINING
+               printk("hp100: %s: Bring down the link\n", dev->name);
+#endif
+
+               /* Wait for link to drop */
+               time = jiffies + (HZ / 10);
+               do {
+                       if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
+                               break;
+                       if (!in_interrupt())
+                               schedule_timeout_interruptible(1);
+               } while (time_after(time, jiffies));
+
+               /* Start an addressed training and optionally request promiscuous port */
+               if ((dev->flags) & IFF_PROMISC) {
+                       hp100_orb(HP100_PROM_MODE, VG_LAN_CFG_2);
+                       if (lp->chip == HP100_CHIPID_LASSEN)
+                               hp100_orw(HP100_MACRQ_PROMSC, TRAIN_REQUEST);
+               } else {
+                       hp100_andb(~HP100_PROM_MODE, VG_LAN_CFG_2);
+                       /* For ETR parts we need to reset the prom. bit in the training
+                        * register, otherwise promiscuous mode won't be disabled.
+                        */
+                       if (lp->chip == HP100_CHIPID_LASSEN) {
+                               hp100_andw(~HP100_MACRQ_PROMSC, TRAIN_REQUEST);
+                       }
+               }
+
+               /* With ETR parts, frame format request bits can be set. */
+               if (lp->chip == HP100_CHIPID_LASSEN)
+                       hp100_orb(HP100_MACRQ_FRAMEFMT_EITHER, TRAIN_REQUEST);
+
+               hp100_orb(HP100_LINK_CMD | HP100_LOAD_ADDR | HP100_VG_RESET, VG_LAN_CFG_1);
+
+               /* Note: Next wait could be omitted for Hood and earlier chips under */
+               /* certain circumstances */
+               /* TODO: check if hood/earlier and skip wait. */
+
+               /* Wait for either short timeout for VG tones or long for login    */
+               /* Wait for the card hardware to signal link cable status ok... */
+               hp100_page(MAC_CTRL);
+               time = jiffies + (1 * HZ);      /* 1 sec timeout for cable st */
+               do {
+                       if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST)
+                               break;
+                       if (!in_interrupt())
+                               schedule_timeout_interruptible(1);
+               } while (time_before(jiffies, time));
+
+               if (time_after_eq(jiffies, time)) {
+#ifdef HP100_DEBUG_TRAINING
+                       printk("hp100: %s: Link cable status not ok? Training aborted.\n", dev->name);
+#endif
+               } else {
+#ifdef HP100_DEBUG_TRAINING
+                       printk
+                           ("hp100: %s: HUB tones detected. Trying to train.\n",
+                            dev->name);
+#endif
+
+                       time = jiffies + (2 * HZ);      /* again a timeout */
+                       do {
+                               val = hp100_inb(VG_LAN_CFG_1);
+                               if ((val & (HP100_LINK_UP_ST))) {
+#ifdef HP100_DEBUG_TRAINING
+                                       printk("hp100: %s: Passed training.\n", dev->name);
+#endif
+                                       break;
+                               }
+                               if (!in_interrupt())
+                                       schedule_timeout_interruptible(1);
+                       } while (time_after(time, jiffies));
+               }
+
+               /* If LINK_UP_ST is set, then we are logged into the hub. */
+               if (time_before_eq(jiffies, time) && (val & HP100_LINK_UP_ST)) {
+#ifdef HP100_DEBUG_TRAINING
+                       printk("hp100: %s: Successfully logged into the HUB.\n", dev->name);
+                       if (lp->chip == HP100_CHIPID_LASSEN) {
+                               val = hp100_inw(TRAIN_ALLOW);
+                               printk("hp100: %s: Card supports 100VG MAC Version \"%s\" ",
+                                            dev->name, (hp100_inw(TRAIN_REQUEST) & HP100_CARD_MACVER) ? "802.12" : "Pre");
+                               printk("Driver will use MAC Version \"%s\"\n", (val & HP100_HUB_MACVER) ? "802.12" : "Pre");
+                               printk("hp100: %s: Frame format is %s.\n", dev->name, (val & HP100_MALLOW_FRAMEFMT) ? "802.5" : "802.3");
+                       }
+#endif
+               } else {
+                       /* If LINK_UP_ST is not set, login was not successful */
+                       printk("hp100: %s: Problem logging into the HUB.\n", dev->name);
+                       if (lp->chip == HP100_CHIPID_LASSEN) {
+                               /* Check allowed Register to find out why there is a problem. */
+                               val = hp100_inw(TRAIN_ALLOW);   /* won't work on non-ETR card */
+#ifdef HP100_DEBUG_TRAINING
+                               printk("hp100: %s: MAC Configuration requested: 0x%04x, HUB allowed: 0x%04x\n", dev->name, hp100_inw(TRAIN_REQUEST), val);
+#endif
+                               if (val & HP100_MALLOW_ACCDENIED)
+                                       printk("hp100: %s: HUB access denied.\n", dev->name);
+                               if (val & HP100_MALLOW_CONFIGURE)
+                                       printk("hp100: %s: MAC Configuration is incompatible with the Network.\n", dev->name);
+                               if (val & HP100_MALLOW_DUPADDR)
+                                       printk("hp100: %s: Duplicate MAC Address on the Network.\n", dev->name);
+                       }
+               }
+
+               /* If we have put the chip into forced 100 Mbit mode earlier, go back */
+               /* to auto-select mode */
+
+               if ((lp->chip == HP100_CHIPID_LASSEN) && (startst & HP100_LINK_CABLE_ST)) {
+                       hp100_page(MAC_CTRL);
+                       hp100_orb(HP100_DOT3_MAC, 10_LAN_CFG_2);
+               }
+
+               val = hp100_inb(VG_LAN_CFG_1);
+
+               /* Clear the MISC_ERROR Interrupt, which might be generated when doing the relogin */
+               hp100_page(PERFORMANCE);
+               hp100_outw(HP100_MISC_ERROR, IRQ_STATUS);
+
+               if (val & HP100_LINK_UP_ST)
+                       return 0;       /* login was ok */
+               else {
+                       printk("hp100: %s: Training failed.\n", dev->name);
+                       hp100_down_vg_link(dev);
+                       return -EIO;
+               }
+       }
+       /* no forced relogin & already link there->no training. */
+       return -EIO;
+}
+
+static void hp100_cascade_reset(struct net_device *dev, u_short enable)
+{
+       int ioaddr = dev->base_addr;
+       struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+       hp100_outw(0x4226, TRACE);
+       printk("hp100: %s: cascade_reset\n", dev->name);
+#endif
+
+       if (enable) {
+               hp100_outw(HP100_HW_RST | HP100_RESET_LB, OPTION_LSW);
+               if (lp->chip == HP100_CHIPID_LASSEN) {
+                       /* Lassen requires a PCI transmit fifo reset */
+                       hp100_page(HW_MAP);
+                       hp100_andb(~HP100_PCI_RESET, PCICTRL2);
+                       hp100_orb(HP100_PCI_RESET, PCICTRL2);
+                       /* Wait for min. 300 ns */
+                       /* we can't use jiffies here, because it may be */
+                       /* that we have disabled the timer... */
+                       udelay(400);
+                       hp100_andb(~HP100_PCI_RESET, PCICTRL2);
+                       hp100_page(PERFORMANCE);
+               }
+       } else {                /* bring out of reset */
+               hp100_outw(HP100_HW_RST | HP100_SET_LB, OPTION_LSW);
+               udelay(400);
+               hp100_page(PERFORMANCE);
+       }
+}
+
+#ifdef HP100_DEBUG
+void hp100_RegisterDump(struct net_device *dev)
+{
+       int ioaddr = dev->base_addr;
+       int Page;
+       int Register;
+
+       /* Dump common registers */
+       printk("hp100: %s: Cascade Register Dump\n", dev->name);
+       printk("hardware id #1: 0x%.2x\n", hp100_inb(HW_ID));
+       printk("hardware id #2/paging: 0x%.2x\n", hp100_inb(PAGING));
+       printk("option #1: 0x%.4x\n", hp100_inw(OPTION_LSW));
+       printk("option #2: 0x%.4x\n", hp100_inw(OPTION_MSW));
+
+       /* Dump paged registers */
+       for (Page = 0; Page < 8; Page++) {
+               /* Dump registers */
+               printk("page: 0x%.2x\n", Page);
+               outw(Page, ioaddr + 0x02);
+               for (Register = 0x8; Register < 0x22; Register += 2) {
+                       /* Display Register contents except data port */
+                       if (((Register != 0x10) && (Register != 0x12)) || (Page > 0)) {
+                               printk("0x%.2x = 0x%.4x\n", Register, inw(ioaddr + Register));
+                       }
+               }
+       }
+       hp100_page(PERFORMANCE);
+}
+#endif
+
+
+static void cleanup_dev(struct net_device *d)
+{
+       struct hp100_private *p = netdev_priv(d);
+
+       unregister_netdev(d);
+       release_region(d->base_addr, HP100_REGION_SIZE);
+
+       if (p->mode == 1)       /* busmaster */
+               pci_free_consistent(p->pci_dev, MAX_RINGSIZE + 0x0f,
+                                   p->page_vaddr_algn,
+                                   virt_to_whatever(d, p->page_vaddr_algn));
+       if (p->mem_ptr_virt)
+               iounmap(p->mem_ptr_virt);
+
+       free_netdev(d);
+}
+
+static int hp100_eisa_probe(struct device *gendev)
+{
+       struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
+       struct eisa_device *edev = to_eisa_device(gendev);
+       int err;
+
+       if (!dev)
+               return -ENOMEM;
+
+       SET_NETDEV_DEV(dev, &edev->dev);
+
+       err = hp100_probe1(dev, edev->base_addr + 0xC38, HP100_BUS_EISA, NULL);
+       if (err)
+               goto out1;
+
+#ifdef HP100_DEBUG
+       printk("hp100: %s: EISA adapter found at 0x%x\n", dev->name,
+              dev->base_addr);
+#endif
+       dev_set_drvdata(gendev, dev);
+       return 0;
+ out1:
+       free_netdev(dev);
+       return err;
+}
+
+static int hp100_eisa_remove(struct device *gendev)
+{
+       struct net_device *dev = dev_get_drvdata(gendev);
+       cleanup_dev(dev);
+       return 0;
+}
+
+static struct eisa_driver hp100_eisa_driver = {
+        .id_table = hp100_eisa_tbl,
+        .driver   = {
+                .name    = "hp100",
+                .probe   = hp100_eisa_probe,
+               .remove  = hp100_eisa_remove,
+        }
+};
+
+static int hp100_pci_probe(struct pci_dev *pdev,
+                          const struct pci_device_id *ent)
+{
+       struct net_device *dev;
+       int ioaddr;
+       u_short pci_command;
+       int err;
+
+       if (pci_enable_device(pdev))
+               return -ENODEV;
+
+       dev = alloc_etherdev(sizeof(struct hp100_private));
+       if (!dev) {
+               err = -ENOMEM;
+               goto out0;
+       }
+
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+       if (!(pci_command & PCI_COMMAND_IO)) {
+#ifdef HP100_DEBUG
+               printk("hp100: %s: PCI I/O Bit has not been set. Setting...\n", dev->name);
+#endif
+               pci_command |= PCI_COMMAND_IO;
+               pci_write_config_word(pdev, PCI_COMMAND, pci_command);
+       }
+
+       if (!(pci_command & PCI_COMMAND_MASTER)) {
+#ifdef HP100_DEBUG
+               printk("hp100: %s: PCI Master Bit has not been set. Setting...\n", dev->name);
+#endif
+               pci_command |= PCI_COMMAND_MASTER;
+               pci_write_config_word(pdev, PCI_COMMAND, pci_command);
+       }
+
+       ioaddr = pci_resource_start(pdev, 0);
+       err = hp100_probe1(dev, ioaddr, HP100_BUS_PCI, pdev);
+       if (err)
+               goto out1;
+
+#ifdef HP100_DEBUG
+       printk("hp100: %s: PCI adapter found at 0x%x\n", dev->name, ioaddr);
+#endif
+       pci_set_drvdata(pdev, dev);
+       return 0;
+ out1:
+       free_netdev(dev);
+ out0:
+       pci_disable_device(pdev);
+       return err;
+}
+
+static void hp100_pci_remove(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+
+       cleanup_dev(dev);
+       pci_disable_device(pdev);
+}
+
+
+static struct pci_driver hp100_pci_driver = {
+       .name           = "hp100",
+       .id_table       = hp100_pci_tbl,
+       .probe          = hp100_pci_probe,
+       .remove         = hp100_pci_remove,
+};
+
+/*
+ *  module section
+ */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, "
+              "Siegfried \"Frieder\" Loeffler (dg1sek) <floeff@mathematik.uni-stuttgart.de>");
+MODULE_DESCRIPTION("HP CASCADE Architecture Driver for 100VG-AnyLan Network Adapters");
+
+/*
+ * Note: to register three isa devices, use:
+ * option hp100 hp100_port=0,0,0
+ *        to register one card at io 0x280 as eth239, use:
+ * option hp100 hp100_port=0x280
+ */
+#if defined(MODULE) && defined(CONFIG_ISA)
+#define HP100_DEVICES 5
+/* Parameters set by insmod */
+static int hp100_port[HP100_DEVICES] = { 0, [1 ... (HP100_DEVICES-1)] = -1 };
+module_param_hw_array(hp100_port, int, ioport, NULL, 0);
+
+/* List of devices */
+static struct net_device *hp100_devlist[HP100_DEVICES];
+
+static int __init hp100_isa_init(void)
+{
+       struct net_device *dev;
+       int i, err, cards = 0;
+
+       /* Don't autoprobe ISA bus */
+       if (hp100_port[0] == 0)
+               return -ENODEV;
+
+       /* Loop on all possible base addresses */
+       for (i = 0; i < HP100_DEVICES && hp100_port[i] != -1; ++i) {
+               dev = alloc_etherdev(sizeof(struct hp100_private));
+               if (!dev) {
+                       while (cards > 0)
+                               cleanup_dev(hp100_devlist[--cards]);
+
+                       return -ENOMEM;
+               }
+
+               err = hp100_isa_probe(dev, hp100_port[i]);
+               if (!err)
+                       hp100_devlist[cards++] = dev;
+               else
+                       free_netdev(dev);
+       }
+
+       return cards > 0 ? 0 : -ENODEV;
+}
+
+static void hp100_isa_cleanup(void)
+{
+       int i;
+
+       for (i = 0; i < HP100_DEVICES; i++) {
+               struct net_device *dev = hp100_devlist[i];
+               if (dev)
+                       cleanup_dev(dev);
+       }
+}
+#else
+#define hp100_isa_init()       (0)
+#define hp100_isa_cleanup()    do { } while(0)
+#endif
+
+static int __init hp100_module_init(void)
+{
+       int err;
+
+       err = hp100_isa_init();
+       if (err && err != -ENODEV)
+               goto out;
+       err = eisa_driver_register(&hp100_eisa_driver);
+       if (err && err != -ENODEV)
+               goto out2;
+       err = pci_register_driver(&hp100_pci_driver);
+       if (err && err != -ENODEV)
+               goto out3;
+ out:
+       return err;
+ out3:
+       eisa_driver_unregister (&hp100_eisa_driver);
+ out2:
+       hp100_isa_cleanup();
+       goto out;
+}
+
+
+static void __exit hp100_module_exit(void)
+{
+       hp100_isa_cleanup();
+       eisa_driver_unregister (&hp100_eisa_driver);
+       pci_unregister_driver (&hp100_pci_driver);
+}
+
+module_init(hp100_module_init)
+module_exit(hp100_module_exit)
diff --git a/drivers/staging/hp/hp100.h b/drivers/staging/hp/hp100.h
new file mode 100644 (file)
index 0000000..7239b94
--- /dev/null
@@ -0,0 +1,611 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * hp100.h: Hewlett Packard HP10/100VG ANY LAN ethernet driver for Linux.
+ *
+ * $Id: hp100.h,v 1.51 1997/04/08 14:26:42 floeff Exp floeff $
+ *
+ * Authors:  Jaroslav Kysela, <perex@pf.jcu.cz>
+ *           Siegfried Loeffler <floeff@tunix.mathematik.uni-stuttgart.de>
+ *
+ * This driver is based on the 'hpfepkt' crynwr packet driver.
+ */
+
+/****************************************************************************
+ *  Hardware Constants
+ ****************************************************************************/
+
+/*
+ * Page Identifiers
+ * (Swap Paging Register, PAGING, bits 3:0, Offset 0x02)
+ */
+
+#define HP100_PAGE_PERFORMANCE 0x0     /* Page 0 */
+#define HP100_PAGE_MAC_ADDRESS 0x1     /* Page 1 */
+#define HP100_PAGE_HW_MAP      0x2     /* Page 2 */
+#define HP100_PAGE_EEPROM_CTRL 0x3     /* Page 3 */
+#define HP100_PAGE_MAC_CTRL    0x4     /* Page 4 */
+#define HP100_PAGE_MMU_CFG     0x5     /* Page 5 */
+#define HP100_PAGE_ID_MAC_ADDR 0x6     /* Page 6 */
+#define HP100_PAGE_MMU_POINTER 0x7     /* Page 7 */
+
+
+/* Registers that are present on all pages  */
+
+#define HP100_REG_HW_ID                0x00    /* R:  (16) Unique card ID           */
+#define HP100_REG_TRACE                0x00    /* W:  (16) Used for debug output    */
+#define HP100_REG_PAGING       0x02    /* R:  (16),15:4 Card ID             */
+                                       /* W:  (16),3:0 Switch pages         */
+#define HP100_REG_OPTION_LSW   0x04    /* RW: (16) Select card functions    */
+#define HP100_REG_OPTION_MSW   0x06    /* RW: (16) Select card functions    */
+
+/*  Page 0 - Performance  */
+
+#define HP100_REG_IRQ_STATUS   0x08    /* RW: (16) Which ints are pending   */
+#define HP100_REG_IRQ_MASK     0x0a    /* RW: (16) Select ints to allow     */
+#define HP100_REG_FRAGMENT_LEN 0x0c    /* W: (16)12:0 Current fragment len */
+/* Note: For 32 bit systems, fragment len and offset registers are available */
+/*       at offset 0x28 and 0x2c, where they can be written as 32bit values. */
+#define HP100_REG_OFFSET       0x0e    /* RW: (16)12:0 Offset to start read */
+#define HP100_REG_DATA32       0x10    /* RW: (32) I/O mode data port       */
+#define HP100_REG_DATA16       0x12    /* RW: WORDs must be read from here  */
+#define HP100_REG_TX_MEM_FREE  0x14    /* RD: (32) Amount of free Tx mem    */
+#define HP100_REG_TX_PDA_L      0x14   /* W: (32) BM: Ptr to PDL, Low Pri  */
+#define HP100_REG_TX_PDA_H      0x1c   /* W: (32) BM: Ptr to PDL, High Pri */
+#define HP100_REG_RX_PKT_CNT   0x18    /* RD: (8) Rx count of pkts on card  */
+#define HP100_REG_TX_PKT_CNT   0x19    /* RD: (8) Tx count of pkts on card  */
+#define HP100_REG_RX_PDL        0x1a   /* R: (8) BM: # rx pdl not executed */
+#define HP100_REG_TX_PDL        0x1b   /* R: (8) BM: # tx pdl not executed */
+#define HP100_REG_RX_PDA        0x18   /* W: (32) BM: Up to 31 addresses */
+                                       /*             which point to a PDL */
+#define HP100_REG_SL_EARLY      0x1c   /*    (32) Enhanced Slave Early Rx */
+#define HP100_REG_STAT_DROPPED  0x20   /* R (12) Dropped Packet Counter */
+#define HP100_REG_STAT_ERRORED  0x22   /* R (8) Errored Packet Counter */
+#define HP100_REG_STAT_ABORT    0x23   /* R (8) Abort Counter/OW Coll. Flag */
+#define HP100_REG_RX_RING       0x24   /* W (32) Slave: RX Ring Pointers */
+#define HP100_REG_32_FRAGMENT_LEN 0x28 /* W (13) Slave: Fragment Length Reg */
+#define HP100_REG_32_OFFSET     0x2c   /* W (16) Slave: Offset Register */
+
+/*  Page 1 - MAC Address/Hash Table  */
+
+#define HP100_REG_MAC_ADDR     0x08    /* RW: (8) Cards MAC address         */
+#define HP100_REG_HASH_BYTE0   0x10    /* RW: (8) Cards multicast filter    */
+
+/*  Page 2 - Hardware Mapping  */
+
+#define HP100_REG_MEM_MAP_LSW  0x08    /* RW: (16) LSW of cards mem addr    */
+#define HP100_REG_MEM_MAP_MSW  0x0a    /* RW: (16) MSW of cards mem addr    */
+#define HP100_REG_IO_MAP       0x0c    /* RW: (8) Cards I/O address         */
+#define HP100_REG_IRQ_CHANNEL  0x0d    /* RW: (8) IRQ and edge/level int    */
+#define HP100_REG_SRAM         0x0e    /* RW: (8) How much RAM on card      */
+#define HP100_REG_BM           0x0f    /* RW: (8) Controls BM functions     */
+
+/* New on Page 2 for ETR chips: */
+#define HP100_REG_MODECTRL1     0x10   /* RW: (8) Mode Control 1 */
+#define HP100_REG_MODECTRL2     0x11   /* RW: (8) Mode Control 2 */
+#define HP100_REG_PCICTRL1      0x12   /* RW: (8) PCI Cfg 1 */
+#define HP100_REG_PCICTRL2      0x13   /* RW: (8) PCI Cfg 2 */
+#define HP100_REG_PCIBUSMLAT    0x15   /* RW: (8) PCI Bus Master Latency */
+#define HP100_REG_EARLYTXCFG    0x16   /* RW: (16) Early TX Cfg/Cntrl Reg */
+#define HP100_REG_EARLYRXCFG    0x18   /* RW: (8) Early RX Cfg/Cntrl Reg */
+#define HP100_REG_ISAPNPCFG1    0x1a   /* RW: (8) ISA PnP Cfg/Cntrl Reg 1 */
+#define HP100_REG_ISAPNPCFG2    0x1b   /* RW: (8) ISA PnP Cfg/Cntrl Reg 2 */
+
+/*  Page 3 - EEPROM/Boot ROM  */
+
+#define HP100_REG_EEPROM_CTRL  0x08    /* RW: (16) Used to load EEPROM      */
+#define HP100_REG_BOOTROM_CTRL  0x0a
+
+/*  Page 4 - LAN Configuration  (MAC_CTRL) */
+
+#define HP100_REG_10_LAN_CFG_1 0x08    /* RW: (8) Set 10M XCVR functions   */
+#define HP100_REG_10_LAN_CFG_2  0x09   /* RW: (8)     10M XCVR functions   */
+#define HP100_REG_VG_LAN_CFG_1 0x0a    /* RW: (8) Set 100M XCVR functions  */
+#define HP100_REG_VG_LAN_CFG_2  0x0b   /* RW: (8) 100M LAN Training cfgregs */
+#define HP100_REG_MAC_CFG_1    0x0c    /* RW: (8) Types of pkts to accept   */
+#define HP100_REG_MAC_CFG_2    0x0d    /* RW: (8) Misc MAC functions        */
+#define HP100_REG_MAC_CFG_3     0x0e   /* RW: (8) Misc MAC functions */
+#define HP100_REG_MAC_CFG_4     0x0f   /* R:  (8) Misc MAC states */
+#define HP100_REG_DROPPED      0x10    /* R:  (16),11:0 Pkts can't fit in mem */
+#define HP100_REG_CRC          0x12    /* R:  (8) Pkts with CRC             */
+#define HP100_REG_ABORT                0x13    /* R:  (8) Aborted Tx pkts           */
+#define HP100_REG_TRAIN_REQUEST 0x14   /* RW: (16) Endnode MAC register. */
+#define HP100_REG_TRAIN_ALLOW   0x16   /* R:  (16) Hub allowed register */
+
+/*  Page 5 - MMU  */
+
+#define HP100_REG_RX_MEM_STOP  0x0c    /* RW: (16) End of Rx ring addr      */
+#define HP100_REG_TX_MEM_STOP  0x0e    /* RW: (16) End of Tx ring addr      */
+#define HP100_REG_PDL_MEM_STOP  0x10   /* Not used by 802.12 devices */
+#define HP100_REG_ECB_MEM_STOP  0x14   /* I've no idea what this is */
+
+/*  Page 6 - Card ID/Physical LAN Address  */
+
+#define HP100_REG_BOARD_ID     0x08    /* R:  (8) EISA/ISA card ID          */
+#define HP100_REG_BOARD_IO_CHCK 0x0c   /* R:  (8) Added to ID to get FFh    */
+#define HP100_REG_SOFT_MODEL   0x0d    /* R:  (8) Config program defined    */
+#define HP100_REG_LAN_ADDR     0x10    /* R:  (8) MAC addr of card          */
+#define HP100_REG_LAN_ADDR_CHCK 0x16   /* R:  (8) Added to addr to get FFh  */
+
+/*  Page 7 - MMU Current Pointers  */
+
+#define HP100_REG_PTR_RXSTART  0x08    /* R:  (16) Current begin of Rx ring */
+#define HP100_REG_PTR_RXEND    0x0a    /* R:  (16) Current end of Rx ring   */
+#define HP100_REG_PTR_TXSTART  0x0c    /* R:  (16) Current begin of Tx ring */
+#define HP100_REG_PTR_TXEND    0x0e    /* R:  (16) Current end of Rx ring   */
+#define HP100_REG_PTR_RPDLSTART 0x10
+#define HP100_REG_PTR_RPDLEND   0x12
+#define HP100_REG_PTR_RINGPTRS  0x14
+#define HP100_REG_PTR_MEMDEBUG  0x1a
+/* ------------------------------------------------------------------------ */
+
+
+/*
+ * Hardware ID Register I (Always available, HW_ID, Offset 0x00)
+ */
+#define HP100_HW_ID_CASCADE     0x4850 /* Identifies Cascade Chip */
+
+/*
+ * Hardware ID Register 2 & Paging Register
+ * (Always available, PAGING, Offset 0x02)
+ * Bits 15:4 are for the Chip ID
+ */
+#define HP100_CHIPID_MASK        0xFFF0
+#define HP100_CHIPID_SHASTA      0x5350        /* Not 802.12 compliant */
+                                        /* EISA BM/SL, MCA16/32 SL, ISA SL */
+#define HP100_CHIPID_RAINIER     0x5360        /* Not 802.12 compliant EISA BM, */
+                                        /* PCI SL, MCA16/32 SL, ISA SL */
+#define HP100_CHIPID_LASSEN      0x5370        /* 802.12 compliant PCI BM, PCI SL */
+                                        /* LRF supported */
+
+/*
+ *  Option Registers I and II
+ * (Always available, OPTION_LSW, Offset 0x04-0x05)
+ */
+#define HP100_DEBUG_EN         0x8000  /* 0:Dis., 1:Enable Debug Dump Ptr. */
+#define HP100_RX_HDR           0x4000  /* 0:Dis., 1:Enable putting pkt into */
+                                       /*   system mem. before Rx interrupt */
+#define HP100_MMAP_DIS         0x2000  /* 0:Enable, 1:Disable mem.mapping. */
+                                       /*   MMAP_DIS must be 0 and MEM_EN */
+                                       /*   must be 1 for memory-mapped */
+                                       /*   mode to be enabled */
+#define HP100_EE_EN            0x1000  /* 0:Disable,1:Enable EEPROM writing */
+#define HP100_BM_WRITE         0x0800  /* 0:Slave, 1:Bus Master for Tx data */
+#define HP100_BM_READ          0x0400  /* 0:Slave, 1:Bus Master for Rx data */
+#define HP100_TRI_INT          0x0200  /* 0:Don't, 1:Do tri-state the int */
+#define HP100_MEM_EN           0x0040  /* Config program set this to */
+                                       /*   0:Disable, 1:Enable mem map. */
+                                       /*   See MMAP_DIS. */
+#define HP100_IO_EN            0x0020  /* 1:Enable I/O transfers */
+#define HP100_BOOT_EN          0x0010  /* 1:Enable boot ROM access */
+#define HP100_FAKE_INT         0x0008  /* 1:int */
+#define HP100_INT_EN           0x0004  /* 1:Enable ints from card */
+#define HP100_HW_RST           0x0002  /* 0:Reset, 1:Out of reset */
+                                       /* NIC reset on 0 to 1 transition */
+
+/*
+ *  Option Register III
+ * (Always available, OPTION_MSW, Offset 0x06)
+ */
+#define HP100_PRIORITY_TX      0x0080  /* 1:Do all Tx pkts as priority */
+#define HP100_EE_LOAD          0x0040  /* 1:EEPROM loading, 0 when done */
+#define HP100_ADV_NXT_PKT      0x0004  /* 1:Advance to next pkt in Rx queue */
+                                       /*   h/w will set to 0 when done */
+#define HP100_TX_CMD           0x0002  /* 1:Tell h/w download done, h/w */
+                                       /*   will set to 0 when done */
+
+/*
+ * Interrupt Status Registers I and II
+ * (Page PERFORMANCE, IRQ_STATUS, Offset 0x08-0x09)
+ * Note: With old chips, these Registers will clear when 1 is written to them
+ *       with new chips this depends on setting of CLR_ISMODE
+ */
+#define HP100_RX_EARLY_INT      0x2000
+#define HP100_RX_PDA_ZERO       0x1000
+#define HP100_RX_PDL_FILL_COMPL 0x0800
+#define HP100_RX_PACKET                0x0400  /* 0:No, 1:Yes pkt has been Rx */
+#define HP100_RX_ERROR         0x0200  /* 0:No, 1:Yes Rx pkt had error */
+#define HP100_TX_PDA_ZERO       0x0020 /* 1 when PDA count goes to zero */
+#define HP100_TX_SPACE_AVAIL   0x0010  /* 0:<8192, 1:>=8192 Tx free bytes */
+#define HP100_TX_COMPLETE      0x0008  /* 0:No, 1:Yes a Tx has completed */
+#define HP100_MISC_ERROR        0x0004 /* 0:No, 1:Lan Link down or bus error */
+#define HP100_TX_ERROR         0x0002  /* 0:No, 1:Yes Tx pkt had error */
+
+/*
+ * Xmit Memory Free Count
+ * (Page PERFORMANCE, TX_MEM_FREE, Offset 0x14) (Read only, 32bit)
+ */
+#define HP100_AUTO_COMPARE     0x80000000      /* Tx Space avail & pkts<255 */
+#define HP100_FREE_SPACE       0x7fffffe0      /* Tx free memory */
+
+/*
+ *  IRQ Channel
+ * (Page HW_MAP, IRQ_CHANNEL, Offset 0x0d)
+ */
+#define HP100_ZERO_WAIT_EN     0x80    /* 0:No, 1:Yes asserts NOWS signal */
+#define HP100_IRQ_SCRAMBLE      0x40
+#define HP100_BOND_HP           0x20
+#define HP100_LEVEL_IRQ                0x10    /* 0:Edge, 1:Level type interrupts. */
+                                       /* (Only valid on EISA cards) */
+#define HP100_IRQMASK          0x0F    /* Isolate the IRQ bits */
+
+/*
+ * SRAM Parameters
+ * (Page HW_MAP, SRAM, Offset 0x0e)
+ */
+#define HP100_RAM_SIZE_MASK    0xe0    /* AND to get SRAM size index */
+#define HP100_RAM_SIZE_SHIFT   0x05    /* Shift count(put index in lwr bits) */
+
+/*
+ * Bus Master Register
+ * (Page HW_MAP, BM, Offset 0x0f)
+ */
+#define HP100_BM_BURST_RD       0x01   /* EISA only: 1=Use burst trans. fm system */
+                                       /* memory to chip (tx) */
+#define HP100_BM_BURST_WR       0x02   /* EISA only: 1=Use burst trans. fm system */
+                                       /* memory to chip (rx) */
+#define HP100_BM_MASTER                0x04    /* 0:Slave, 1:BM mode */
+#define HP100_BM_PAGE_CK        0x08   /* This bit should be set whenever in */
+                                       /* an EISA system */
+#define HP100_BM_PCI_8CLK       0x40   /* ... cycles 8 clocks apart */
+
+
+/*
+ * Mode Control Register I
+ * (Page HW_MAP, MODECTRL1, Offset0x10)
+ */
+#define HP100_TX_DUALQ          0x10
+   /* If set and BM -> dual tx pda queues */
+#define HP100_ISR_CLRMODE       0x02   /* If set ISR will clear all pending */
+                                      /* interrupts on read (etr only?) */
+#define HP100_EE_NOLOAD         0x04   /* Status whether res will be loaded */
+                                      /* from the eeprom */
+#define HP100_TX_CNT_FLG        0x08   /* Controls Early TX Reg Cnt Field */
+#define HP100_PDL_USE3          0x10   /* If set BM engine will read only */
+                                      /* first three data elements of a PDL */
+                                      /* on the first access. */
+#define HP100_BUSTYPE_MASK      0xe0   /* Three bit bus type info */
+
+/*
+ * Mode Control Register II
+ * (Page HW_MAP, MODECTRL2, Offset0x11)
+ */
+#define HP100_EE_MASK           0x0f   /* Tell EEPROM circuit not to load */
+                                      /* certain resources */
+#define HP100_DIS_CANCEL        0x20   /* For tx dualq mode operation */
+#define HP100_EN_PDL_WB         0x40   /* 1: Status of PDL completion may be */
+                                      /* written back to system mem */
+#define HP100_EN_BUS_FAIL       0x80   /* Enables bus-fail portion of misc */
+                                      /* interrupt */
+
+/*
+ * PCI Configuration and Control Register I
+ * (Page HW_MAP, PCICTRL1, Offset 0x12)
+ */
+#define HP100_LO_MEM            0x01   /* 1: Mapped Mem requested below 1MB */
+#define HP100_NO_MEM            0x02   /* 1: Disables Req for sysmem to PCI */
+                                      /* bios */
+#define HP100_USE_ISA           0x04   /* 1: isa type decodes will occur */
+                                      /* simultaneously with PCI decodes */
+#define HP100_IRQ_HI_MASK       0xf0   /* pgmed by pci bios */
+#define HP100_PCI_IRQ_HI_MASK   0x78   /* Isolate 4 bits for PCI IRQ  */
+
+/*
+ * PCI Configuration and Control Register II
+ * (Page HW_MAP, PCICTRL2, Offset 0x13)
+ */
+#define HP100_RD_LINE_PDL       0x01   /* 1: PCI command Memory Read Line en */
+#define HP100_RD_TX_DATA_MASK   0x06   /* choose PCI memread cmds for TX */
+#define HP100_MWI               0x08   /* 1: en. PCI memory write invalidate */
+#define HP100_ARB_MODE          0x10   /* Select PCI arbitor type */
+#define HP100_STOP_EN           0x20   /* Enables PCI state machine to issue */
+                                      /* pci stop if cascade not ready */
+#define HP100_IGNORE_PAR        0x40   /* 1: PCI state machine ignores parity */
+#define HP100_PCI_RESET         0x80   /* 0->1: Reset PCI block */
+
+/*
+ * Early TX Configuration and Control Register
+ * (Page HW_MAP, EARLYTXCFG, Offset 0x16)
+ */
+#define HP100_EN_EARLY_TX       0x8000 /* 1=Enable Early TX */
+#define HP100_EN_ADAPTIVE       0x4000 /* 1=Enable adaptive mode */
+#define HP100_EN_TX_UR_IRQ      0x2000 /* reserved, must be 0 */
+#define HP100_EN_LOW_TX         0x1000 /* reserved, must be 0 */
+#define HP100_ET_CNT_MASK       0x0fff /* bits 11..0: ET counters */
+
+/*
+ * Early RX Configuration and Control Register
+ * (Page HW_MAP, EARLYRXCFG, Offset 0x18)
+ */
+#define HP100_EN_EARLY_RX       0x80   /* 1=Enable Early RX */
+#define HP100_EN_LOW_RX         0x40   /* reserved, must be 0 */
+#define HP100_RX_TRIP_MASK      0x1f   /* bits 4..0: threshold at which the
+                                        * early rx circuit will start the
+                                        * dma of received packet into system
+                                        * memory for BM */
+
+/*
+ *  Serial Devices Control Register
+ * (Page EEPROM_CTRL, EEPROM_CTRL, Offset 0x08)
+ */
+#define HP100_EEPROM_LOAD      0x0001  /* 0->1 loads EEPROM into registers. */
+                                       /* When it goes back to 0, load is   */
+                                       /* complete. This should take ~600us. */
+
+/*
+ * 10MB LAN Control and Configuration Register I
+ * (Page MAC_CTRL, 10_LAN_CFG_1, Offset 0x08)
+ */
+#define HP100_MAC10_SEL                0xc0    /* Get bits to indicate MAC */
+#define HP100_AUI_SEL          0x20    /* Status of AUI selection */
+#define HP100_LOW_TH           0x10    /* 0:No, 1:Yes allow better cabling */
+#define HP100_LINK_BEAT_DIS    0x08    /* 0:Enable, 1:Disable link beat */
+#define HP100_LINK_BEAT_ST     0x04    /* 0:No, 1:Yes link beat being Rx */
+#define HP100_R_ROL_ST         0x02    /* 0:No, 1:Yes Rx twisted pair has */
+                                       /*             been reversed */
+#define HP100_AUI_ST           0x01    /* 0:No, 1:Yes use AUI on TP card */
+
+/*
+ * 10 MB LAN Control and Configuration Register II
+ * (Page MAC_CTRL, 10_LAN_CFG_2, Offset 0x09)
+ */
+#define HP100_SQU_ST           0x01    /* 0:No, 1:Yes collision signal sent */
+                                       /*       after Tx.Only used for AUI. */
+#define HP100_FULLDUP           0x02   /* 1: LXT901 XCVR fullduplx enabled */
+#define HP100_DOT3_MAC          0x04   /* 1: DOT 3 Mac sel. unless Autosel */
+
+/*
+ * MAC Selection, use with MAC10_SEL bits
+ */
+#define HP100_AUTO_SEL_10      0x0     /* Auto select */
+#define HP100_XCVR_LXT901_10   0x1     /* LXT901 10BaseT transceiver */
+#define HP100_XCVR_7213                0x2     /* 7213 transceiver */
+#define HP100_XCVR_82503       0x3     /* 82503 transceiver */
+
+/*
+ *  100MB LAN Training Register
+ * (Page MAC_CTRL, VG_LAN_CFG_2, Offset 0x0b) (old, pre 802.12)
+ */
+#define HP100_FRAME_FORMAT     0x08    /* 0:802.3, 1:802.5 frames */
+#define HP100_BRIDGE           0x04    /* 0:No, 1:Yes tell hub i am a bridge */
+#define HP100_PROM_MODE                0x02    /* 0:No, 1:Yes tell hub card is */
+                                       /*         promiscuous */
+#define HP100_REPEATER         0x01    /* 0:No, 1:Yes tell hub MAC wants to */
+                                       /*         be a cascaded repeater */
+
+/*
+ * 100MB LAN Control and Configuration Register
+ * (Page MAC_CTRL, VG_LAN_CFG_1, Offset 0x0a)
+ */
+#define HP100_VG_SEL           0x80    /* 0:No, 1:Yes use 100 Mbit MAC */
+#define HP100_LINK_UP_ST       0x40    /* 0:No, 1:Yes endnode logged in */
+#define HP100_LINK_CABLE_ST    0x20    /* 0:No, 1:Yes cable can hear tones */
+                                       /*         from  hub */
+#define HP100_LOAD_ADDR                0x10    /* 0->1 card addr will be sent  */
+                                       /* 100ms later the link status  */
+                                       /* bits are valid */
+#define HP100_LINK_CMD         0x08    /* 0->1 link will attempt to log in. */
+                                       /* 100ms later the link status */
+                                       /* bits are valid */
+#define HP100_TRN_DONE          0x04   /* NEW ETR-Chips only: Will be reset */
+                                       /* after LinkUp Cmd is given and set */
+                                       /* when training has completed. */
+#define HP100_LINK_GOOD_ST     0x02    /* 0:No, 1:Yes cable passed training */
+#define HP100_VG_RESET         0x01    /* 0:Yes, 1:No reset the 100VG MAC */
+
+
+/*
+ *  MAC Configuration Register I
+ * (Page MAC_CTRL, MAC_CFG_1, Offset 0x0c)
+ */
+#define HP100_RX_IDLE          0x80    /* 0:Yes, 1:No currently receiving pkts */
+#define HP100_TX_IDLE          0x40    /* 0:Yes, 1:No currently Txing pkts */
+#define HP100_RX_EN            0x20    /* 1: allow receiving of pkts */
+#define HP100_TX_EN            0x10    /* 1: allow transmitting of pkts */
+#define HP100_ACC_ERRORED      0x08    /* 0:No, 1:Yes allow Rx of errored pkts */
+#define HP100_ACC_MC           0x04    /* 0:No, 1:Yes allow Rx of multicast pkts */
+#define HP100_ACC_BC           0x02    /* 0:No, 1:Yes allow Rx of broadcast pkts */
+#define HP100_ACC_PHY          0x01    /* 0:No, 1:Yes allow Rx of ALL phys. pkts */
+#define HP100_MAC1MODEMASK     0xf0    /* Hide ACC bits */
+#define HP100_MAC1MODE1                0x00    /* Receive nothing, must also disable RX */
+#define HP100_MAC1MODE2                0x00
+#define HP100_MAC1MODE3                HP100_MAC1MODE2 | HP100_ACC_BC
+#define HP100_MAC1MODE4                HP100_MAC1MODE3 | HP100_ACC_MC
+#define HP100_MAC1MODE5                HP100_MAC1MODE4 /* set mc hash to all ones also */
+#define HP100_MAC1MODE6                HP100_MAC1MODE5 | HP100_ACC_PHY /* Promiscuous */
+/* Note MODE6 will receive all GOOD packets on the LAN. This really needs
+   a mode 7 defined to be LAN Analyzer mode, which will receive errored and
+   runt packets, and keep the CRC bytes. */
+#define HP100_MAC1MODE7                HP100_MAC1MODE6 | HP100_ACC_ERRORED
+
+/*
+ *  MAC Configuration Register II
+ * (Page MAC_CTRL, MAC_CFG_2, Offset 0x0d)
+ */
+#define HP100_TR_MODE          0x80    /* 0:No, 1:Yes support Token Ring formats */
+#define HP100_TX_SAME          0x40    /* 0:No, 1:Yes Tx same packet continuous */
+#define HP100_LBK_XCVR         0x20    /* 0:No, 1:Yes loopback through MAC & */
+                                       /*   transceiver */
+#define HP100_LBK_MAC          0x10    /* 0:No, 1:Yes loopback through MAC */
+#define HP100_CRC_I            0x08    /* 0:No, 1:Yes inhibit CRC on Tx packets */
+#define HP100_ACCNA             0x04   /* 1: For 802.5: Accept only token ring
+                                        * group addr that maches NA mask */
+#define HP100_KEEP_CRC         0x02    /* 0:No, 1:Yes keep CRC on Rx packets. */
+                                       /*   The length will reflect this. */
+#define HP100_ACCFA             0x01   /* 1: For 802.5: Accept only functional
+                                        * addrs that match FA mask (page1) */
+#define HP100_MAC2MODEMASK     0x02
+#define HP100_MAC2MODE1                0x00
+#define HP100_MAC2MODE2                0x00
+#define HP100_MAC2MODE3                0x00
+#define HP100_MAC2MODE4                0x00
+#define HP100_MAC2MODE5                0x00
+#define HP100_MAC2MODE6                0x00
+#define HP100_MAC2MODE7                KEEP_CRC
+
+/*
+ * MAC Configuration Register III
+ * (Page MAC_CTRL, MAC_CFG_3, Offset 0x0e)
+ */
+#define HP100_PACKET_PACE       0x03   /* Packet Pacing:
+                                        * 00: No packet pacing
+                                        * 01: 8 to 16 uS delay
+                                        * 10: 16 to 32 uS delay
+                                        * 11: 32 to 64 uS delay
+                                        */
+#define HP100_LRF_EN            0x04   /* 1: External LAN Rcv Filter and
+                                        * TCP/IP Checksumming enabled. */
+#define HP100_AUTO_MODE         0x10   /* 1: AutoSelect between 10/100 */
+
+/*
+ * MAC Configuration Register IV
+ * (Page MAC_CTRL, MAC_CFG_4, Offset 0x0f)
+ */
+#define HP100_MAC_SEL_ST        0x01   /* (R): Status of external VGSEL
+                                        * Signal, 1=100VG, 0=10Mbit sel. */
+#define HP100_LINK_FAIL_ST      0x02   /* (R): Status of Link Fail portion
+                                        * of the Misc. Interrupt */
+
+/*
+ *  100 MB LAN Training Request/Allowed Registers
+ * (Page MAC_CTRL, TRAIN_REQUEST and TRAIN_ALLOW, Offset 0x14-0x16)(ETR parts only)
+ */
+#define HP100_MACRQ_REPEATER         0x0001    /* 1: MAC tells HUB it wants to be
+                                                *    a cascaded repeater
+                                                * 0: ... wants to be a DTE */
+#define HP100_MACRQ_PROMSC           0x0006    /* 2 bits: Promiscious mode
+                                                * 00: Rcv only unicast packets
+                                                *     specifically addr to this
+                                                *     endnode
+                                                * 10: Rcv all pckts fwded by
+                                                *     the local repeater */
+#define HP100_MACRQ_FRAMEFMT_EITHER  0x0018    /* 11: either format allowed */
+#define HP100_MACRQ_FRAMEFMT_802_3   0x0000    /* 00: 802.3 is requested */
+#define HP100_MACRQ_FRAMEFMT_802_5   0x0010    /* 10: 802.5 format is requested */
+#define HP100_CARD_MACVER            0xe000    /* R: 3 bit Cards 100VG MAC version */
+#define HP100_MALLOW_REPEATER        0x0001    /* If reset, requested access as an
+                                                * end node is allowed */
+#define HP100_MALLOW_PROMSC          0x0004    /* 2 bits: Promiscious mode
+                                                * 00: Rcv only unicast packets
+                                                *     specifically addr to this
+                                                *     endnode
+                                                * 10: Rcv all pckts fwded by
+                                                *     the local repeater */
+#define HP100_MALLOW_FRAMEFMT        0x00e0    /* 2 bits: Frame Format
+                                                * 00: 802.3 format will be used
+                                                * 10: 802.5 format will be used */
+#define HP100_MALLOW_ACCDENIED       0x0400    /* N bit */
+#define HP100_MALLOW_CONFIGURE       0x0f00    /* C bit */
+#define HP100_MALLOW_DUPADDR         0x1000    /* D bit */
+#define HP100_HUB_MACVER             0xe000    /* R: 3 bit 802.12 MAC/RMAC training */
+                                            /*    protocol of repeater */
+
+/* ****************************************************************************** */
+
+/*
+ *  Set/Reset bits
+ */
+#define HP100_SET_HB           0x0100  /* 0:Set fields to 0 whose mask is 1 */
+#define HP100_SET_LB           0x0001  /* HB sets upper byte, LB sets lower byte */
+#define HP100_RESET_HB         0x0000  /* For readability when resetting bits */
+#define HP100_RESET_LB         0x0000  /* For readability when resetting bits */
+
+/*
+ *  Misc. Constants
+ */
+#define HP100_LAN_100          100     /* lan_type value for VG */
+#define HP100_LAN_10           10      /* lan_type value for 10BaseT */
+#define HP100_LAN_COAX         9       /* lan_type value for Coax */
+#define HP100_LAN_ERR          (-1)    /* lan_type value for link down */
+
+/*
+ * Bus Master Data Structures  ----------------------------------------------
+ */
+
+#define MAX_RX_PDL              30     /* Card limit = 31 */
+#define MAX_RX_FRAG             2      /* Don't need more... */
+#define MAX_TX_PDL              29
+#define MAX_TX_FRAG             2      /* Limit = 31 */
+
+/* Define total PDL area size in bytes (should be 4096) */
+/* This is the size of kernel (dma) memory that will be allocated. */
+#define MAX_RINGSIZE ((MAX_RX_FRAG*8+4+4)*MAX_RX_PDL+(MAX_TX_FRAG*8+4+4)*MAX_TX_PDL)+16
+
+/* Ethernet Packet Sizes */
+#define MIN_ETHER_SIZE          60
+#define MAX_ETHER_SIZE          1514   /* Needed for preallocation of */
+                                       /* skb buffer when busmastering */
+
+/* Tx or Rx Ring Entry */
+typedef struct hp100_ring {
+       u_int *pdl;             /* Address of PDLs PDH, dword before
+                                * this address is used for rx hdr */
+       u_int pdl_paddr;        /* Physical address of PDL */
+       struct sk_buff *skb;
+       struct hp100_ring *next;
+} hp100_ring_t;
+
+
+
+/* Mask for Header Descriptor */
+#define HP100_PKT_LEN_MASK     0x1FFF  /* AND with RxLength to get length */
+
+
+/* Receive Packet Status.  Note, the error bits are only valid if ACC_ERRORED
+   bit in the MAC Configuration Register 1 is set. */
+#define HP100_RX_PRI           0x8000  /* 0:No, 1:Yes packet is priority */
+#define HP100_SDF_ERR          0x4000  /* 0:No, 1:Yes start of frame error */
+#define HP100_SKEW_ERR         0x2000  /* 0:No, 1:Yes skew out of range */
+#define HP100_BAD_SYMBOL_ERR   0x1000  /* 0:No, 1:Yes invalid symbol received */
+#define HP100_RCV_IPM_ERR      0x0800  /* 0:No, 1:Yes pkt had an invalid packet */
+                                       /*   marker */
+#define HP100_SYMBOL_BAL_ERR   0x0400  /* 0:No, 1:Yes symbol balance error */
+#define HP100_VG_ALN_ERR       0x0200  /* 0:No, 1:Yes non-octet received */
+#define HP100_TRUNC_ERR                0x0100  /* 0:No, 1:Yes the packet was truncated */
+#define HP100_RUNT_ERR         0x0040  /* 0:No, 1:Yes pkt length < Min Pkt */
+                                       /*   Length Reg. */
+#define HP100_ALN_ERR          0x0010  /* 0:No, 1:Yes align error. */
+#define HP100_CRC_ERR          0x0008  /* 0:No, 1:Yes CRC occurred. */
+
+/* The last three bits indicate the type of destination address */
+
+#define HP100_MULTI_ADDR_HASH  0x0006  /* 110: Addr multicast, matched hash */
+#define HP100_BROADCAST_ADDR   0x0003  /* x11: Addr broadcast */
+#define HP100_MULTI_ADDR_NO_HASH 0x0002        /* 010: Addr multicast, didn't match hash */
+#define HP100_PHYS_ADDR_MATCH  0x0001  /* x01: Addr was physical and mine */
+#define HP100_PHYS_ADDR_NO_MATCH 0x0000        /* x00: Addr was physical but not mine */
+
+/*
+ *  macros
+ */
+
+#define hp100_inb( reg ) \
+        inb( ioaddr + HP100_REG_##reg )
+#define hp100_inw( reg ) \
+       inw( ioaddr + HP100_REG_##reg )
+#define hp100_inl( reg ) \
+       inl( ioaddr + HP100_REG_##reg )
+#define hp100_outb( data, reg ) \
+       outb( data, ioaddr + HP100_REG_##reg )
+#define hp100_outw( data, reg ) \
+       outw( data, ioaddr + HP100_REG_##reg )
+#define hp100_outl( data, reg ) \
+       outl( data, ioaddr + HP100_REG_##reg )
+#define hp100_orb( data, reg ) \
+       outb( inb( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg )
+#define hp100_orw( data, reg ) \
+       outw( inw( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg )
+#define hp100_andb( data, reg ) \
+       outb( inb( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg )
+#define hp100_andw( data, reg ) \
+       outw( inw( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg )
+
+#define hp100_page( page ) \
+       outw( HP100_PAGE_##page, ioaddr + HP100_REG_PAGING )
+#define hp100_ints_off() \
+       outw( HP100_INT_EN | HP100_RESET_LB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_ints_on() \
+       outw( HP100_INT_EN | HP100_SET_LB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_mem_map_enable() \
+       outw( HP100_MMAP_DIS | HP100_RESET_HB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_mem_map_disable() \
+       outw( HP100_MMAP_DIS | HP100_SET_HB, ioaddr + HP100_REG_OPTION_LSW )
index eee1998..fac38c8 100644 (file)
@@ -469,10 +469,8 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
        /* Set the encryption - we only support wep */
        if (is_wep) {
                if (sme->key) {
-                       if (sme->key_idx >= NUM_WEPKEYS) {
-                               err = -EINVAL;
-                               goto exit;
-                       }
+                       if (sme->key_idx >= NUM_WEPKEYS)
+                               return -EINVAL;
 
                        result = prism2_domibset_uint32(wlandev,
                                DIDMIB_DOT11SMT_PRIVACYTABLE_WEPDEFAULTKEYID,
index 04bf2ac..2d19f0e 100644 (file)
@@ -1074,27 +1074,6 @@ passthrough_parse_cdb(struct se_cmd *cmd,
        struct se_device *dev = cmd->se_dev;
        unsigned int size;
 
-       /*
-        * Clear a lun set in the cdb if the initiator talking to use spoke
-        * and old standards version, as we can't assume the underlying device
-        * won't choke up on it.
-        */
-       switch (cdb[0]) {
-       case READ_10: /* SBC - RDProtect */
-       case READ_12: /* SBC - RDProtect */
-       case READ_16: /* SBC - RDProtect */
-       case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
-       case VERIFY: /* SBC - VRProtect */
-       case VERIFY_16: /* SBC - VRProtect */
-       case WRITE_VERIFY: /* SBC - VRProtect */
-       case WRITE_VERIFY_12: /* SBC - VRProtect */
-       case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
-               break;
-       default:
-               cdb[1] &= 0x1f; /* clear logical unit number */
-               break;
-       }
-
        /*
         * For REPORT LUNS we always need to emulate the response, for everything
         * else, pass it up.
index 391f397..6b9865c 100644 (file)
@@ -88,7 +88,7 @@ struct cpufreq_cooling_device {
        struct cpufreq_policy *policy;
        struct list_head node;
        struct time_in_idle *idle_time;
-       struct dev_pm_qos_request qos_req;
+       struct freq_qos_request qos_req;
 };
 
 static DEFINE_IDA(cpufreq_ida);
@@ -331,7 +331,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
 
        cpufreq_cdev->cpufreq_state = state;
 
-       return dev_pm_qos_update_request(&cpufreq_cdev->qos_req,
+       return freq_qos_update_request(&cpufreq_cdev->qos_req,
                                cpufreq_cdev->freq_table[state].frequency);
 }
 
@@ -615,9 +615,9 @@ __cpufreq_cooling_register(struct device_node *np,
                cooling_ops = &cpufreq_cooling_ops;
        }
 
-       ret = dev_pm_qos_add_request(dev, &cpufreq_cdev->qos_req,
-                                    DEV_PM_QOS_MAX_FREQUENCY,
-                                    cpufreq_cdev->freq_table[0].frequency);
+       ret = freq_qos_add_request(&policy->constraints,
+                                  &cpufreq_cdev->qos_req, FREQ_QOS_MAX,
+                                  cpufreq_cdev->freq_table[0].frequency);
        if (ret < 0) {
                pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
                       ret);
@@ -637,7 +637,7 @@ __cpufreq_cooling_register(struct device_node *np,
        return cdev;
 
 remove_qos_req:
-       dev_pm_qos_remove_request(&cpufreq_cdev->qos_req);
+       freq_qos_remove_request(&cpufreq_cdev->qos_req);
 remove_ida:
        ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
 free_table:
@@ -736,7 +736,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
        mutex_unlock(&cooling_list_lock);
 
        thermal_cooling_device_unregister(cdev);
-       dev_pm_qos_remove_request(&cpufreq_cdev->qos_req);
+       freq_qos_remove_request(&cpufreq_cdev->qos_req);
        ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
        kfree(cpufreq_cdev->idle_time);
        kfree(cpufreq_cdev->freq_table);
index 02c5aff..8df89e9 100644 (file)
@@ -72,8 +72,8 @@ static int serial_8250_men_mcb_probe(struct mcb_device *mdev,
 {
        struct serial_8250_men_mcb_data *data;
        struct resource *mem;
-       unsigned int num_ports;
-       unsigned int i;
+       int num_ports;
+       int i;
        void __iomem *membase;
 
        mem = mcb_get_resource(mdev, IORESOURCE_MEM);
@@ -88,7 +88,7 @@ static int serial_8250_men_mcb_probe(struct mcb_device *mdev,
        dev_dbg(&mdev->dev, "found a 16z%03u with %u ports\n",
                mdev->id, num_ports);
 
-       if (num_ports == 0 || num_ports > 4) {
+       if (num_ports <= 0 || num_ports > 4) {
                dev_err(&mdev->dev, "unexpected number of ports: %u\n",
                        num_ports);
                return -ENODEV;
@@ -133,7 +133,7 @@ static int serial_8250_men_mcb_probe(struct mcb_device *mdev,
 
 static void serial_8250_men_mcb_remove(struct mcb_device *mdev)
 {
-       unsigned int num_ports, i;
+       int num_ports, i;
        struct serial_8250_men_mcb_data *data = mcb_get_drvdata(mdev);
 
        if (!data)
index 1109dc5..c2123ef 100644 (file)
@@ -166,7 +166,6 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
                goto err;
 
        switch (cdns->dr_mode) {
-       case USB_DR_MODE_UNKNOWN:
        case USB_DR_MODE_OTG:
                ret = cdns3_hw_role_switch(cdns);
                if (ret)
@@ -182,6 +181,9 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
                if (ret)
                        goto err;
                break;
+       default:
+               ret = -EINVAL;
+               goto err;
        }
 
        return ret;
index 2ca280f..9050b38 100644 (file)
@@ -1145,6 +1145,14 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
                request = cdns3_next_request(&priv_ep->pending_req_list);
                priv_req = to_cdns3_request(request);
 
+               trb = priv_ep->trb_pool + priv_ep->dequeue;
+
+               /* Request was dequeued and TRB was changed to TRB_LINK. */
+               if (TRB_FIELD_TO_TYPE(trb->control) == TRB_LINK) {
+                       trace_cdns3_complete_trb(priv_ep, trb);
+                       cdns3_move_deq_to_next_trb(priv_req);
+               }
+
                /* Re-select endpoint. It could be changed by other CPU during
                 * handling usb_gadget_giveback_request.
                 */
@@ -2067,6 +2075,7 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
        struct usb_request *req, *req_temp;
        struct cdns3_request *priv_req;
        struct cdns3_trb *link_trb;
+       u8 req_on_hw_ring = 0;
        unsigned long flags;
        int ret = 0;
 
@@ -2083,8 +2092,10 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
 
        list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list,
                                 list) {
-               if (request == req)
+               if (request == req) {
+                       req_on_hw_ring = 1;
                        goto found;
+               }
        }
 
        list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list,
@@ -2096,27 +2107,21 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
        goto not_found;
 
 found:
-
-       if (priv_ep->wa1_trb == priv_req->trb)
-               cdns3_wa1_restore_cycle_bit(priv_ep);
-
        link_trb = priv_req->trb;
-       cdns3_move_deq_to_next_trb(priv_req);
-       cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
-
-       /* Update ring */
-       request = cdns3_next_request(&priv_ep->deferred_req_list);
-       if (request) {
-               priv_req = to_cdns3_request(request);
 
+       /* Update ring only if removed request is on pending_req_list list */
+       if (req_on_hw_ring) {
                link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
                                              (priv_req->start_trb * TRB_SIZE));
                link_trb->control = (link_trb->control & TRB_CYCLE) |
-                                   TRB_TYPE(TRB_LINK) | TRB_CHAIN | TRB_TOGGLE;
-       } else {
-               priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
+                                   TRB_TYPE(TRB_LINK) | TRB_CHAIN;
+
+               if (priv_ep->wa1_trb == priv_req->trb)
+                       cdns3_wa1_restore_cycle_bit(priv_ep);
        }
 
+       cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
+
 not_found:
        spin_unlock_irqrestore(&priv_dev->lock, flags);
        return ret;
index fb8bd60..0d8e3f3 100644 (file)
@@ -445,6 +445,7 @@ static void usblp_cleanup(struct usblp *usblp)
        kfree(usblp->readbuf);
        kfree(usblp->device_id_string);
        kfree(usblp->statusbuf);
+       usb_put_intf(usblp->intf);
        kfree(usblp);
 }
 
@@ -1113,7 +1114,7 @@ static int usblp_probe(struct usb_interface *intf,
        init_waitqueue_head(&usblp->wwait);
        init_usb_anchor(&usblp->urbs);
        usblp->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
-       usblp->intf = intf;
+       usblp->intf = usb_get_intf(intf);
 
        /* Malloc device ID string buffer to the largest expected length,
         * since we can re-query it on an ioctl and a dynamic string
@@ -1198,6 +1199,7 @@ abort:
        kfree(usblp->readbuf);
        kfree(usblp->statusbuf);
        kfree(usblp->device_id_string);
+       usb_put_intf(usblp->intf);
        kfree(usblp);
 abort_ret:
        return retval;
index 2b1f3cc..bf6c81e 100644 (file)
@@ -1177,11 +1177,11 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
                        tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
 
                        bl = bytes - n;
-                       if (bl > 3)
-                               bl = 3;
+                       if (bl > 4)
+                               bl = 4;
 
                        for (i = 0; i < bl; i++)
-                               data[n + i] = (u8) ((tmp >> (n * 8)) & 0xFF);
+                               data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
                }
                break;
 
index f3108d8..15b5f06 100644 (file)
@@ -380,10 +380,7 @@ static int ld_usb_release(struct inode *inode, struct file *file)
                goto exit;
        }
 
-       if (mutex_lock_interruptible(&dev->mutex)) {
-               retval = -ERESTARTSYS;
-               goto exit;
-       }
+       mutex_lock(&dev->mutex);
 
        if (dev->open_count != 1) {
                retval = -ENODEV;
@@ -467,7 +464,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
 
        /* wait for data */
        spin_lock_irq(&dev->rbsl);
-       if (dev->ring_head == dev->ring_tail) {
+       while (dev->ring_head == dev->ring_tail) {
                dev->interrupt_in_done = 0;
                spin_unlock_irq(&dev->rbsl);
                if (file->f_flags & O_NONBLOCK) {
@@ -477,12 +474,17 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
                retval = wait_event_interruptible(dev->read_wait, dev->interrupt_in_done);
                if (retval < 0)
                        goto unlock_exit;
-       } else {
-               spin_unlock_irq(&dev->rbsl);
+
+               spin_lock_irq(&dev->rbsl);
        }
+       spin_unlock_irq(&dev->rbsl);
 
        /* actual_buffer contains actual_length + interrupt_in_buffer */
        actual_buffer = (size_t *)(dev->ring_buffer + dev->ring_tail * (sizeof(size_t)+dev->interrupt_in_endpoint_size));
+       if (*actual_buffer > dev->interrupt_in_endpoint_size) {
+               retval = -EIO;
+               goto unlock_exit;
+       }
        bytes_to_read = min(count, *actual_buffer);
        if (bytes_to_read < *actual_buffer)
                dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n",
@@ -693,10 +695,9 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
                dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n");
 
        dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
-       dev->ring_buffer =
-               kmalloc_array(ring_buffer_size,
-                             sizeof(size_t) + dev->interrupt_in_endpoint_size,
-                             GFP_KERNEL);
+       dev->ring_buffer = kcalloc(ring_buffer_size,
+                       sizeof(size_t) + dev->interrupt_in_endpoint_size,
+                       GFP_KERNEL);
        if (!dev->ring_buffer)
                goto error;
        dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL);
index 9d4c52a..23061f1 100644 (file)
@@ -419,10 +419,7 @@ static int tower_release (struct inode *inode, struct file *file)
                goto exit;
        }
 
-       if (mutex_lock_interruptible(&dev->lock)) {
-               retval = -ERESTARTSYS;
-               goto exit;
-       }
+       mutex_lock(&dev->lock);
 
        if (dev->open_count != 1) {
                dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n",
@@ -881,7 +878,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
                                  get_version_reply,
                                  sizeof(*get_version_reply),
                                  1000);
-       if (result < sizeof(*get_version_reply)) {
+       if (result != sizeof(*get_version_reply)) {
                if (result >= 0)
                        result = -EIO;
                dev_err(idev, "get version request failed: %d\n", result);
index dd0ad67..ef23acc 100644 (file)
@@ -776,7 +776,6 @@ static void ti_close(struct usb_serial_port *port)
        struct ti_port *tport;
        int port_number;
        int status;
-       int do_unlock;
        unsigned long flags;
 
        tdev = usb_get_serial_data(port->serial);
@@ -800,16 +799,13 @@ static void ti_close(struct usb_serial_port *port)
                        "%s - cannot send close port command, %d\n"
                                                        , __func__, status);
 
-       /* if mutex_lock is interrupted, continue anyway */
-       do_unlock = !mutex_lock_interruptible(&tdev->td_open_close_lock);
-       --tport->tp_tdev->td_open_port_count;
-       if (tport->tp_tdev->td_open_port_count <= 0) {
+       mutex_lock(&tdev->td_open_close_lock);
+       --tdev->td_open_port_count;
+       if (tdev->td_open_port_count == 0) {
                /* last port is closed, shut down interrupt urb */
                usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
-               tport->tp_tdev->td_open_port_count = 0;
        }
-       if (do_unlock)
-               mutex_unlock(&tdev->td_open_close_lock);
+       mutex_unlock(&tdev->td_open_close_lock);
 }
 
 
index 96fddc1..d864277 100644 (file)
@@ -1658,7 +1658,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
        struct bus_type *bus = NULL;
        int ret;
        bool resv_msi, msi_remap;
-       phys_addr_t resv_msi_base;
+       phys_addr_t resv_msi_base = 0;
        struct iommu_domain_geometry geo;
        LIST_HEAD(iova_copy);
        LIST_HEAD(group_resv_regions);
index 08ad0d1..a0a2d74 100644 (file)
@@ -852,6 +852,12 @@ static inline int xfer_kern(void *src, void *dst, size_t len)
        return 0;
 }
 
+static inline int kern_xfer(void *dst, void *src, size_t len)
+{
+       memcpy(dst, src, len);
+       return 0;
+}
+
 /**
  * vringh_init_kern - initialize a vringh for a kernelspace vring.
  * @vrh: the vringh to initialize.
@@ -958,7 +964,7 @@ EXPORT_SYMBOL(vringh_iov_pull_kern);
 ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
                             const void *src, size_t len)
 {
-       return vringh_iov_xfer(wiov, (void *)src, len, xfer_kern);
+       return vringh_iov_xfer(wiov, (void *)src, len, kern_xfer);
 }
 EXPORT_SYMBOL(vringh_iov_push_kern);
 
index bdc0824..a8041e4 100644 (file)
@@ -1499,9 +1499,6 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
                 * counter first before updating event flags.
                 */
                virtio_wmb(vq->weak_barriers);
-       } else {
-               used_idx = vq->last_used_idx;
-               wrap_counter = vq->packed.used_wrap_counter;
        }
 
        if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
@@ -1518,7 +1515,9 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
         */
        virtio_mb(vq->weak_barriers);
 
-       if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
+       if (is_used_desc_packed(vq,
+                               vq->last_used_idx,
+                               vq->packed.used_wrap_counter)) {
                END_USE(vq);
                return false;
        }
index bf7e3f2..670700c 100644 (file)
@@ -1761,6 +1761,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
                        btrfs_err(info,
 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
                                  cache->key.objectid);
+                       btrfs_put_block_group(cache);
                        ret = -EINVAL;
                        goto error;
                }
index 19d669d..fe2b876 100644 (file)
@@ -734,8 +734,6 @@ struct btrfs_fs_info {
        struct btrfs_workqueue *fixup_workers;
        struct btrfs_workqueue *delayed_workers;
 
-       /* the extent workers do delayed refs on the extent allocation tree */
-       struct btrfs_workqueue *extent_workers;
        struct task_struct *transaction_kthread;
        struct task_struct *cleaner_kthread;
        u32 thread_pool_size;
@@ -2489,8 +2487,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
                                     int nitems, bool use_global_rsv);
 void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
                                      struct btrfs_block_rsv *rsv);
-void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
-                                   bool qgroup_free);
+void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes);
 
 int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes);
 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
index d949d7d..db9f2c5 100644 (file)
@@ -381,7 +381,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
 out_qgroup:
        btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
 out_fail:
-       btrfs_inode_rsv_release(inode, true);
        if (delalloc_lock)
                mutex_unlock(&inode->delalloc_mutex);
        return ret;
@@ -418,7 +417,6 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
  * btrfs_delalloc_release_extents - release our outstanding_extents
  * @inode: the inode to balance the reservation for.
  * @num_bytes: the number of bytes we originally reserved with
- * @qgroup_free: do we need to free qgroup meta reservation or convert them.
  *
  * When we reserve space we increase outstanding_extents for the extents we may
  * add.  Once we've set the range as delalloc or created our ordered extents we
@@ -426,8 +424,7 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
  * temporarily tracked outstanding_extents.  This _must_ be used in conjunction
  * with btrfs_delalloc_reserve_metadata.
  */
-void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
-                                   bool qgroup_free)
+void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes)
 {
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        unsigned num_extents;
@@ -441,7 +438,7 @@ void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
        if (btrfs_is_testing(fs_info))
                return;
 
-       btrfs_inode_rsv_release(inode, qgroup_free);
+       btrfs_inode_rsv_release(inode, true);
 }
 
 /**
index 044981c..402b61b 100644 (file)
@@ -2008,7 +2008,6 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
        btrfs_destroy_workqueue(fs_info->readahead_workers);
        btrfs_destroy_workqueue(fs_info->flush_workers);
        btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
-       btrfs_destroy_workqueue(fs_info->extent_workers);
        /*
         * Now that all other work queues are destroyed, we can safely destroy
         * the queues used for metadata I/O, since tasks from those other work
@@ -2214,10 +2213,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
                                      max_active, 2);
        fs_info->qgroup_rescan_workers =
                btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
-       fs_info->extent_workers =
-               btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
-                                     min_t(u64, fs_devices->num_devices,
-                                           max_active), 8);
 
        if (!(fs_info->workers && fs_info->delalloc_workers &&
              fs_info->submit_workers && fs_info->flush_workers &&
@@ -2228,7 +2223,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
              fs_info->endio_freespace_worker && fs_info->rmw_workers &&
              fs_info->caching_workers && fs_info->readahead_workers &&
              fs_info->fixup_workers && fs_info->delayed_workers &&
-             fs_info->extent_workers &&
              fs_info->qgroup_rescan_workers)) {
                return -ENOMEM;
        }
index 27e5b26..435a502 100644 (file)
@@ -1692,7 +1692,7 @@ again:
                                    force_page_uptodate);
                if (ret) {
                        btrfs_delalloc_release_extents(BTRFS_I(inode),
-                                                      reserve_bytes, true);
+                                                      reserve_bytes);
                        break;
                }
 
@@ -1704,7 +1704,7 @@ again:
                        if (extents_locked == -EAGAIN)
                                goto again;
                        btrfs_delalloc_release_extents(BTRFS_I(inode),
-                                                      reserve_bytes, true);
+                                                      reserve_bytes);
                        ret = extents_locked;
                        break;
                }
@@ -1772,8 +1772,7 @@ again:
                else
                        free_extent_state(cached_state);
 
-               btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes,
-                                              true);
+               btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
                if (ret) {
                        btrfs_drop_pages(pages, num_pages);
                        break;
@@ -2068,25 +2067,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        struct btrfs_trans_handle *trans;
        struct btrfs_log_ctx ctx;
        int ret = 0, err;
-       u64 len;
 
-       /*
-        * If the inode needs a full sync, make sure we use a full range to
-        * avoid log tree corruption, due to hole detection racing with ordered
-        * extent completion for adjacent ranges, and assertion failures during
-        * hole detection.
-        */
-       if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
-                    &BTRFS_I(inode)->runtime_flags)) {
-               start = 0;
-               end = LLONG_MAX;
-       }
-
-       /*
-        * The range length can be represented by u64, we have to do the typecasts
-        * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
-        */
-       len = (u64)end - (u64)start + 1;
        trace_btrfs_sync_file(file, datasync);
 
        btrfs_init_log_ctx(&ctx, inode);
@@ -2112,6 +2093,19 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
        atomic_inc(&root->log_batch);
 
+       /*
+        * If the inode needs a full sync, make sure we use a full range to
+        * avoid log tree corruption, due to hole detection racing with ordered
+        * extent completion for adjacent ranges, and assertion failures during
+        * hole detection. Do this while holding the inode lock, to avoid races
+        * with other tasks.
+        */
+       if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+                    &BTRFS_I(inode)->runtime_flags)) {
+               start = 0;
+               end = LLONG_MAX;
+       }
+
        /*
         * Before we acquired the inode's lock, someone may have dirtied more
         * pages in the target range. We need to make sure that writeback for
@@ -2139,8 +2133,11 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        /*
         * We have to do this here to avoid the priority inversion of waiting on
         * IO of a lower priority task while holding a transaction open.
+        *
+        * Also, the range length can be represented by u64, we have to do the
+        * typecasts to avoid signed overflow if it's [0, LLONG_MAX].
         */
-       ret = btrfs_wait_ordered_range(inode, start, len);
+       ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1);
        if (ret) {
                up_write(&BTRFS_I(inode)->dio_sem);
                inode_unlock(inode);
index 63cad78..37345fb 100644 (file)
@@ -501,13 +501,13 @@ again:
        ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
                                              prealloc, prealloc, &alloc_hint);
        if (ret) {
-               btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc, true);
+               btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
                btrfs_delalloc_release_metadata(BTRFS_I(inode), prealloc, true);
                goto out_put;
        }
 
        ret = btrfs_write_out_ino_cache(root, trans, path, inode);
-       btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc, false);
+       btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
 out_put:
        iput(inode);
 out_release:
index 0f2754e..c3f386b 100644 (file)
@@ -2206,7 +2206,7 @@ again:
 
        ClearPageChecked(page);
        set_page_dirty(page);
-       btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, false);
+       btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
 out:
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
                             &cached_state);
@@ -4951,7 +4951,7 @@ again:
        if (!page) {
                btrfs_delalloc_release_space(inode, data_reserved,
                                             block_start, blocksize, true);
-               btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, true);
+               btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
                ret = -ENOMEM;
                goto out;
        }
@@ -5018,7 +5018,7 @@ out_unlock:
        if (ret)
                btrfs_delalloc_release_space(inode, data_reserved, block_start,
                                             blocksize, true);
-       btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, (ret != 0));
+       btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
        unlock_page(page);
        put_page(page);
 out:
@@ -8709,7 +8709,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
                } else if (ret >= 0 && (size_t)ret < count)
                        btrfs_delalloc_release_space(inode, data_reserved,
                                        offset, count - (size_t)ret, true);
-               btrfs_delalloc_release_extents(BTRFS_I(inode), count, false);
+               btrfs_delalloc_release_extents(BTRFS_I(inode), count);
        }
 out:
        if (wakeup)
@@ -9059,7 +9059,7 @@ again:
        unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
 
        if (!ret2) {
-               btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, true);
+               btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
                sb_end_pagefault(inode->i_sb);
                extent_changeset_free(data_reserved);
                return VM_FAULT_LOCKED;
@@ -9068,7 +9068,7 @@ again:
 out_unlock:
        unlock_page(page);
 out:
-       btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, (ret != 0));
+       btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
        btrfs_delalloc_release_space(inode, data_reserved, page_start,
                                     reserved_space, (ret != 0));
 out_noreserve:
index de730e5..7c145a4 100644 (file)
@@ -1360,8 +1360,7 @@ again:
                unlock_page(pages[i]);
                put_page(pages[i]);
        }
-       btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT,
-                                      false);
+       btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
        extent_changeset_free(data_reserved);
        return i_done;
 out:
@@ -1372,8 +1371,7 @@ out:
        btrfs_delalloc_release_space(inode, data_reserved,
                        start_index << PAGE_SHIFT,
                        page_cnt << PAGE_SHIFT, true);
-       btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT,
-                                      true);
+       btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
        extent_changeset_free(data_reserved);
        return ret;
 
index c4bb699..3ad1516 100644 (file)
@@ -3629,7 +3629,7 @@ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
                return 0;
 
        BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
-       trace_qgroup_meta_reserve(root, type, (s64)num_bytes);
+       trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
        ret = qgroup_reserve(root, num_bytes, enforce, type);
        if (ret < 0)
                return ret;
@@ -3676,7 +3676,7 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
         */
        num_bytes = sub_root_meta_rsv(root, num_bytes, type);
        BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
-       trace_qgroup_meta_reserve(root, type, -(s64)num_bytes);
+       trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
        btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
                                  num_bytes, type);
 }
index 0050465..5cd42b6 100644 (file)
@@ -3277,6 +3277,8 @@ static int relocate_file_extent_cluster(struct inode *inode,
                        if (!page) {
                                btrfs_delalloc_release_metadata(BTRFS_I(inode),
                                                        PAGE_SIZE, true);
+                               btrfs_delalloc_release_extents(BTRFS_I(inode),
+                                                       PAGE_SIZE);
                                ret = -ENOMEM;
                                goto out;
                        }
@@ -3297,7 +3299,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
                                btrfs_delalloc_release_metadata(BTRFS_I(inode),
                                                        PAGE_SIZE, true);
                                btrfs_delalloc_release_extents(BTRFS_I(inode),
-                                                              PAGE_SIZE, true);
+                                                              PAGE_SIZE);
                                ret = -EIO;
                                goto out;
                        }
@@ -3326,7 +3328,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
                        btrfs_delalloc_release_metadata(BTRFS_I(inode),
                                                         PAGE_SIZE, true);
                        btrfs_delalloc_release_extents(BTRFS_I(inode),
-                                                      PAGE_SIZE, true);
+                                                      PAGE_SIZE);
 
                        clear_extent_bits(&BTRFS_I(inode)->io_tree,
                                          page_start, page_end,
@@ -3342,8 +3344,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
                put_page(page);
 
                index++;
-               btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE,
-                                              false);
+               btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
                balance_dirty_pages_ratelimited(inode->i_mapping);
                btrfs_throttle(fs_info);
        }
index c049c7b..1a135d1 100644 (file)
@@ -169,7 +169,13 @@ cifs_read_super(struct super_block *sb)
        else
                sb->s_maxbytes = MAX_NON_LFS;
 
-       /* Some very old servers like DOS and OS/2 used 2 second granularity */
+       /*
+        * Some very old servers like DOS and OS/2 used 2 second granularity
+        * (while all current servers use 100ns granularity - see MS-DTYP)
+        * but 1 second is the maximum allowed granularity for the VFS
+        * so for old servers set time granularity to 1 second while for
+        * everything else (current servers) set it to 100ns.
+        */
        if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
            ((tcon->ses->capabilities &
              tcon->ses->server->vals->cap_nt_find) == 0) &&
index 50dfd90..d78bfcc 100644 (file)
@@ -1391,6 +1391,11 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
 struct cifsInodeInfo {
        bool can_cache_brlcks;
        struct list_head llist; /* locks helb by this inode */
+       /*
+        * NOTE: Some code paths call down_read(lock_sem) twice, so
+        * we must always use use cifs_down_write() instead of down_write()
+        * for this semaphore to avoid deadlocks.
+        */
        struct rw_semaphore lock_sem;   /* protect the fields above */
        /* BB add in lists for dirty pages i.e. write caching info for oplock */
        struct list_head openFileList;
index e53e9f6..fe597d3 100644 (file)
@@ -170,6 +170,7 @@ extern int cifs_unlock_range(struct cifsFileInfo *cfile,
                             struct file_lock *flock, const unsigned int xid);
 extern int cifs_push_mandatory_locks(struct cifsFileInfo *cfile);
 
+extern void cifs_down_write(struct rw_semaphore *sem);
 extern struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid,
                                              struct file *file,
                                              struct tcon_link *tlink,
index a64dfa9..ccaa8ba 100644 (file)
@@ -564,9 +564,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
        spin_lock(&GlobalMid_Lock);
        list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
                mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+               kref_get(&mid_entry->refcount);
                if (mid_entry->mid_state == MID_REQUEST_SUBMITTED)
                        mid_entry->mid_state = MID_RETRY_NEEDED;
                list_move(&mid_entry->qhead, &retry_list);
+               mid_entry->mid_flags |= MID_DELETED;
        }
        spin_unlock(&GlobalMid_Lock);
        mutex_unlock(&server->srv_mutex);
@@ -576,6 +578,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
                mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
                list_del_init(&mid_entry->qhead);
                mid_entry->callback(mid_entry);
+               cifs_mid_q_entry_release(mid_entry);
        }
 
        if (cifs_rdma_enabled(server)) {
@@ -895,8 +898,10 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
        if (mid->mid_flags & MID_DELETED)
                printk_once(KERN_WARNING
                            "trying to dequeue a deleted mid\n");
-       else
+       else {
                list_del_init(&mid->qhead);
+               mid->mid_flags |= MID_DELETED;
+       }
        spin_unlock(&GlobalMid_Lock);
 }
 
@@ -966,8 +971,10 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
                list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
                        mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
                        cifs_dbg(FYI, "Clearing mid 0x%llx\n", mid_entry->mid);
+                       kref_get(&mid_entry->refcount);
                        mid_entry->mid_state = MID_SHUTDOWN;
                        list_move(&mid_entry->qhead, &dispose_list);
+                       mid_entry->mid_flags |= MID_DELETED;
                }
                spin_unlock(&GlobalMid_Lock);
 
@@ -977,6 +984,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
                        cifs_dbg(FYI, "Callback mid 0x%llx\n", mid_entry->mid);
                        list_del_init(&mid_entry->qhead);
                        mid_entry->callback(mid_entry);
+                       cifs_mid_q_entry_release(mid_entry);
                }
                /* 1/8th of sec is more than enough time for them to exit */
                msleep(125);
@@ -3882,8 +3890,12 @@ generic_ip_connect(struct TCP_Server_Info *server)
 
        rc = socket->ops->connect(socket, saddr, slen,
                                  server->noblockcnt ? O_NONBLOCK : 0);
-
-       if (rc == -EINPROGRESS)
+       /*
+        * When mounting SMB root file systems, we do not want to block in
+        * connect. Otherwise bail out and then let cifs_reconnect() perform
+        * reconnect failover - if possible.
+        */
+       if (server->noblockcnt && rc == -EINPROGRESS)
                rc = 0;
        if (rc < 0) {
                cifs_dbg(FYI, "Error %d connecting to server\n", rc);
index 5ad15de..fa7b0fa 100644 (file)
@@ -281,6 +281,13 @@ cifs_has_mand_locks(struct cifsInodeInfo *cinode)
        return has_locks;
 }
 
+void
+cifs_down_write(struct rw_semaphore *sem)
+{
+       while (!down_write_trylock(sem))
+               msleep(10);
+}
+
 struct cifsFileInfo *
 cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
                  struct tcon_link *tlink, __u32 oplock)
@@ -306,7 +313,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
        INIT_LIST_HEAD(&fdlocks->locks);
        fdlocks->cfile = cfile;
        cfile->llist = fdlocks;
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
        list_add(&fdlocks->llist, &cinode->llist);
        up_write(&cinode->lock_sem);
 
@@ -405,10 +412,11 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
        bool oplock_break_cancelled;
 
        spin_lock(&tcon->open_file_lock);
-
+       spin_lock(&cifsi->open_file_lock);
        spin_lock(&cifs_file->file_info_lock);
        if (--cifs_file->count > 0) {
                spin_unlock(&cifs_file->file_info_lock);
+               spin_unlock(&cifsi->open_file_lock);
                spin_unlock(&tcon->open_file_lock);
                return;
        }
@@ -421,9 +429,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
        cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
 
        /* remove it from the lists */
-       spin_lock(&cifsi->open_file_lock);
        list_del(&cifs_file->flist);
-       spin_unlock(&cifsi->open_file_lock);
        list_del(&cifs_file->tlist);
        atomic_dec(&tcon->num_local_opens);
 
@@ -440,6 +446,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
                cifs_set_oplock_level(cifsi, 0);
        }
 
+       spin_unlock(&cifsi->open_file_lock);
        spin_unlock(&tcon->open_file_lock);
 
        oplock_break_cancelled = wait_oplock_handler ?
@@ -464,7 +471,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
         * Delete any outstanding lock records. We'll lose them when the file
         * is closed anyway.
         */
-       down_write(&cifsi->lock_sem);
+       cifs_down_write(&cifsi->lock_sem);
        list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
                list_del(&li->llist);
                cifs_del_lock_waiters(li);
@@ -1027,7 +1034,7 @@ static void
 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
 {
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
        list_add_tail(&lock->llist, &cfile->llist->locks);
        up_write(&cinode->lock_sem);
 }
@@ -1049,7 +1056,7 @@ cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
 
 try_again:
        exist = false;
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
 
        exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
                                        lock->type, lock->flags, &conf_lock,
@@ -1072,7 +1079,7 @@ try_again:
                                        (lock->blist.next == &lock->blist));
                if (!rc)
                        goto try_again;
-               down_write(&cinode->lock_sem);
+               cifs_down_write(&cinode->lock_sem);
                list_del_init(&lock->blist);
        }
 
@@ -1125,7 +1132,7 @@ cifs_posix_lock_set(struct file *file, struct file_lock *flock)
                return rc;
 
 try_again:
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                up_write(&cinode->lock_sem);
                return rc;
@@ -1331,7 +1338,7 @@ cifs_push_locks(struct cifsFileInfo *cfile)
        int rc = 0;
 
        /* we are going to update can_cache_brlcks here - need a write access */
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                up_write(&cinode->lock_sem);
                return rc;
@@ -1522,7 +1529,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
        if (!buf)
                return -ENOMEM;
 
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
index 5dcc95b..df93778 100644 (file)
@@ -2475,9 +2475,9 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
                        rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid);
                        cifsFileInfo_put(wfile);
                        if (rc)
-                               return rc;
+                               goto cifs_setattr_exit;
                } else if (rc != -EBADF)
-                       return rc;
+                       goto cifs_setattr_exit;
                else
                        rc = 0;
        }
index b7421a0..5148106 100644 (file)
@@ -171,6 +171,9 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
        /* we do not want to loop forever */
        last_mid = cur_mid;
        cur_mid++;
+       /* avoid 0xFFFF MID */
+       if (cur_mid == 0xffff)
+               cur_mid++;
 
        /*
         * This nested loop looks more expensive than it is.
index e6a1fc7..8b0b512 100644 (file)
@@ -145,7 +145,7 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
 
        cur = buf;
 
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
        list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
                if (flock->fl_start > li->offset ||
                    (flock->fl_start + length) <
index 308ad0f..ca3de62 100644 (file)
@@ -86,22 +86,8 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
 
 static void _cifs_mid_q_entry_release(struct kref *refcount)
 {
-       struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
-                                              refcount);
-
-       mempool_free(mid, cifs_mid_poolp);
-}
-
-void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
-{
-       spin_lock(&GlobalMid_Lock);
-       kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
-       spin_unlock(&GlobalMid_Lock);
-}
-
-void
-DeleteMidQEntry(struct mid_q_entry *midEntry)
-{
+       struct mid_q_entry *midEntry =
+                       container_of(refcount, struct mid_q_entry, refcount);
 #ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        __u16 smb_cmd = le16_to_cpu(midEntry->command);
@@ -166,6 +152,19 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
                }
        }
 #endif
+
+       mempool_free(midEntry, cifs_mid_poolp);
+}
+
+void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
+{
+       spin_lock(&GlobalMid_Lock);
+       kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
+       spin_unlock(&GlobalMid_Lock);
+}
+
+void DeleteMidQEntry(struct mid_q_entry *midEntry)
+{
        cifs_mid_q_entry_release(midEntry);
 }
 
@@ -173,8 +172,10 @@ void
 cifs_delete_mid(struct mid_q_entry *mid)
 {
        spin_lock(&GlobalMid_Lock);
-       list_del_init(&mid->qhead);
-       mid->mid_flags |= MID_DELETED;
+       if (!(mid->mid_flags & MID_DELETED)) {
+               list_del_init(&mid->qhead);
+               mid->mid_flags |= MID_DELETED;
+       }
        spin_unlock(&GlobalMid_Lock);
 
        DeleteMidQEntry(mid);
@@ -872,7 +873,10 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
                rc = -EHOSTDOWN;
                break;
        default:
-               list_del_init(&mid->qhead);
+               if (!(mid->mid_flags & MID_DELETED)) {
+                       list_del_init(&mid->qhead);
+                       mid->mid_flags |= MID_DELETED;
+               }
                cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
index 6bf81f9..2cc43cd 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -220,10 +220,11 @@ static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
 
        for (;;) {
                entry = xas_find_conflict(xas);
+               if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
+                       return entry;
                if (dax_entry_order(entry) < order)
                        return XA_RETRY_ENTRY;
-               if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
-                               !dax_is_locked(entry))
+               if (!dax_is_locked(entry))
                        return entry;
 
                wq = dax_entry_waitqueue(xas, entry, &ewait.key);
index 6419a2b..3e8cebf 100644 (file)
@@ -5,6 +5,7 @@
 
 obj-$(CONFIG_FUSE_FS) += fuse.o
 obj-$(CONFIG_CUSE) += cuse.o
-obj-$(CONFIG_VIRTIO_FS) += virtio_fs.o
+obj-$(CONFIG_VIRTIO_FS) += virtiofs.o
 
 fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o
+virtiofs-y += virtio_fs.o
index dadd617..ed1abc9 100644 (file)
@@ -276,10 +276,12 @@ static void flush_bg_queue(struct fuse_conn *fc)
 void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
 {
        struct fuse_iqueue *fiq = &fc->iq;
-       bool async = req->args->end;
+       bool async;
 
        if (test_and_set_bit(FR_FINISHED, &req->flags))
                goto put_request;
+
+       async = req->args->end;
        /*
         * test_and_set_bit() implies smp_mb() between bit
         * changing and below intr_entry check. Pairs with
index d572c90..54d638f 100644 (file)
@@ -405,7 +405,8 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
        else
                fuse_invalidate_entry_cache(entry);
 
-       fuse_advise_use_readdirplus(dir);
+       if (inode)
+               fuse_advise_use_readdirplus(dir);
        return newent;
 
  out_iput:
@@ -1521,6 +1522,19 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
                is_truncate = true;
        }
 
+       /* Flush dirty data/metadata before non-truncate SETATTR */
+       if (is_wb && S_ISREG(inode->i_mode) &&
+           attr->ia_valid &
+                       (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET |
+                        ATTR_TIMES_SET)) {
+               err = write_inode_now(inode, true);
+               if (err)
+                       return err;
+
+               fuse_set_nowrite(inode);
+               fuse_release_nowrite(inode);
+       }
+
        if (is_truncate) {
                fuse_set_nowrite(inode);
                set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
index 0f02256..db48a5c 100644 (file)
@@ -217,7 +217,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
 {
        struct fuse_conn *fc = get_fuse_conn(inode);
        int err;
-       bool lock_inode = (file->f_flags & O_TRUNC) &&
+       bool is_wb_truncate = (file->f_flags & O_TRUNC) &&
                          fc->atomic_o_trunc &&
                          fc->writeback_cache;
 
@@ -225,16 +225,20 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
        if (err)
                return err;
 
-       if (lock_inode)
+       if (is_wb_truncate) {
                inode_lock(inode);
+               fuse_set_nowrite(inode);
+       }
 
        err = fuse_do_open(fc, get_node_id(inode), file, isdir);
 
        if (!err)
                fuse_finish_open(inode, file);
 
-       if (lock_inode)
+       if (is_wb_truncate) {
+               fuse_release_nowrite(inode);
                inode_unlock(inode);
+       }
 
        return err;
 }
@@ -1997,7 +2001,7 @@ static int fuse_writepages_fill(struct page *page,
 
        if (!data->ff) {
                err = -EIO;
-               data->ff = fuse_write_file_get(fc, get_fuse_inode(inode));
+               data->ff = fuse_write_file_get(fc, fi);
                if (!data->ff)
                        goto out_unlock;
        }
@@ -2042,8 +2046,6 @@ static int fuse_writepages_fill(struct page *page,
         * under writeback, so we can release the page lock.
         */
        if (data->wpa == NULL) {
-               struct fuse_inode *fi = get_fuse_inode(inode);
-
                err = -ENOMEM;
                wpa = fuse_writepage_args_alloc();
                if (!wpa) {
index 956aeaf..d148188 100644 (file)
@@ -479,6 +479,7 @@ struct fuse_fs_context {
        bool destroy:1;
        bool no_control:1;
        bool no_force_umount:1;
+       bool no_mount_options:1;
        unsigned int max_read;
        unsigned int blksize;
        const char *subtype;
@@ -713,6 +714,9 @@ struct fuse_conn {
        /** Do not allow MNT_FORCE umount */
        unsigned int no_force_umount:1;
 
+       /* Do not show mount options */
+       unsigned int no_mount_options:1;
+
        /** The number of requests waiting for completion */
        atomic_t num_waiting;
 
index e040e2a..16aec32 100644 (file)
@@ -558,6 +558,9 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
        struct super_block *sb = root->d_sb;
        struct fuse_conn *fc = get_fuse_conn_super(sb);
 
+       if (fc->no_mount_options)
+               return 0;
+
        seq_printf(m, ",user_id=%u", from_kuid_munged(fc->user_ns, fc->user_id));
        seq_printf(m, ",group_id=%u", from_kgid_munged(fc->user_ns, fc->group_id));
        if (fc->default_permissions)
@@ -1180,6 +1183,7 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
        fc->destroy = ctx->destroy;
        fc->no_control = ctx->no_control;
        fc->no_force_umount = ctx->no_force_umount;
+       fc->no_mount_options = ctx->no_mount_options;
 
        err = -ENOMEM;
        root = fuse_get_root_inode(sb, ctx->rootmode);
index 6af3f13..a5c8604 100644 (file)
@@ -30,6 +30,7 @@ struct virtio_fs_vq {
        struct virtqueue *vq;     /* protected by ->lock */
        struct work_struct done_work;
        struct list_head queued_reqs;
+       struct list_head end_reqs;      /* End these requests */
        struct delayed_work dispatch_work;
        struct fuse_dev *fud;
        bool connected;
@@ -54,6 +55,9 @@ struct virtio_fs_forget {
        struct list_head list;
 };
 
+static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
+                                struct fuse_req *req, bool in_flight);
+
 static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
 {
        struct virtio_fs *fs = vq->vdev->priv;
@@ -66,6 +70,19 @@ static inline struct fuse_pqueue *vq_to_fpq(struct virtqueue *vq)
        return &vq_to_fsvq(vq)->fud->pq;
 }
 
+/* Should be called with fsvq->lock held. */
+static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
+{
+       fsvq->in_flight++;
+}
+
+/* Should be called with fsvq->lock held. */
+static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
+{
+       WARN_ON(fsvq->in_flight <= 0);
+       fsvq->in_flight--;
+}
+
 static void release_virtio_fs_obj(struct kref *ref)
 {
        struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount);
@@ -109,22 +126,6 @@ static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
        flush_delayed_work(&fsvq->dispatch_work);
 }
 
-static inline void drain_hiprio_queued_reqs(struct virtio_fs_vq *fsvq)
-{
-       struct virtio_fs_forget *forget;
-
-       spin_lock(&fsvq->lock);
-       while (1) {
-               forget = list_first_entry_or_null(&fsvq->queued_reqs,
-                                               struct virtio_fs_forget, list);
-               if (!forget)
-                       break;
-               list_del(&forget->list);
-               kfree(forget);
-       }
-       spin_unlock(&fsvq->lock);
-}
-
 static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
 {
        struct virtio_fs_vq *fsvq;
@@ -132,9 +133,6 @@ static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
 
        for (i = 0; i < fs->nvqs; i++) {
                fsvq = &fs->vqs[i];
-               if (i == VQ_HIPRIO)
-                       drain_hiprio_queued_reqs(fsvq);
-
                virtio_fs_drain_queue(fsvq);
        }
 }
@@ -253,14 +251,66 @@ static void virtio_fs_hiprio_done_work(struct work_struct *work)
 
                while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
                        kfree(req);
-                       fsvq->in_flight--;
+                       dec_in_flight_req(fsvq);
                }
        } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
        spin_unlock(&fsvq->lock);
 }
 
-static void virtio_fs_dummy_dispatch_work(struct work_struct *work)
+static void virtio_fs_request_dispatch_work(struct work_struct *work)
 {
+       struct fuse_req *req;
+       struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
+                                                dispatch_work.work);
+       struct fuse_conn *fc = fsvq->fud->fc;
+       int ret;
+
+       pr_debug("virtio-fs: worker %s called.\n", __func__);
+       while (1) {
+               spin_lock(&fsvq->lock);
+               req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
+                                              list);
+               if (!req) {
+                       spin_unlock(&fsvq->lock);
+                       break;
+               }
+
+               list_del_init(&req->list);
+               spin_unlock(&fsvq->lock);
+               fuse_request_end(fc, req);
+       }
+
+       /* Dispatch pending requests */
+       while (1) {
+               spin_lock(&fsvq->lock);
+               req = list_first_entry_or_null(&fsvq->queued_reqs,
+                                              struct fuse_req, list);
+               if (!req) {
+                       spin_unlock(&fsvq->lock);
+                       return;
+               }
+               list_del_init(&req->list);
+               spin_unlock(&fsvq->lock);
+
+               ret = virtio_fs_enqueue_req(fsvq, req, true);
+               if (ret < 0) {
+                       if (ret == -ENOMEM || ret == -ENOSPC) {
+                               spin_lock(&fsvq->lock);
+                               list_add_tail(&req->list, &fsvq->queued_reqs);
+                               schedule_delayed_work(&fsvq->dispatch_work,
+                                                     msecs_to_jiffies(1));
+                               spin_unlock(&fsvq->lock);
+                               return;
+                       }
+                       req->out.h.error = ret;
+                       spin_lock(&fsvq->lock);
+                       dec_in_flight_req(fsvq);
+                       spin_unlock(&fsvq->lock);
+                       pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
+                              ret);
+                       fuse_request_end(fc, req);
+               }
+       }
 }
 
 static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
@@ -286,6 +336,7 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
 
                list_del(&forget->list);
                if (!fsvq->connected) {
+                       dec_in_flight_req(fsvq);
                        spin_unlock(&fsvq->lock);
                        kfree(forget);
                        continue;
@@ -307,13 +358,13 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
                        } else {
                                pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
                                         ret);
+                               dec_in_flight_req(fsvq);
                                kfree(forget);
                        }
                        spin_unlock(&fsvq->lock);
                        return;
                }
 
-               fsvq->in_flight++;
                notify = virtqueue_kick_prepare(vq);
                spin_unlock(&fsvq->lock);
 
@@ -452,7 +503,7 @@ static void virtio_fs_requests_done_work(struct work_struct *work)
 
                fuse_request_end(fc, req);
                spin_lock(&fsvq->lock);
-               fsvq->in_flight--;
+               dec_in_flight_req(fsvq);
                spin_unlock(&fsvq->lock);
        }
 }
@@ -502,6 +553,7 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
        names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name;
        INIT_WORK(&fs->vqs[VQ_HIPRIO].done_work, virtio_fs_hiprio_done_work);
        INIT_LIST_HEAD(&fs->vqs[VQ_HIPRIO].queued_reqs);
+       INIT_LIST_HEAD(&fs->vqs[VQ_HIPRIO].end_reqs);
        INIT_DELAYED_WORK(&fs->vqs[VQ_HIPRIO].dispatch_work,
                        virtio_fs_hiprio_dispatch_work);
        spin_lock_init(&fs->vqs[VQ_HIPRIO].lock);
@@ -511,8 +563,9 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
                spin_lock_init(&fs->vqs[i].lock);
                INIT_WORK(&fs->vqs[i].done_work, virtio_fs_requests_done_work);
                INIT_DELAYED_WORK(&fs->vqs[i].dispatch_work,
-                                       virtio_fs_dummy_dispatch_work);
+                                 virtio_fs_request_dispatch_work);
                INIT_LIST_HEAD(&fs->vqs[i].queued_reqs);
+               INIT_LIST_HEAD(&fs->vqs[i].end_reqs);
                snprintf(fs->vqs[i].name, sizeof(fs->vqs[i].name),
                         "requests.%u", i - VQ_REQUEST);
                callbacks[i] = virtio_fs_vq_done;
@@ -708,6 +761,7 @@ __releases(fiq->lock)
                        list_add_tail(&forget->list, &fsvq->queued_reqs);
                        schedule_delayed_work(&fsvq->dispatch_work,
                                        msecs_to_jiffies(1));
+                       inc_in_flight_req(fsvq);
                } else {
                        pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
                                 ret);
@@ -717,7 +771,7 @@ __releases(fiq->lock)
                goto out;
        }
 
-       fsvq->in_flight++;
+       inc_in_flight_req(fsvq);
        notify = virtqueue_kick_prepare(vq);
 
        spin_unlock(&fsvq->lock);
@@ -819,7 +873,7 @@ static unsigned int sg_init_fuse_args(struct scatterlist *sg,
 
 /* Add a request to a virtqueue and kick the device */
 static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
-                                struct fuse_req *req)
+                                struct fuse_req *req, bool in_flight)
 {
        /* requests need at least 4 elements */
        struct scatterlist *stack_sgs[6];
@@ -835,6 +889,7 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
        unsigned int i;
        int ret;
        bool notify;
+       struct fuse_pqueue *fpq;
 
        /* Does the sglist fit on the stack? */
        total_sgs = sg_count_fuse_req(req);
@@ -889,7 +944,17 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
                goto out;
        }
 
-       fsvq->in_flight++;
+       /* Request successfully sent. */
+       fpq = &fsvq->fud->pq;
+       spin_lock(&fpq->lock);
+       list_add_tail(&req->list, fpq->processing);
+       spin_unlock(&fpq->lock);
+       set_bit(FR_SENT, &req->flags);
+       /* matches barrier in request_wait_answer() */
+       smp_mb__after_atomic();
+
+       if (!in_flight)
+               inc_in_flight_req(fsvq);
        notify = virtqueue_kick_prepare(vq);
 
        spin_unlock(&fsvq->lock);
@@ -915,9 +980,8 @@ __releases(fiq->lock)
 {
        unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
        struct virtio_fs *fs;
-       struct fuse_conn *fc;
        struct fuse_req *req;
-       struct fuse_pqueue *fpq;
+       struct virtio_fs_vq *fsvq;
        int ret;
 
        WARN_ON(list_empty(&fiq->pending));
@@ -928,44 +992,36 @@ __releases(fiq->lock)
        spin_unlock(&fiq->lock);
 
        fs = fiq->priv;
-       fc = fs->vqs[queue_id].fud->fc;
 
        pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
                  __func__, req->in.h.opcode, req->in.h.unique,
                 req->in.h.nodeid, req->in.h.len,
                 fuse_len_args(req->args->out_numargs, req->args->out_args));
 
-       fpq = &fs->vqs[queue_id].fud->pq;
-       spin_lock(&fpq->lock);
-       if (!fpq->connected) {
-               spin_unlock(&fpq->lock);
-               req->out.h.error = -ENODEV;
-               pr_err("virtio-fs: %s disconnected\n", __func__);
-               fuse_request_end(fc, req);
-               return;
-       }
-       list_add_tail(&req->list, fpq->processing);
-       spin_unlock(&fpq->lock);
-       set_bit(FR_SENT, &req->flags);
-       /* matches barrier in request_wait_answer() */
-       smp_mb__after_atomic();
-
-retry:
-       ret = virtio_fs_enqueue_req(&fs->vqs[queue_id], req);
+       fsvq = &fs->vqs[queue_id];
+       ret = virtio_fs_enqueue_req(fsvq, req, false);
        if (ret < 0) {
                if (ret == -ENOMEM || ret == -ENOSPC) {
-                       /* Virtqueue full. Retry submission */
-                       /* TODO use completion instead of timeout */
-                       usleep_range(20, 30);
-                       goto retry;
+                       /*
+                        * Virtqueue full. Retry submission from worker
+                        * context as we might be holding fc->bg_lock.
+                        */
+                       spin_lock(&fsvq->lock);
+                       list_add_tail(&req->list, &fsvq->queued_reqs);
+                       inc_in_flight_req(fsvq);
+                       schedule_delayed_work(&fsvq->dispatch_work,
+                                               msecs_to_jiffies(1));
+                       spin_unlock(&fsvq->lock);
+                       return;
                }
                req->out.h.error = ret;
                pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);
-               spin_lock(&fpq->lock);
-               clear_bit(FR_SENT, &req->flags);
-               list_del_init(&req->list);
-               spin_unlock(&fpq->lock);
-               fuse_request_end(fc, req);
+
+               /* Can't end request in submission context. Use a worker */
+               spin_lock(&fsvq->lock);
+               list_add_tail(&req->list, &fsvq->end_reqs);
+               schedule_delayed_work(&fsvq->dispatch_work, 0);
+               spin_unlock(&fsvq->lock);
                return;
        }
 }
@@ -992,6 +1048,7 @@ static int virtio_fs_fill_super(struct super_block *sb)
                .destroy = true,
                .no_control = true,
                .no_force_umount = true,
+               .no_mount_options = true,
        };
 
        mutex_lock(&virtio_fs_mutex);
index 681b446..18daf49 100644 (file)
@@ -1540,17 +1540,23 @@ static int gfs2_init_fs_context(struct fs_context *fc)
 {
        struct gfs2_args *args;
 
-       args = kzalloc(sizeof(*args), GFP_KERNEL);
+       args = kmalloc(sizeof(*args), GFP_KERNEL);
        if (args == NULL)
                return -ENOMEM;
 
-       args->ar_quota = GFS2_QUOTA_DEFAULT;
-       args->ar_data = GFS2_DATA_DEFAULT;
-       args->ar_commit = 30;
-       args->ar_statfs_quantum = 30;
-       args->ar_quota_quantum = 60;
-       args->ar_errors = GFS2_ERRORS_DEFAULT;
+       if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+               struct gfs2_sbd *sdp = fc->root->d_sb->s_fs_info;
 
+               *args = sdp->sd_args;
+       } else {
+               memset(args, 0, sizeof(*args));
+               args->ar_quota = GFS2_QUOTA_DEFAULT;
+               args->ar_data = GFS2_DATA_DEFAULT;
+               args->ar_commit = 30;
+               args->ar_statfs_quantum = 30;
+               args->ar_quota_quantum = 60;
+               args->ar_errors = GFS2_ERRORS_DEFAULT;
+       }
        fc->fs_private = args;
        fc->ops = &gfs2_context_ops;
        return 0;
@@ -1600,6 +1606,7 @@ static int gfs2_meta_get_tree(struct fs_context *fc)
 }
 
 static const struct fs_context_operations gfs2_meta_context_ops = {
+       .free        = gfs2_fc_free,
        .get_tree    = gfs2_meta_get_tree,
 };
 
index 67dbe02..f9a3899 100644 (file)
@@ -197,6 +197,7 @@ struct io_ring_ctx {
                unsigned                sq_entries;
                unsigned                sq_mask;
                unsigned                sq_thread_idle;
+               unsigned                cached_sq_dropped;
                struct io_uring_sqe     *sq_sqes;
 
                struct list_head        defer_list;
@@ -212,6 +213,7 @@ struct io_ring_ctx {
 
        struct {
                unsigned                cached_cq_tail;
+               atomic_t                cached_cq_overflow;
                unsigned                cq_entries;
                unsigned                cq_mask;
                struct wait_queue_head  cq_wait;
@@ -420,7 +422,8 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
                                       struct io_kiocb *req)
 {
-       return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped;
+       return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
+                                       + atomic_read(&ctx->cached_cq_overflow);
 }
 
 static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
@@ -567,9 +570,8 @@ static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
                WRITE_ONCE(cqe->res, res);
                WRITE_ONCE(cqe->flags, 0);
        } else {
-               unsigned overflow = READ_ONCE(ctx->rings->cq_overflow);
-
-               WRITE_ONCE(ctx->rings->cq_overflow, overflow + 1);
+               WRITE_ONCE(ctx->rings->cq_overflow,
+                               atomic_inc_return(&ctx->cached_cq_overflow));
        }
 }
 
@@ -735,6 +737,14 @@ static unsigned io_cqring_events(struct io_rings *rings)
        return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
 }
 
+static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
+{
+       struct io_rings *rings = ctx->rings;
+
+       /* make sure SQ entry isn't read before tail */
+       return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
+}
+
 /*
  * Find and free completed poll iocbs
  */
@@ -864,19 +874,11 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
        mutex_unlock(&ctx->uring_lock);
 }
 
-static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
-                          long min)
+static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+                           long min)
 {
-       int iters, ret = 0;
+       int iters = 0, ret = 0;
 
-       /*
-        * We disallow the app entering submit/complete with polling, but we
-        * still need to lock the ring to prevent racing with polled issue
-        * that got punted to a workqueue.
-        */
-       mutex_lock(&ctx->uring_lock);
-
-       iters = 0;
        do {
                int tmin = 0;
 
@@ -912,6 +914,21 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
                ret = 0;
        } while (min && !*nr_events && !need_resched());
 
+       return ret;
+}
+
+static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+                          long min)
+{
+       int ret;
+
+       /*
+        * We disallow the app entering submit/complete with polling, but we
+        * still need to lock the ring to prevent racing with polled issue
+        * that got punted to a workqueue.
+        */
+       mutex_lock(&ctx->uring_lock);
+       ret = __io_iopoll_check(ctx, nr_events, min);
        mutex_unlock(&ctx->uring_lock);
        return ret;
 }
@@ -1107,6 +1124,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
 
                kiocb->ki_flags |= IOCB_HIPRI;
                kiocb->ki_complete = io_complete_rw_iopoll;
+               req->result = 0;
        } else {
                if (kiocb->ki_flags & IOCB_HIPRI)
                        return -EINVAL;
@@ -1877,7 +1895,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 {
        struct io_ring_ctx *ctx;
-       struct io_kiocb *req;
+       struct io_kiocb *req, *prev;
        unsigned long flags;
 
        req = container_of(timer, struct io_kiocb, timeout.timer);
@@ -1885,6 +1903,15 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
        atomic_inc(&ctx->cq_timeouts);
 
        spin_lock_irqsave(&ctx->completion_lock, flags);
+       /*
+        * Adjust the reqs sequence before the current one because it
+        * will consume a slot in the cq_ring and the the cq_tail pointer
+        * will be increased, otherwise other timeout reqs may return in
+        * advance without waiting for enough wait_nr.
+        */
+       prev = req;
+       list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
+               prev->sequence++;
        list_del(&req->list);
 
        io_cqring_fill_event(ctx, req->user_data, -ETIME);
@@ -1903,6 +1930,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        struct io_ring_ctx *ctx = req->ctx;
        struct list_head *entry;
        struct timespec64 ts;
+       unsigned span = 0;
 
        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
@@ -1951,9 +1979,17 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                if (ctx->cached_sq_head < nxt_sq_head)
                        tmp += UINT_MAX;
 
-               if (tmp >= tmp_nxt)
+               if (tmp > tmp_nxt)
                        break;
+
+               /*
+                * Sequence of reqs after the insert one and itself should
+                * be adjusted because each timeout req consumes a slot.
+                */
+               span++;
+               nxt->sequence++;
        }
+       req->sequence -= span;
        list_add(&req->list, entry);
        spin_unlock_irq(&ctx->completion_lock);
 
@@ -2292,11 +2328,11 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
 }
 
 static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-                       struct sqe_submit *s, bool force_nonblock)
+                       struct sqe_submit *s)
 {
        int ret;
 
-       ret = __io_submit_sqe(ctx, req, s, force_nonblock);
+       ret = __io_submit_sqe(ctx, req, s, true);
 
        /*
         * We async punt it if the file wasn't marked NOWAIT, or if the file
@@ -2343,7 +2379,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 }
 
 static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-                       struct sqe_submit *s, bool force_nonblock)
+                       struct sqe_submit *s)
 {
        int ret;
 
@@ -2356,18 +2392,17 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
                return 0;
        }
 
-       return __io_queue_sqe(ctx, req, s, force_nonblock);
+       return __io_queue_sqe(ctx, req, s);
 }
 
 static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
-                             struct sqe_submit *s, struct io_kiocb *shadow,
-                             bool force_nonblock)
+                             struct sqe_submit *s, struct io_kiocb *shadow)
 {
        int ret;
        int need_submit = false;
 
        if (!shadow)
-               return io_queue_sqe(ctx, req, s, force_nonblock);
+               return io_queue_sqe(ctx, req, s);
 
        /*
         * Mark the first IO in link list as DRAIN, let all the following
@@ -2379,6 +2414,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
        if (ret) {
                if (ret != -EIOCBQUEUED) {
                        io_free_req(req);
+                       __io_free_req(shadow);
                        io_cqring_add_event(ctx, s->sqe->user_data, ret);
                        return 0;
                }
@@ -2396,7 +2432,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
        spin_unlock_irq(&ctx->completion_lock);
 
        if (need_submit)
-               return __io_queue_sqe(ctx, req, s, force_nonblock);
+               return __io_queue_sqe(ctx, req, s);
 
        return 0;
 }
@@ -2404,8 +2440,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
 #define SQE_VALID_FLAGS        (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
 
 static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
-                         struct io_submit_state *state, struct io_kiocb **link,
-                         bool force_nonblock)
+                         struct io_submit_state *state, struct io_kiocb **link)
 {
        struct io_uring_sqe *sqe_copy;
        struct io_kiocb *req;
@@ -2432,6 +2467,8 @@ err:
                return;
        }
 
+       req->user_data = s->sqe->user_data;
+
        /*
         * If we already have a head request, queue this one for async
         * submittal once the head completes. If we don't have a head but
@@ -2458,7 +2495,7 @@ err:
                INIT_LIST_HEAD(&req->link_list);
                *link = req;
        } else {
-               io_queue_sqe(ctx, req, s, force_nonblock);
+               io_queue_sqe(ctx, req, s);
        }
 }
 
@@ -2538,12 +2575,13 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
 
        /* drop invalid entries */
        ctx->cached_sq_head++;
-       rings->sq_dropped++;
+       ctx->cached_sq_dropped++;
+       WRITE_ONCE(rings->sq_dropped, ctx->cached_sq_dropped);
        return false;
 }
 
-static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
-                         unsigned int nr, bool has_user, bool mm_fault)
+static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
+                         bool has_user, bool mm_fault)
 {
        struct io_submit_state state, *statep = NULL;
        struct io_kiocb *link = NULL;
@@ -2557,19 +2595,23 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
        }
 
        for (i = 0; i < nr; i++) {
+               struct sqe_submit s;
+
+               if (!io_get_sqring(ctx, &s))
+                       break;
+
                /*
                 * If previous wasn't linked and we have a linked command,
                 * that's the end of the chain. Submit the previous link.
                 */
                if (!prev_was_link && link) {
-                       io_queue_link_head(ctx, link, &link->submit, shadow_req,
-                                               true);
+                       io_queue_link_head(ctx, link, &link->submit, shadow_req);
                        link = NULL;
                        shadow_req = NULL;
                }
-               prev_was_link = (sqes[i].sqe->flags & IOSQE_IO_LINK) != 0;
+               prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
 
-               if (link && (sqes[i].sqe->flags & IOSQE_IO_DRAIN)) {
+               if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
                        if (!shadow_req) {
                                shadow_req = io_get_req(ctx, NULL);
                                if (unlikely(!shadow_req))
@@ -2577,24 +2619,24 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
                                shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
                                refcount_dec(&shadow_req->refs);
                        }
-                       shadow_req->sequence = sqes[i].sequence;
+                       shadow_req->sequence = s.sequence;
                }
 
 out:
                if (unlikely(mm_fault)) {
-                       io_cqring_add_event(ctx, sqes[i].sqe->user_data,
+                       io_cqring_add_event(ctx, s.sqe->user_data,
                                                -EFAULT);
                } else {
-                       sqes[i].has_user = has_user;
-                       sqes[i].needs_lock = true;
-                       sqes[i].needs_fixed_file = true;
-                       io_submit_sqe(ctx, &sqes[i], statep, &link, true);
+                       s.has_user = has_user;
+                       s.needs_lock = true;
+                       s.needs_fixed_file = true;
+                       io_submit_sqe(ctx, &s, statep, &link);
                        submitted++;
                }
        }
 
        if (link)
-               io_queue_link_head(ctx, link, &link->submit, shadow_req, true);
+               io_queue_link_head(ctx, link, &link->submit, shadow_req);
        if (statep)
                io_submit_state_end(&state);
 
@@ -2603,7 +2645,6 @@ out:
 
 static int io_sq_thread(void *data)
 {
-       struct sqe_submit sqes[IO_IOPOLL_BATCH];
        struct io_ring_ctx *ctx = data;
        struct mm_struct *cur_mm = NULL;
        mm_segment_t old_fs;
@@ -2618,14 +2659,27 @@ static int io_sq_thread(void *data)
 
        timeout = inflight = 0;
        while (!kthread_should_park()) {
-               bool all_fixed, mm_fault = false;
-               int i;
+               bool mm_fault = false;
+               unsigned int to_submit;
 
                if (inflight) {
                        unsigned nr_events = 0;
 
                        if (ctx->flags & IORING_SETUP_IOPOLL) {
-                               io_iopoll_check(ctx, &nr_events, 0);
+                               /*
+                                * inflight is the count of the maximum possible
+                                * entries we submitted, but it can be smaller
+                                * if we dropped some of them. If we don't have
+                                * poll entries available, then we know that we
+                                * have nothing left to poll for. Reset the
+                                * inflight count to zero in that case.
+                                */
+                               mutex_lock(&ctx->uring_lock);
+                               if (!list_empty(&ctx->poll_list))
+                                       __io_iopoll_check(ctx, &nr_events, 0);
+                               else
+                                       inflight = 0;
+                               mutex_unlock(&ctx->uring_lock);
                        } else {
                                /*
                                 * Normal IO, just pretend everything completed.
@@ -2639,7 +2693,8 @@ static int io_sq_thread(void *data)
                                timeout = jiffies + ctx->sq_thread_idle;
                }
 
-               if (!io_get_sqring(ctx, &sqes[0])) {
+               to_submit = io_sqring_entries(ctx);
+               if (!to_submit) {
                        /*
                         * We're polling. If we're within the defined idle
                         * period, then let us spin without work before going
@@ -2670,7 +2725,8 @@ static int io_sq_thread(void *data)
                        /* make sure to read SQ tail after writing flags */
                        smp_mb();
 
-                       if (!io_get_sqring(ctx, &sqes[0])) {
+                       to_submit = io_sqring_entries(ctx);
+                       if (!to_submit) {
                                if (kthread_should_park()) {
                                        finish_wait(&ctx->sqo_wait, &wait);
                                        break;
@@ -2688,19 +2744,8 @@ static int io_sq_thread(void *data)
                        ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
                }
 
-               i = 0;
-               all_fixed = true;
-               do {
-                       if (all_fixed && io_sqe_needs_user(sqes[i].sqe))
-                               all_fixed = false;
-
-                       i++;
-                       if (i == ARRAY_SIZE(sqes))
-                               break;
-               } while (io_get_sqring(ctx, &sqes[i]));
-
                /* Unless all new commands are FIXED regions, grab mm */
-               if (!all_fixed && !cur_mm) {
+               if (!cur_mm) {
                        mm_fault = !mmget_not_zero(ctx->sqo_mm);
                        if (!mm_fault) {
                                use_mm(ctx->sqo_mm);
@@ -2708,8 +2753,9 @@ static int io_sq_thread(void *data)
                        }
                }
 
-               inflight += io_submit_sqes(ctx, sqes, i, cur_mm != NULL,
-                                               mm_fault);
+               to_submit = min(to_submit, ctx->sq_entries);
+               inflight += io_submit_sqes(ctx, to_submit, cur_mm != NULL,
+                                          mm_fault);
 
                /* Commit SQ ring head once we've consumed all SQEs */
                io_commit_sqring(ctx);
@@ -2726,8 +2772,7 @@ static int io_sq_thread(void *data)
        return 0;
 }
 
-static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
-                         bool block_for_last)
+static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
 {
        struct io_submit_state state, *statep = NULL;
        struct io_kiocb *link = NULL;
@@ -2741,7 +2786,6 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
        }
 
        for (i = 0; i < to_submit; i++) {
-               bool force_nonblock = true;
                struct sqe_submit s;
 
                if (!io_get_sqring(ctx, &s))
@@ -2752,8 +2796,7 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
                 * that's the end of the chain. Submit the previous link.
                 */
                if (!prev_was_link && link) {
-                       io_queue_link_head(ctx, link, &link->submit, shadow_req,
-                                               force_nonblock);
+                       io_queue_link_head(ctx, link, &link->submit, shadow_req);
                        link = NULL;
                        shadow_req = NULL;
                }
@@ -2775,27 +2818,16 @@ out:
                s.needs_lock = false;
                s.needs_fixed_file = false;
                submit++;
-
-               /*
-                * The caller will block for events after submit, submit the
-                * last IO non-blocking. This is either the only IO it's
-                * submitting, or it already submitted the previous ones. This
-                * improves performance by avoiding an async punt that we don't
-                * need to do.
-                */
-               if (block_for_last && submit == to_submit)
-                       force_nonblock = false;
-
-               io_submit_sqe(ctx, &s, statep, &link, force_nonblock);
+               io_submit_sqe(ctx, &s, statep, &link);
        }
-       io_commit_sqring(ctx);
 
        if (link)
-               io_queue_link_head(ctx, link, &link->submit, shadow_req,
-                                       !block_for_last);
+               io_queue_link_head(ctx, link, &link->submit, shadow_req);
        if (statep)
                io_submit_state_end(statep);
 
+       io_commit_sqring(ctx);
+
        return submit;
 }
 
@@ -3636,21 +3668,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                        wake_up(&ctx->sqo_wait);
                submitted = to_submit;
        } else if (to_submit) {
-               bool block_for_last = false;
-
                to_submit = min(to_submit, ctx->sq_entries);
 
-               /*
-                * Allow last submission to block in a series, IFF the caller
-                * asked to wait for events and we don't currently have
-                * enough. This potentially avoids an async punt.
-                */
-               if (to_submit == min_complete &&
-                   io_cqring_events(ctx->rings) < min_complete)
-                       block_for_last = true;
-
                mutex_lock(&ctx->uring_lock);
-               submitted = io_ring_submit(ctx, to_submit, block_for_last);
+               submitted = io_ring_submit(ctx, to_submit);
                mutex_unlock(&ctx->uring_lock);
        }
        if (flags & IORING_ENTER_GETEVENTS) {
@@ -3809,10 +3830,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
        if (ret)
                goto err;
 
-       ret = io_uring_get_fd(ctx);
-       if (ret < 0)
-               goto err;
-
        memset(&p->sq_off, 0, sizeof(p->sq_off));
        p->sq_off.head = offsetof(struct io_rings, sq.head);
        p->sq_off.tail = offsetof(struct io_rings, sq.tail);
@@ -3830,6 +3847,14 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
        p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
        p->cq_off.cqes = offsetof(struct io_rings, cqes);
 
+       /*
+        * Install ring fd as the very last thing, so we don't risk someone
+        * having closed it before we finish setup
+        */
+       ret = io_uring_get_fd(ctx);
+       if (ret < 0)
+               goto err;
+
        p->features = IORING_FEAT_SINGLE_MMAP;
        return ret;
 err:
index 071b90a..af549d7 100644 (file)
@@ -53,6 +53,16 @@ nfs4_is_valid_delegation(const struct nfs_delegation *delegation,
        return false;
 }
 
+struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode)
+{
+       struct nfs_delegation *delegation;
+
+       delegation = rcu_dereference(NFS_I(inode)->delegation);
+       if (nfs4_is_valid_delegation(delegation, 0))
+               return delegation;
+       return NULL;
+}
+
 static int
 nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
 {
@@ -1181,7 +1191,7 @@ bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
        if (delegation != NULL &&
            nfs4_stateid_match_other(dst, &delegation->stateid)) {
                dst->seqid = delegation->stateid.seqid;
-               return ret;
+               ret = true;
        }
        rcu_read_unlock();
 out:
index 9eb87ae..8b14d44 100644 (file)
@@ -68,6 +68,7 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state,
 bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, const struct cred **cred);
 bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
 
+struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode);
 void nfs_mark_delegation_referenced(struct nfs_delegation *delegation);
 int nfs4_have_delegation(struct inode *inode, fmode_t flags);
 int nfs4_check_delegation(struct inode *inode, fmode_t flags);
index ab8ca20..caacf5e 100644 (file)
@@ -1440,8 +1440,6 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
                return 0;
        if ((delegation->type & fmode) != fmode)
                return 0;
-       if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
-               return 0;
        switch (claim) {
        case NFS4_OPEN_CLAIM_NULL:
        case NFS4_OPEN_CLAIM_FH:
@@ -1810,7 +1808,6 @@ static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmo
 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
 {
        struct nfs4_state *state = opendata->state;
-       struct nfs_inode *nfsi = NFS_I(state->inode);
        struct nfs_delegation *delegation;
        int open_mode = opendata->o_arg.open_flags;
        fmode_t fmode = opendata->o_arg.fmode;
@@ -1827,7 +1824,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
                }
                spin_unlock(&state->owner->so_lock);
                rcu_read_lock();
-               delegation = rcu_dereference(nfsi->delegation);
+               delegation = nfs4_get_valid_delegation(state->inode);
                if (!can_open_delegated(delegation, fmode, claim)) {
                        rcu_read_unlock();
                        break;
@@ -2371,7 +2368,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
                                        data->o_arg.open_flags, claim))
                        goto out_no_action;
                rcu_read_lock();
-               delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
+               delegation = nfs4_get_valid_delegation(data->state->inode);
                if (can_open_delegated(delegation, data->o_arg.fmode, claim))
                        goto unlock_no_action;
                rcu_read_unlock();
index f936033..4780517 100644 (file)
@@ -232,8 +232,8 @@ struct acpi_processor {
        struct acpi_processor_limit limit;
        struct thermal_cooling_device *cdev;
        struct device *dev; /* Processor device. */
-       struct dev_pm_qos_request perflib_req;
-       struct dev_pm_qos_request thermal_req;
+       struct freq_qos_request perflib_req;
+       struct freq_qos_request thermal_req;
 };
 
 struct acpi_processor_errata {
@@ -302,8 +302,8 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx
 #ifdef CONFIG_CPU_FREQ
 extern bool acpi_processor_cpufreq_init;
 void acpi_processor_ignore_ppc_init(void);
-void acpi_processor_ppc_init(int cpu);
-void acpi_processor_ppc_exit(int cpu);
+void acpi_processor_ppc_init(struct cpufreq_policy *policy);
+void acpi_processor_ppc_exit(struct cpufreq_policy *policy);
 void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag);
 extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit);
 #else
@@ -311,11 +311,11 @@ static inline void acpi_processor_ignore_ppc_init(void)
 {
        return;
 }
-static inline void acpi_processor_ppc_init(int cpu)
+static inline void acpi_processor_ppc_init(struct cpufreq_policy *policy)
 {
        return;
 }
-static inline void acpi_processor_ppc_exit(int cpu)
+static inline void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
 {
        return;
 }
@@ -431,14 +431,14 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr)
 int acpi_processor_get_limit_info(struct acpi_processor *pr);
 extern const struct thermal_cooling_device_ops processor_cooling_ops;
 #if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ)
-void acpi_thermal_cpufreq_init(int cpu);
-void acpi_thermal_cpufreq_exit(int cpu);
+void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy);
+void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy);
 #else
-static inline void acpi_thermal_cpufreq_init(int cpu)
+static inline void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
 {
        return;
 }
-static inline void acpi_thermal_cpufreq_exit(int cpu)
+static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
 {
        return;
 }
index c57e88e..92d5fdc 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/completion.h>
 #include <linux/kobject.h>
 #include <linux/notifier.h>
+#include <linux/pm_qos.h>
 #include <linux/spinlock.h>
 #include <linux/sysfs.h>
 
@@ -76,8 +77,10 @@ struct cpufreq_policy {
        struct work_struct      update; /* if update_policy() needs to be
                                         * called, but you're in IRQ context */
 
-       struct dev_pm_qos_request *min_freq_req;
-       struct dev_pm_qos_request *max_freq_req;
+       struct freq_constraints constraints;
+       struct freq_qos_request *min_freq_req;
+       struct freq_qos_request *max_freq_req;
+
        struct cpufreq_frequency_table  *freq_table;
        enum cpufreq_table_sorting freq_table_sorted;
 
index 6c80944..4cf02ec 100644 (file)
@@ -204,6 +204,12 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
        do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
 #define dynamic_dev_dbg(dev, fmt, ...)                                 \
        do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
+#define dynamic_hex_dump(prefix_str, prefix_type, rowsize,             \
+                        groupsize, buf, len, ascii)                    \
+       do { if (0)                                                     \
+               print_hex_dump(KERN_DEBUG, prefix_str, prefix_type,     \
+                               rowsize, groupsize, buf, len, ascii);   \
+       } while (0)
 #endif
 
 #endif
index bd38370..d87acf6 100644 (file)
@@ -1579,9 +1579,22 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
 efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
                                struct efi_boot_memmap *map);
 
+efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
+                                unsigned long size, unsigned long align,
+                                unsigned long *addr, unsigned long min);
+
+static inline
 efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
                           unsigned long size, unsigned long align,
-                          unsigned long *addr);
+                          unsigned long *addr)
+{
+       /*
+        * Don't allocate at 0x0. It will confuse code that
+        * checks pointers against NULL. Skip the first 8
+        * bytes so we start at a nice even number.
+        */
+       return efi_low_alloc_above(sys_table_arg, size, align, addr, 0x8);
+}
 
 efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
                            unsigned long size, unsigned long align,
@@ -1592,7 +1605,8 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
                                 unsigned long image_size,
                                 unsigned long alloc_size,
                                 unsigned long preferred_addr,
-                                unsigned long alignment);
+                                unsigned long alignment,
+                                unsigned long min_addr);
 
 efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
                                  efi_loaded_image_t *image,
index 621158e..941d075 100644 (file)
@@ -18,8 +18,6 @@ extern struct module __this_module;
 #define THIS_MODULE ((struct module *)0)
 #endif
 
-#define NS_SEPARATOR "."
-
 #ifdef CONFIG_MODVERSIONS
 /* Mark the CRC weak since genksyms apparently decides not to
  * generate a checksums for some symbols */
@@ -48,11 +46,11 @@ extern struct module __this_module;
  * absolute relocations that require runtime processing on relocatable
  * kernels.
  */
-#define __KSYMTAB_ENTRY_NS(sym, sec, ns)                               \
+#define __KSYMTAB_ENTRY_NS(sym, sec)                                   \
        __ADDRESSABLE(sym)                                              \
        asm("   .section \"___ksymtab" sec "+" #sym "\", \"a\"  \n"     \
            "   .balign 4                                       \n"     \
-           "__ksymtab_" #ns NS_SEPARATOR #sym ":               \n"     \
+           "__ksymtab_" #sym ":                                \n"     \
            "   .long   " #sym "- .                             \n"     \
            "   .long   __kstrtab_" #sym "- .                   \n"     \
            "   .long   __kstrtabns_" #sym "- .                 \n"     \
@@ -74,16 +72,14 @@ struct kernel_symbol {
        int namespace_offset;
 };
 #else
-#define __KSYMTAB_ENTRY_NS(sym, sec, ns)                               \
-       static const struct kernel_symbol __ksymtab_##sym##__##ns       \
-       asm("__ksymtab_" #ns NS_SEPARATOR #sym)                         \
+#define __KSYMTAB_ENTRY_NS(sym, sec)                                   \
+       static const struct kernel_symbol __ksymtab_##sym               \
        __attribute__((section("___ksymtab" sec "+" #sym), used))       \
        __aligned(sizeof(void *))                                       \
        = { (unsigned long)&sym, __kstrtab_##sym, __kstrtabns_##sym }
 
 #define __KSYMTAB_ENTRY(sym, sec)                                      \
        static const struct kernel_symbol __ksymtab_##sym               \
-       asm("__ksymtab_" #sym)                                          \
        __attribute__((section("___ksymtab" sec "+" #sym), used))       \
        __aligned(sizeof(void *))                                       \
        = { (unsigned long)&sym, __kstrtab_##sym, NULL }
@@ -115,7 +111,7 @@ struct kernel_symbol {
        static const char __kstrtabns_##sym[]                           \
        __attribute__((section("__ksymtab_strings"), used, aligned(1))) \
        = #ns;                                                          \
-       __KSYMTAB_ENTRY_NS(sym, sec, ns)
+       __KSYMTAB_ENTRY_NS(sym, sec)
 
 #define ___EXPORT_SYMBOL(sym, sec)                                     \
        ___export_symbol_common(sym, sec);                              \
index 22ebea2..7a6f8f6 100644 (file)
@@ -1103,7 +1103,6 @@ static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
 
 #endif /* CONFIG_BPF_JIT */
 
-void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
 
 #define BPF_ANC                BIT(15)
diff --git a/include/linux/firmware/broadcom/tee_bnxt_fw.h b/include/linux/firmware/broadcom/tee_bnxt_fw.h
new file mode 100644 (file)
index 0000000..f24c82d
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2019 Broadcom.
+ */
+
+#ifndef _BROADCOM_TEE_BNXT_FW_H
+#define _BROADCOM_TEE_BNXT_FW_H
+
+#include <linux/types.h>
+
+int tee_bnxt_fw_load(void);
+int tee_bnxt_copy_coredump(void *buf, u32 offset, u32 size);
+
+#endif /* _BROADCOM_TEE_BNXT_FW_H */
index 975553a..54d9436 100644 (file)
@@ -403,6 +403,8 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
 
 void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
 
+struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev);
+
 extern struct bus_type fsl_mc_bus_type;
 
 extern struct device_type fsl_mc_bus_dprc_type;
index fb07b50..61f2f6f 100644 (file)
@@ -325,6 +325,29 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
        return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
 }
 
+/**
+ * gfpflags_normal_context - is gfp_flags a normal sleepable context?
+ * @gfp_flags: gfp_flags to test
+ *
+ * Test whether @gfp_flags indicates that the allocation is from the
+ * %current context and allowed to sleep.
+ *
+ * An allocation being allowed to block doesn't mean it owns the %current
+ * context.  When direct reclaim path tries to allocate memory, the
+ * allocation context is nested inside whatever %current was doing at the
+ * time of the original allocation.  The nested allocation may be allowed
+ * to block but modifying anything %current owns can corrupt the outer
+ * context's expectations.
+ *
+ * %true result from this function indicates that the allocation context
+ * can sleep and use anything that's associated with %current.
+ */
+static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
+{
+       return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
+               __GFP_DIRECT_RECLAIM;
+}
+
 #ifdef CONFIG_HIGHMEM
 #define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
 #else
index 2e55e4c..a367ead 100644 (file)
@@ -29,7 +29,6 @@ struct macvlan_dev {
        netdev_features_t       set_features;
        enum macvlan_mode       mode;
        u16                     flags;
-       int                     nest_level;
        unsigned int            macaddr_count;
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll          *netpoll;
index 06faa06..ec7e4bd 100644 (file)
@@ -223,6 +223,7 @@ struct team {
                atomic_t count_pending;
                struct delayed_work dw;
        } mcast_rejoin;
+       struct lock_class_key team_lock_key;
        long mode_priv[TEAM_MODE_PRIV_LONGS];
 };
 
index 244278d..b05e855 100644 (file)
@@ -182,7 +182,6 @@ struct vlan_dev_priv {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll                          *netpoll;
 #endif
-       unsigned int                            nest_level;
 };
 
 static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
@@ -221,11 +220,6 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
 
 extern bool vlan_uses_dev(const struct net_device *dev);
 
-static inline int vlan_get_encap_level(struct net_device *dev)
-{
-       BUG_ON(!is_vlan_dev(dev));
-       return vlan_dev_priv(dev)->nest_level;
-}
 #else
 static inline struct net_device *
 __vlan_find_dev_deep_rcu(struct net_device *real_dev,
@@ -295,11 +289,6 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
 {
        return false;
 }
-static inline int vlan_get_encap_level(struct net_device *dev)
-{
-       BUG();
-       return 0;
-}
 #endif
 
 /**
index 138c50d..0836fe2 100644 (file)
@@ -1545,9 +1545,8 @@ struct mlx5_ifc_extended_dest_format_bits {
 };
 
 union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
-       struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
+       struct mlx5_ifc_extended_dest_format_bits extended_dest_format;
        struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
-       u8         reserved_at_0[0x40];
 };
 
 struct mlx5_ifc_fte_match_param_bits {
index 3207e0b..1f140a6 100644 (file)
@@ -1431,7 +1431,6 @@ struct net_device_ops {
        void                    (*ndo_dfwd_del_station)(struct net_device *pdev,
                                                        void *priv);
 
-       int                     (*ndo_get_lock_subclass)(struct net_device *dev);
        int                     (*ndo_set_tx_maxrate)(struct net_device *dev,
                                                      int queue_index,
                                                      u32 maxrate);
@@ -1659,6 +1658,8 @@ enum netdev_priv_flags {
  *     @perm_addr:             Permanent hw address
  *     @addr_assign_type:      Hw address assignment type
  *     @addr_len:              Hardware address length
+ *     @upper_level:           Maximum depth level of upper devices.
+ *     @lower_level:           Maximum depth level of lower devices.
  *     @neigh_priv_len:        Used in neigh_alloc()
  *     @dev_id:                Used to differentiate devices that share
  *                             the same link layer address
@@ -1768,9 +1769,13 @@ enum netdev_priv_flags {
  *     @phydev:        Physical device may attach itself
  *                     for hardware timestamping
  *     @sfp_bus:       attached &struct sfp_bus structure.
- *
- *     @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
- *     @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
+ *     @qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock
+                               spinlock
+ *     @qdisc_running_key:     lockdep class annotating Qdisc->running seqcount
+ *     @qdisc_xmit_lock_key:   lockdep class annotating
+ *                             netdev_queue->_xmit_lock spinlock
+ *     @addr_list_lock_key:    lockdep class annotating
+ *                             net_device->addr_list_lock spinlock
  *
  *     @proto_down:    protocol port state information can be sent to the
  *                     switch driver and used to set the phys state of the
@@ -1885,6 +1890,8 @@ struct net_device {
        unsigned char           perm_addr[MAX_ADDR_LEN];
        unsigned char           addr_assign_type;
        unsigned char           addr_len;
+       unsigned char           upper_level;
+       unsigned char           lower_level;
        unsigned short          neigh_priv_len;
        unsigned short          dev_id;
        unsigned short          dev_port;
@@ -2055,8 +2062,10 @@ struct net_device {
 #endif
        struct phy_device       *phydev;
        struct sfp_bus          *sfp_bus;
-       struct lock_class_key   *qdisc_tx_busylock;
-       struct lock_class_key   *qdisc_running_key;
+       struct lock_class_key   qdisc_tx_busylock_key;
+       struct lock_class_key   qdisc_running_key;
+       struct lock_class_key   qdisc_xmit_lock_key;
+       struct lock_class_key   addr_list_lock_key;
        bool                    proto_down;
        unsigned                wol_enabled:1;
 };
@@ -2134,23 +2143,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
                f(dev, &dev->_tx[i], arg);
 }
 
-#define netdev_lockdep_set_classes(dev)                                \
-{                                                              \
-       static struct lock_class_key qdisc_tx_busylock_key;     \
-       static struct lock_class_key qdisc_running_key;         \
-       static struct lock_class_key qdisc_xmit_lock_key;       \
-       static struct lock_class_key dev_addr_list_lock_key;    \
-       unsigned int i;                                         \
-                                                               \
-       (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;      \
-       (dev)->qdisc_running_key = &qdisc_running_key;          \
-       lockdep_set_class(&(dev)->addr_list_lock,               \
-                         &dev_addr_list_lock_key);             \
-       for (i = 0; i < (dev)->num_tx_queues; i++)              \
-               lockdep_set_class(&(dev)->_tx[i]._xmit_lock,    \
-                                 &qdisc_xmit_lock_key);        \
-}
-
 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
                     struct net_device *sb_dev);
 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
@@ -3155,6 +3147,7 @@ static inline void netif_stop_queue(struct net_device *dev)
 }
 
 void netif_tx_stop_all_queues(struct net_device *dev);
+void netdev_update_lockdep_key(struct net_device *dev);
 
 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 {
@@ -4072,16 +4065,6 @@ static inline void netif_addr_lock(struct net_device *dev)
        spin_lock(&dev->addr_list_lock);
 }
 
-static inline void netif_addr_lock_nested(struct net_device *dev)
-{
-       int subclass = SINGLE_DEPTH_NESTING;
-
-       if (dev->netdev_ops->ndo_get_lock_subclass)
-               subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
-
-       spin_lock_nested(&dev->addr_list_lock, subclass);
-}
-
 static inline void netif_addr_lock_bh(struct net_device *dev)
 {
        spin_lock_bh(&dev->addr_list_lock);
@@ -4342,6 +4325,16 @@ int netdev_master_upper_dev_link(struct net_device *dev,
                                 struct netlink_ext_ack *extack);
 void netdev_upper_dev_unlink(struct net_device *dev,
                             struct net_device *upper_dev);
+int netdev_adjacent_change_prepare(struct net_device *old_dev,
+                                  struct net_device *new_dev,
+                                  struct net_device *dev,
+                                  struct netlink_ext_ack *extack);
+void netdev_adjacent_change_commit(struct net_device *old_dev,
+                                  struct net_device *new_dev,
+                                  struct net_device *dev);
+void netdev_adjacent_change_abort(struct net_device *old_dev,
+                                 struct net_device *new_dev,
+                                 struct net_device *dev);
 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
 void *netdev_lower_dev_get_private(struct net_device *dev,
                                   struct net_device *lower_dev);
@@ -4353,7 +4346,6 @@ void netdev_lower_state_changed(struct net_device *lower_dev,
 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
 void netdev_rss_key_fill(void *buffer, size_t len);
 
-int dev_get_nest_level(struct net_device *dev);
 int skb_checksum_help(struct sk_buff *skb);
 int skb_crc32c_csum_help(struct sk_buff *skb);
 int skb_csum_hwoffload_help(struct sk_buff *skb,
index 61448c1..68ccc5b 100644 (file)
@@ -292,7 +292,7 @@ struct pmu {
         *  -EBUSY      -- @event is for this PMU but PMU temporarily unavailable
         *  -EINVAL     -- @event is for this PMU but @event is not valid
         *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
-        *  -EACCESS    -- @event is for this PMU, @event is valid, but no privilidges
+        *  -EACCES     -- @event is for this PMU, @event is valid, but no privileges
         *
         *  0           -- @event is for this PMU and valid
         *
index 6eaa53c..30e676b 100644 (file)
@@ -51,7 +51,10 @@ struct sdma_script_start_addrs {
        /* End of v2 array */
        s32 zcanfd_2_mcu_addr;
        s32 zqspi_2_mcu_addr;
+       s32 mcu_2_ecspi_addr;
        /* End of v3 array */
+       s32 mcu_2_zqspi_addr;
+       /* End of v4 array */
 };
 
 /**
index 222c3e0..ebf5ef1 100644 (file)
@@ -34,8 +34,6 @@ enum pm_qos_flags_status {
 #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT    PM_QOS_LATENCY_ANY
 #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS
 #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
-#define PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE     0
-#define PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE     (-1)
 #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
 
 #define PM_QOS_FLAG_NO_POWER_OFF       (1 << 0)
@@ -54,8 +52,6 @@ struct pm_qos_flags_request {
 enum dev_pm_qos_req_type {
        DEV_PM_QOS_RESUME_LATENCY = 1,
        DEV_PM_QOS_LATENCY_TOLERANCE,
-       DEV_PM_QOS_MIN_FREQUENCY,
-       DEV_PM_QOS_MAX_FREQUENCY,
        DEV_PM_QOS_FLAGS,
 };
 
@@ -97,14 +93,10 @@ struct pm_qos_flags {
 struct dev_pm_qos {
        struct pm_qos_constraints resume_latency;
        struct pm_qos_constraints latency_tolerance;
-       struct pm_qos_constraints min_frequency;
-       struct pm_qos_constraints max_frequency;
        struct pm_qos_flags flags;
        struct dev_pm_qos_request *resume_latency_req;
        struct dev_pm_qos_request *latency_tolerance_req;
        struct dev_pm_qos_request *flags_req;
-       struct dev_pm_qos_request *min_frequency_req;
-       struct dev_pm_qos_request *max_frequency_req;
 };
 
 /* Action requested to pm_qos_update_target */
@@ -199,10 +191,6 @@ static inline s32 dev_pm_qos_read_value(struct device *dev,
        switch (type) {
        case DEV_PM_QOS_RESUME_LATENCY:
                return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
-       case DEV_PM_QOS_MIN_FREQUENCY:
-               return PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
-       case DEV_PM_QOS_MAX_FREQUENCY:
-               return PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
        default:
                WARN_ON(1);
                return 0;
@@ -267,4 +255,48 @@ static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
 }
 #endif
 
+#define FREQ_QOS_MIN_DEFAULT_VALUE     0
+#define FREQ_QOS_MAX_DEFAULT_VALUE     (-1)
+
+enum freq_qos_req_type {
+       FREQ_QOS_MIN = 1,
+       FREQ_QOS_MAX,
+};
+
+struct freq_constraints {
+       struct pm_qos_constraints min_freq;
+       struct blocking_notifier_head min_freq_notifiers;
+       struct pm_qos_constraints max_freq;
+       struct blocking_notifier_head max_freq_notifiers;
+};
+
+struct freq_qos_request {
+       enum freq_qos_req_type type;
+       struct plist_node pnode;
+       struct freq_constraints *qos;
+};
+
+static inline int freq_qos_request_active(struct freq_qos_request *req)
+{
+       return !IS_ERR_OR_NULL(req->qos);
+}
+
+void freq_constraints_init(struct freq_constraints *qos);
+
+s32 freq_qos_read_value(struct freq_constraints *qos,
+                       enum freq_qos_req_type type);
+
+int freq_qos_add_request(struct freq_constraints *qos,
+                        struct freq_qos_request *req,
+                        enum freq_qos_req_type type, s32 value);
+int freq_qos_update_request(struct freq_qos_request *req, s32 new_value);
+int freq_qos_remove_request(struct freq_qos_request *req);
+
+int freq_qos_add_notifier(struct freq_constraints *qos,
+                         enum freq_qos_req_type type,
+                         struct notifier_block *notifier);
+int freq_qos_remove_notifier(struct freq_constraints *qos,
+                            enum freq_qos_req_type type,
+                            struct notifier_block *notifier);
+
 #endif
index a8d59d6..9df7547 100644 (file)
@@ -105,6 +105,7 @@ enum lockdown_reason {
        LOCKDOWN_NONE,
        LOCKDOWN_MODULE_SIGNATURE,
        LOCKDOWN_DEV_MEM,
+       LOCKDOWN_EFI_TEST,
        LOCKDOWN_KEXEC,
        LOCKDOWN_HIBERNATION,
        LOCKDOWN_PCI_ACCESS,
index f7ae12a..53238ac 100644 (file)
@@ -1354,7 +1354,8 @@ static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6
        return skb->hash;
 }
 
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+__u32 skb_get_hash_perturb(const struct sk_buff *skb,
+                          const siphash_key_t *perturb);
 
 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
 {
@@ -1494,6 +1495,19 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
        return list->next == (const struct sk_buff *) list;
 }
 
+/**
+ *     skb_queue_empty_lockless - check if a queue is empty
+ *     @list: queue head
+ *
+ *     Returns true if the queue is empty, false otherwise.
+ *     This variant can be used in lockless contexts.
+ */
+static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
+{
+       return READ_ONCE(list->next) == (const struct sk_buff *) list;
+}
+
+
 /**
  *     skb_queue_is_last - check if skb is the last entry in the queue
  *     @list: queue head
@@ -1847,9 +1861,11 @@ static inline void __skb_insert(struct sk_buff *newsk,
                                struct sk_buff *prev, struct sk_buff *next,
                                struct sk_buff_head *list)
 {
-       newsk->next = next;
-       newsk->prev = prev;
-       next->prev  = prev->next = newsk;
+       /* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
+       WRITE_ONCE(newsk->next, next);
+       WRITE_ONCE(newsk->prev, prev);
+       WRITE_ONCE(next->prev, newsk);
+       WRITE_ONCE(prev->next, newsk);
        list->qlen++;
 }
 
@@ -1860,11 +1876,11 @@ static inline void __skb_queue_splice(const struct sk_buff_head *list,
        struct sk_buff *first = list->next;
        struct sk_buff *last = list->prev;
 
-       first->prev = prev;
-       prev->next = first;
+       WRITE_ONCE(first->prev, prev);
+       WRITE_ONCE(prev->next, first);
 
-       last->next = next;
-       next->prev = last;
+       WRITE_ONCE(last->next, next);
+       WRITE_ONCE(next->prev, last);
 }
 
 /**
@@ -2005,8 +2021,8 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
        next       = skb->next;
        prev       = skb->prev;
        skb->next  = skb->prev = NULL;
-       next->prev = prev;
-       prev->next = next;
+       WRITE_ONCE(next->prev, prev);
+       WRITE_ONCE(prev->next, next);
 }
 
 /**
index fc0bed5..4049d97 100644 (file)
@@ -263,7 +263,7 @@ struct ucred {
 #define PF_MAX         AF_MAX
 
 /* Maximum queue length specifiable by listen.  */
-#define SOMAXCONN      128
+#define SOMAXCONN      4096
 
 /* Flags we can use with send/ and recv.
    Added those for 1003.1g not all are supported yet
index 87d27e1..d796058 100644 (file)
@@ -64,6 +64,11 @@ static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
        return 0;
 }
 
+static inline void xprt_destroy_backchannel(struct rpc_xprt *xprt,
+                                           unsigned int max_reqs)
+{
+}
+
 static inline bool svc_is_backchannel(const struct svc_rqst *rqstp)
 {
        return false;
index 5420817..fa7ee50 100644 (file)
@@ -196,9 +196,9 @@ struct bin_attribute {
        .size   = _size,                                                \
 }
 
-#define __BIN_ATTR_WO(_name) {                                         \
+#define __BIN_ATTR_WO(_name, _size) {                                  \
        .attr   = { .name = __stringify(_name), .mode = 0200 },         \
-       .store  = _name##_store,                                        \
+       .write  = _name##_write,                                        \
        .size   = _size,                                                \
 }
 
index 4c7781f..07875cc 100644 (file)
@@ -48,7 +48,6 @@ struct virtio_vsock_sock {
 
 struct virtio_vsock_pkt {
        struct virtio_vsock_hdr hdr;
-       struct work_struct work;
        struct list_head list;
        /* socket refcnt not held, only use for cancellation */
        struct vsock_sock *vsk;
index b18c699..0495bdc 100644 (file)
@@ -41,6 +41,7 @@ struct tc_action {
        struct gnet_stats_queue __percpu *cpu_qstats;
        struct tc_cookie        __rcu *act_cookie;
        struct tcf_chain        __rcu *goto_chain;
+       u32                     tcfa_flags;
 };
 #define tcf_index      common.tcfa_index
 #define tcf_refcnt     common.tcfa_refcnt
@@ -94,7 +95,7 @@ struct tc_action_ops {
        int     (*init)(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **act, int ovr,
                        int bind, bool rtnl_held, struct tcf_proto *tp,
-                       struct netlink_ext_ack *extack);
+                       u32 flags, struct netlink_ext_ack *extack);
        int     (*walk)(struct net *, struct sk_buff *,
                        struct netlink_callback *, int,
                        const struct tc_action_ops *,
@@ -154,7 +155,11 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
 int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index);
 int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
                   struct tc_action **a, const struct tc_action_ops *ops,
-                  int bind, bool cpustats);
+                  int bind, bool cpustats, u32 flags);
+int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
+                             struct nlattr *est, struct tc_action **a,
+                             const struct tc_action_ops *ops, int bind,
+                             u32 flags);
 void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
 
 void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
@@ -186,6 +191,43 @@ int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
                    int ref);
 int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
 int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
+
+static inline void tcf_action_update_bstats(struct tc_action *a,
+                                           struct sk_buff *skb)
+{
+       if (likely(a->cpu_bstats)) {
+               bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), skb);
+               return;
+       }
+       spin_lock(&a->tcfa_lock);
+       bstats_update(&a->tcfa_bstats, skb);
+       spin_unlock(&a->tcfa_lock);
+}
+
+static inline void tcf_action_inc_drop_qstats(struct tc_action *a)
+{
+       if (likely(a->cpu_qstats)) {
+               qstats_drop_inc(this_cpu_ptr(a->cpu_qstats));
+               return;
+       }
+       spin_lock(&a->tcfa_lock);
+       qstats_drop_inc(&a->tcfa_qstats);
+       spin_unlock(&a->tcfa_lock);
+}
+
+static inline void tcf_action_inc_overlimit_qstats(struct tc_action *a)
+{
+       if (likely(a->cpu_qstats)) {
+               qstats_overlimit_inc(this_cpu_ptr(a->cpu_qstats));
+               return;
+       }
+       spin_lock(&a->tcfa_lock);
+       qstats_overlimit_inc(&a->tcfa_qstats);
+       spin_unlock(&a->tcfa_lock);
+}
+
+void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets,
+                            bool drop, bool hw);
 int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
 
 int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
index f7fe456..1afc125 100644 (file)
@@ -203,7 +203,6 @@ struct bonding {
        struct   slave __rcu *primary_slave;
        struct   bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
        bool     force_primary;
-       u32      nest_level;
        s32      slave_cnt; /* never change this value outside the attach/detach wrappers */
        int     (*recv_probe)(const struct sk_buff *, struct bonding *,
                              struct slave *);
@@ -239,6 +238,7 @@ struct bonding {
        struct   dentry *debug_dir;
 #endif /* CONFIG_DEBUG_FS */
        struct rtnl_link_stats64 bond_stats;
+       struct lock_class_key stats_lock_key;
 };
 
 #define bond_slave_get_rcu(dev) \
index 127a5c4..86e0283 100644 (file)
@@ -122,7 +122,7 @@ static inline void skb_mark_napi_id(struct sk_buff *skb,
 static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_RX_BUSY_POLL
-       sk->sk_napi_id = skb->napi_id;
+       WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
 #endif
        sk_rx_queue_set(sk, skb);
 }
@@ -132,8 +132,8 @@ static inline void sk_mark_napi_id_once(struct sock *sk,
                                        const struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_RX_BUSY_POLL
-       if (!sk->sk_napi_id)
-               sk->sk_napi_id = skb->napi_id;
+       if (!READ_ONCE(sk->sk_napi_id))
+               WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
 #endif
 }
 
index 4ab2c49..ab6850b 100644 (file)
@@ -6593,7 +6593,7 @@ struct cfg80211_roam_info {
  * time it is accessed in __cfg80211_roamed() due to delay in scheduling
  * rdev->event_work. In case of any failures, the reference is released
  * either in cfg80211_roamed() or in __cfg80211_romed(), Otherwise, it will be
- * released while diconneting from the current bss.
+ * released while disconnecting from the current bss.
  */
 void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info,
                     gfp_t gfp);
index e3c14dc..e4c697b 100644 (file)
@@ -123,10 +123,8 @@ struct dsa_switch_tree {
        /* List of switch ports */
        struct list_head ports;
 
-       /*
-        * Data for the individual switch chips.
-        */
-       struct dsa_switch       *ds[DSA_MAX_SWITCHES];
+       /* List of DSA links composing the routing table */
+       struct list_head rtable;
 };
 
 /* TC matchall action types, only mirroring for now */
@@ -214,6 +212,17 @@ struct dsa_port {
        bool setup;
 };
 
+/* TODO: ideally DSA ports would have a single dp->link_dp member,
+ * and no dst->rtable nor this struct dsa_link would be needed,
+ * but this would require some more complex tree walking,
+ * so keep it stupid at the moment and list them all.
+ */
+struct dsa_link {
+       struct dsa_port *dp;
+       struct dsa_port *link_dp;
+       struct list_head list;
+};
+
 struct dsa_switch {
        bool setup;
 
@@ -244,13 +253,6 @@ struct dsa_switch {
         */
        const struct dsa_switch_ops     *ops;
 
-       /*
-        * An array of which element [a] indicates which port on this
-        * switch should be used to send packets to that are destined
-        * for switch a. Can be NULL if there is only one switch chip.
-        */
-       s8              rtable[DSA_MAX_SWITCHES];
-
        /*
         * Slave mii_bus and devices for the individual ports.
         */
@@ -283,13 +285,13 @@ struct dsa_switch {
 static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
 {
        struct dsa_switch_tree *dst = ds->dst;
-       struct dsa_port *dp = NULL;
+       struct dsa_port *dp;
 
        list_for_each_entry(dp, &dst->ports, list)
                if (dp->ds == ds && dp->index == p)
-                       break;
+                       return dp;
 
-       return dp;
+       return NULL;
 }
 
 static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
@@ -324,6 +326,19 @@ static inline u32 dsa_user_ports(struct dsa_switch *ds)
        return mask;
 }
 
+/* Return the local port used to reach an arbitrary switch device */
+static inline unsigned int dsa_routing_port(struct dsa_switch *ds, int device)
+{
+       struct dsa_switch_tree *dst = ds->dst;
+       struct dsa_link *dl;
+
+       list_for_each_entry(dl, &dst->rtable, list)
+               if (dl->dp->ds == ds && dl->link_dp->ds->index == device)
+                       return dl->dp->index;
+
+       return ds->num_ports;
+}
+
 /* Return the local port used to reach an arbitrary switch port */
 static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
                                            int port)
@@ -331,7 +346,7 @@ static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
        if (device == ds->index)
                return port;
        else
-               return ds->rtable[device];
+               return dsa_routing_port(ds, device);
 }
 
 /* Return the local port used to reach the dedicated CPU port */
@@ -550,6 +565,29 @@ struct dsa_switch_ops {
         */
        netdev_tx_t (*port_deferred_xmit)(struct dsa_switch *ds, int port,
                                          struct sk_buff *skb);
+       /* Devlink parameters */
+       int     (*devlink_param_get)(struct dsa_switch *ds, u32 id,
+                                    struct devlink_param_gset_ctx *ctx);
+       int     (*devlink_param_set)(struct dsa_switch *ds, u32 id,
+                                    struct devlink_param_gset_ctx *ctx);
+};
+
+#define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes)           \
+       DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes,                \
+                            dsa_devlink_param_get, dsa_devlink_param_set, NULL)
+
+int dsa_devlink_param_get(struct devlink *dl, u32 id,
+                         struct devlink_param_gset_ctx *ctx);
+int dsa_devlink_param_set(struct devlink *dl, u32 id,
+                         struct devlink_param_gset_ctx *ctx);
+int dsa_devlink_params_register(struct dsa_switch *ds,
+                               const struct devlink_param *params,
+                               size_t params_count);
+void dsa_devlink_params_unregister(struct dsa_switch *ds,
+                                  const struct devlink_param *params,
+                                  size_t params_count);
+struct dsa_devlink_priv {
+       struct dsa_switch *ds;
 };
 
 struct dsa_switch_driver {
index 90bd210..b1063db 100644 (file)
@@ -4,8 +4,11 @@
 
 #include <linux/types.h>
 #include <linux/in6.h>
+#include <linux/siphash.h>
 #include <uapi/linux/if_ether.h>
 
+struct sk_buff;
+
 /**
  * struct flow_dissector_key_control:
  * @thoff: Transport header offset
@@ -156,19 +159,16 @@ struct flow_dissector_key_ports {
 
 /**
  * flow_dissector_key_icmp:
- *     @ports: type and code of ICMP header
- *             icmp: ICMP type (high) and code (low)
  *             type: ICMP type
  *             code: ICMP code
+ *             id:   session identifier
  */
 struct flow_dissector_key_icmp {
-       union {
-               __be16 icmp;
-               struct {
-                       u8 type;
-                       u8 code;
-               };
+       struct {
+               u8 type;
+               u8 code;
        };
+       u16 id;
 };
 
 /**
@@ -276,12 +276,14 @@ struct flow_keys_basic {
 struct flow_keys {
        struct flow_dissector_key_control control;
 #define FLOW_KEYS_HASH_START_FIELD basic
-       struct flow_dissector_key_basic basic;
+       struct flow_dissector_key_basic basic __aligned(SIPHASH_ALIGNMENT);
        struct flow_dissector_key_tags tags;
        struct flow_dissector_key_vlan vlan;
        struct flow_dissector_key_vlan cvlan;
        struct flow_dissector_key_keyid keyid;
        struct flow_dissector_key_ports ports;
+       struct flow_dissector_key_icmp icmp;
+       /* 'addrs' must be the last member */
        struct flow_dissector_key_addrs addrs;
 };
 
@@ -315,6 +317,9 @@ static inline bool flow_keys_have_l4(const struct flow_keys *keys)
 }
 
 u32 flow_hash_from_keys(struct flow_keys *keys);
+void skb_flow_get_icmp_tci(const struct sk_buff *skb,
+                          struct flow_dissector_key_icmp *key_icmp,
+                          void *data, int thoff, int hlen);
 
 static inline bool dissector_uses_key(const struct flow_dissector *flow_dissector,
                                      enum flow_dissector_key_id key_id)
index d126b5d..2ad85e6 100644 (file)
@@ -69,7 +69,7 @@ struct fq {
        struct list_head backlogs;
        spinlock_t lock;
        u32 flows_cnt;
-       u32 perturbation;
+       siphash_key_t   perturbation;
        u32 limit;
        u32 memory_limit;
        u32 memory_usage;
index be40a4b..107c0d7 100644 (file)
@@ -108,7 +108,7 @@ begin:
 
 static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
 {
-       u32 hash = skb_get_hash_perturb(skb, fq->perturbation);
+       u32 hash = skb_get_hash_perturb(skb, &fq->perturbation);
 
        return reciprocal_scale(hash, fq->flows_cnt);
 }
@@ -308,7 +308,7 @@ static int fq_init(struct fq *fq, int flows_cnt)
        INIT_LIST_HEAD(&fq->backlogs);
        spin_lock_init(&fq->lock);
        fq->flows_cnt = max_t(u32, flows_cnt, 1);
-       fq->perturbation = prandom_u32();
+       get_random_bytes(&fq->perturbation, sizeof(fq->perturbation));
        fq->quantum = 300;
        fq->limit = 8192;
        fq->memory_limit = 16 << 20; /* 16 MBytes */
index 81643cf..c814446 100644 (file)
@@ -21,9 +21,13 @@ void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf);
 int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp);
 int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num);
 #else
-void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {}
-int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp) { return 0; }
-int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num)
+static inline void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {}
+
+static inline int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp)
+{ return 0; }
+
+static inline int hwbm_pool_add(struct hwbm_pool *bm_pool,
+                               unsigned int buf_num)
 { return 0; }
 #endif /* CONFIG_HWBM */
 #endif /* _HWBM_H */
index 95bb77f..a2c61c3 100644 (file)
@@ -185,7 +185,7 @@ static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
 }
 
 struct ip_frag_state {
-       struct iphdr    *iph;
+       bool            DF;
        unsigned int    hlen;
        unsigned int    ll_rs;
        unsigned int    mtu;
@@ -196,7 +196,7 @@ struct ip_frag_state {
 };
 
 void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
-                 unsigned int mtu, struct ip_frag_state *state);
+                 unsigned int mtu, bool DF, struct ip_frag_state *state);
 struct sk_buff *ip_frag_next(struct sk_buff *skb,
                             struct ip_frag_state *state);
 
index 93e7a25..83be2d9 100644 (file)
@@ -889,6 +889,7 @@ struct netns_ipvs {
        struct delayed_work     defense_work;   /* Work handler */
        int                     drop_rate;
        int                     drop_counter;
+       int                     old_secure_tcp;
        atomic_t                dropentry;
        /* locks in ctl.c */
        spinlock_t              dropentry_lock;  /* drop entry handling */
index c5d6829..b8ceaf0 100644 (file)
@@ -346,7 +346,7 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
 #define __net_initconst        __initconst
 #endif
 
-int peernet2id_alloc(struct net *net, struct net *peer);
+int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
 int peernet2id(struct net *net, struct net *peer);
 bool peernet_has_id(struct net *net, struct net *peer);
 struct net *get_net_ns_by_id(struct net *net, int id);
index 637548d..a8b0a9a 100644 (file)
@@ -1286,17 +1286,9 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
 void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
                          struct mini_Qdisc __rcu **p_miniq);
 
-static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
+static inline int skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
 {
-       struct gnet_stats_queue *stats = res->qstats;
-       int ret;
-
-       if (res->ingress)
-               ret = netif_receive_skb(skb);
-       else
-               ret = dev_queue_xmit(skb);
-       if (ret && stats)
-               qstats_overlimit_inc(res->qstats);
+       return res->ingress ? netif_receive_skb(skb) : dev_queue_xmit(skb);
 }
 
 #endif
index 380312c..ac6042d 100644 (file)
@@ -66,7 +66,6 @@
 #include <net/checksum.h>
 #include <net/tcp_states.h>
 #include <linux/net_tstamp.h>
-#include <net/smc.h>
 #include <net/l3mdev.h>
 
 /*
@@ -954,8 +953,8 @@ static inline void sk_incoming_cpu_update(struct sock *sk)
 {
        int cpu = raw_smp_processor_id();
 
-       if (unlikely(sk->sk_incoming_cpu != cpu))
-               sk->sk_incoming_cpu = cpu;
+       if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
+               WRITE_ONCE(sk->sk_incoming_cpu, cpu);
 }
 
 static inline void sock_rps_record_flow_hash(__u32 hash)
@@ -2242,12 +2241,17 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
  * sk_page_frag - return an appropriate page_frag
  * @sk: socket
  *
- * If socket allocation mode allows current thread to sleep, it means its
- * safe to use the per task page_frag instead of the per socket one.
+ * Use the per task page_frag instead of the per socket one for
+ * optimization when we know that we're in the normal context and owns
+ * everything that's associated with %current.
+ *
+ * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
+ * inside other socket operations and end up recursing into sk_page_frag()
+ * while it's already in use.
  */
 static inline struct page_frag *sk_page_frag(struct sock *sk)
 {
-       if (gfpflags_allow_blocking(sk->sk_allocation))
+       if (gfpflags_normal_context(sk->sk_allocation))
                return &current->task_frag;
 
        return &sk->sk_frag;
index 335283d..373aadc 100644 (file)
@@ -197,6 +197,7 @@ struct vxlan_rdst {
        u8                       offloaded:1;
        __be32                   remote_vni;
        u32                      remote_ifindex;
+       struct net_device        *remote_dev;
        struct list_head         list;
        struct rcu_head          rcu;
        struct dst_cache         dst_cache;
index 6a47ba8..e7e733a 100644 (file)
@@ -366,7 +366,7 @@ struct ib_tm_caps {
 
 struct ib_cq_init_attr {
        unsigned int    cqe;
-       int             comp_vector;
+       u32             comp_vector;
        u32             flags;
 };
 
index aa31c05..cfe00e0 100644 (file)
@@ -32,6 +32,7 @@
 #define __FSL_QMAN_H
 
 #include <linux/bitops.h>
+#include <linux/device.h>
 
 /* Hardware constants */
 #define QM_CHANNEL_SWPORTAL0 0
@@ -914,6 +915,16 @@ u16 qman_affine_channel(int cpu);
  */
 struct qman_portal *qman_get_affine_portal(int cpu);
 
+/**
+ * qman_start_using_portal - register a device link for the portal user
+ * @p: the portal that will be in use
+ * @dev: the device that will use the portal
+ *
+ * Makes sure that the devices that use the portal are unbound when the
+ * portal is unbound
+ */
+int qman_start_using_portal(struct qman_portal *p, struct device *dev);
+
 /**
  * qman_p_poll_dqrr - process DQRR (fast-path) entries
  * @limit: the maximum number of DQRR entries to process
index 985a5f5..31f76b6 100644 (file)
@@ -135,9 +135,9 @@ int asoc_simple_init_priv(struct asoc_simple_priv *priv,
                               struct link_info *li);
 
 #ifdef DEBUG
-inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
-                                 char *name,
-                                 struct asoc_simple_dai *dai)
+static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
+                                        char *name,
+                                        struct asoc_simple_dai *dai)
 {
        struct device *dev = simple_priv_to_dev(priv);
 
@@ -167,7 +167,7 @@ inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
                dev_dbg(dev, "%s clk %luHz\n", name, clk_get_rate(dai->clk));
 }
 
-inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
+static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
 {
        struct snd_soc_card *card = simple_priv_to_card(priv);
        struct device *dev = simple_priv_to_dev(priv);
index 8ea9664..6b20005 100644 (file)
@@ -95,16 +95,16 @@ TRACE_EVENT(fdb_delete,
 TRACE_EVENT(br_fdb_update,
 
        TP_PROTO(struct net_bridge *br, struct net_bridge_port *source,
-                const unsigned char *addr, u16 vid, bool added_by_user),
+                const unsigned char *addr, u16 vid, unsigned long flags),
 
-       TP_ARGS(br, source, addr, vid, added_by_user),
+       TP_ARGS(br, source, addr, vid, flags),
 
        TP_STRUCT__entry(
                __string(br_dev, br->dev->name)
                __string(dev, source->dev->name)
                __array(unsigned char, addr, ETH_ALEN)
                __field(u16, vid)
-               __field(bool, added_by_user)
+               __field(unsigned long, flags)
        ),
 
        TP_fast_assign(
@@ -112,14 +112,14 @@ TRACE_EVENT(br_fdb_update,
                __assign_str(dev, source->dev->name);
                memcpy(__entry->addr, addr, ETH_ALEN);
                __entry->vid = vid;
-               __entry->added_by_user = added_by_user;
+               __entry->flags = flags;
        ),
 
-       TP_printk("br_dev %s source %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u added_by_user %d",
+       TP_printk("br_dev %s source %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u flags 0x%lx",
                  __get_str(br_dev), __get_str(dev), __entry->addr[0],
                  __entry->addr[1], __entry->addr[2], __entry->addr[3],
                  __entry->addr[4], __entry->addr[5], __entry->vid,
-                 __entry->added_by_user)
+                 __entry->flags)
 );
 
 
index 5df604d..75ae189 100644 (file)
@@ -1688,6 +1688,7 @@ TRACE_EVENT(qgroup_update_reserve,
                __entry->qgid           = qgroup->qgroupid;
                __entry->cur_reserved   = qgroup->rsv.values[type];
                __entry->diff           = diff;
+               __entry->type           = type;
        ),
 
        TP_printk_btrfs("qgid=%llu type=%s cur_reserved=%llu diff=%lld",
@@ -1710,6 +1711,7 @@ TRACE_EVENT(qgroup_meta_reserve,
        TP_fast_assign_btrfs(root->fs_info,
                __entry->refroot        = root->root_key.objectid;
                __entry->diff           = diff;
+               __entry->type           = type;
        ),
 
        TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld",
@@ -1726,7 +1728,6 @@ TRACE_EVENT(qgroup_meta_convert,
        TP_STRUCT__entry_btrfs(
                __field(        u64,    refroot                 )
                __field(        s64,    diff                    )
-               __field(        int,    type                    )
        ),
 
        TP_fast_assign_btrfs(root->fs_info,
index 69df19a..a791a94 100644 (file)
@@ -286,7 +286,7 @@ struct dcbmsg {
  * @DCB_CMD_GNUMTCS: get the number of traffic classes currently supported
  * @DCB_CMD_SNUMTCS: set the number of traffic classes
  * @DCB_CMD_GBCN: set backward congestion notification configuration
- * @DCB_CMD_SBCN: get backward congestion notification configration.
+ * @DCB_CMD_SBCN: get backward congestion notification configuration.
  * @DCB_CMD_GAPP: get application protocol configuration
  * @DCB_CMD_SAPP: set application protocol configuration
  * @DCB_CMD_IEEE_SET: set IEEE 802.1Qaz configuration
index 802b037..373cada 100644 (file)
  *
  * Protocol changelog:
  *
+ * 7.1:
+ *  - add the following messages:
+ *      FUSE_SETATTR, FUSE_SYMLINK, FUSE_MKNOD, FUSE_MKDIR, FUSE_UNLINK,
+ *      FUSE_RMDIR, FUSE_RENAME, FUSE_LINK, FUSE_OPEN, FUSE_READ, FUSE_WRITE,
+ *      FUSE_RELEASE, FUSE_FSYNC, FUSE_FLUSH, FUSE_SETXATTR, FUSE_GETXATTR,
+ *      FUSE_LISTXATTR, FUSE_REMOVEXATTR, FUSE_OPENDIR, FUSE_READDIR,
+ *      FUSE_RELEASEDIR
+ *  - add padding to messages to accommodate 32-bit servers on 64-bit kernels
+ *
+ * 7.2:
+ *  - add FOPEN_DIRECT_IO and FOPEN_KEEP_CACHE flags
+ *  - add FUSE_FSYNCDIR message
+ *
+ * 7.3:
+ *  - add FUSE_ACCESS message
+ *  - add FUSE_CREATE message
+ *  - add filehandle to fuse_setattr_in
+ *
+ * 7.4:
+ *  - add frsize to fuse_kstatfs
+ *  - clean up request size limit checking
+ *
+ * 7.5:
+ *  - add flags and max_write to fuse_init_out
+ *
+ * 7.6:
+ *  - add max_readahead to fuse_init_in and fuse_init_out
+ *
+ * 7.7:
+ *  - add FUSE_INTERRUPT message
+ *  - add POSIX file lock support
+ *
+ * 7.8:
+ *  - add lock_owner and flags fields to fuse_release_in
+ *  - add FUSE_BMAP message
+ *  - add FUSE_DESTROY message
+ *
  * 7.9:
  *  - new fuse_getattr_in input argument of GETATTR
  *  - add lk_flags in fuse_lk_in
index a6aa466..c6ad22f 100644 (file)
@@ -16,9 +16,14 @@ enum {
        TCA_ACT_STATS,
        TCA_ACT_PAD,
        TCA_ACT_COOKIE,
+       TCA_ACT_FLAGS,
        __TCA_ACT_MAX
 };
 
+#define TCA_ACT_FLAGS_NO_PERCPU_STATS 1 /* Don't use percpu allocator for
+                                        * action stats.
+                                        */
+
 #define TCA_ACT_MAX __TCA_ACT_MAX
 #define TCA_OLD_COMPAT (TCA_ACT_MAX+1)
 #define TCA_ACT_MAX_PRIO 32
index 7df026e..76421b8 100644 (file)
@@ -191,6 +191,7 @@ struct sockaddr_tipc {
 #define TIPC_GROUP_JOIN         135     /* Takes struct tipc_group_req* */
 #define TIPC_GROUP_LEAVE        136     /* No argument */
 #define TIPC_SOCK_RECVQ_USED    137     /* Default: none (read only) */
+#define TIPC_NODELAY            138     /* Default: false */
 
 /*
  * Flag values
index df82d5a..97e37d8 100644 (file)
@@ -502,7 +502,7 @@ int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
        return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
 }
 
-void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
+static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
 {
        int i;
 
index d27f3b6..3867864 100644 (file)
@@ -128,7 +128,7 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
 
                if (!dtab->n_buckets) /* Overflow check */
                        return -EINVAL;
-               cost += sizeof(struct hlist_head) * dtab->n_buckets;
+               cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
        }
 
        /* if map size is larger than memlock limit, reject it */
@@ -719,6 +719,32 @@ const struct bpf_map_ops dev_map_hash_ops = {
        .map_check_btf = map_check_no_btf,
 };
 
+static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
+                                      struct net_device *netdev)
+{
+       unsigned long flags;
+       u32 i;
+
+       spin_lock_irqsave(&dtab->index_lock, flags);
+       for (i = 0; i < dtab->n_buckets; i++) {
+               struct bpf_dtab_netdev *dev;
+               struct hlist_head *head;
+               struct hlist_node *next;
+
+               head = dev_map_index_hash(dtab, i);
+
+               hlist_for_each_entry_safe(dev, next, head, index_hlist) {
+                       if (netdev != dev->dev)
+                               continue;
+
+                       dtab->items--;
+                       hlist_del_rcu(&dev->index_hlist);
+                       call_rcu(&dev->rcu, __dev_map_entry_free);
+               }
+       }
+       spin_unlock_irqrestore(&dtab->index_lock, flags);
+}
+
 static int dev_map_notification(struct notifier_block *notifier,
                                ulong event, void *ptr)
 {
@@ -735,6 +761,11 @@ static int dev_map_notification(struct notifier_block *notifier,
                 */
                rcu_read_lock();
                list_for_each_entry_rcu(dtab, &dev_map_list, list) {
+                       if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+                               dev_map_hash_remove_netdev(dtab, netdev);
+                               continue;
+                       }
+
                        for (i = 0; i < dtab->map.max_entries; i++) {
                                struct bpf_dtab_netdev *dev, *odev;
 
index 985d01c..6d9ce95 100644 (file)
@@ -1327,24 +1327,32 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 {
        struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
 
+       kvfree(aux->func_info);
        free_used_maps(aux);
        bpf_prog_uncharge_memlock(aux->prog);
        security_bpf_prog_free(aux);
        bpf_prog_free(aux->prog);
 }
 
+static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
+{
+       bpf_prog_kallsyms_del_all(prog);
+       btf_put(prog->aux->btf);
+       bpf_prog_free_linfo(prog);
+
+       if (deferred)
+               call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
+       else
+               __bpf_prog_put_rcu(&prog->aux->rcu);
+}
+
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
        if (atomic_dec_and_test(&prog->aux->refcnt)) {
                perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
                /* bpf_prog_free_id() must be called first */
                bpf_prog_free_id(prog, do_idr_lock);
-               bpf_prog_kallsyms_del_all(prog);
-               btf_put(prog->aux->btf);
-               kvfree(prog->aux->func_info);
-               bpf_prog_free_linfo(prog);
-
-               call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
+               __bpf_prog_put_noref(prog, true);
        }
 }
 
@@ -1756,11 +1764,12 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
        return err;
 
 free_used_maps:
-       bpf_prog_free_linfo(prog);
-       kvfree(prog->aux->func_info);
-       btf_put(prog->aux->btf);
-       bpf_prog_kallsyms_del_subprogs(prog);
-       free_used_maps(prog->aux);
+       /* In case we have subprogs, we need to wait for a grace
+        * period before we can tear down JIT memory since symbols
+        * are already exposed under kallsyms.
+        */
+       __bpf_prog_put_noref(prog, prog->aux->func_cnt);
+       return err;
 free_prog:
        bpf_prog_uncharge_memlock(prog);
 free_prog_sec:
index c52bc91..c87ee64 100644 (file)
@@ -798,7 +798,8 @@ static int generate_sched_domains(cpumask_var_t **domains,
                    cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
                        continue;
 
-               if (is_sched_load_balance(cp))
+               if (is_sched_load_balance(cp) &&
+                   !cpumask_empty(cp->effective_cpus))
                        csa[csn++] = cp;
 
                /* skip @cp's subtree if not a partition root */
index 9ec0b0b..aec8dba 100644 (file)
@@ -5607,8 +5607,10 @@ static void perf_mmap_close(struct vm_area_struct *vma)
                perf_pmu_output_stop(event);
 
                /* now it's safe to free the pages */
-               atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
-               atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
+               if (!rb->aux_mmap_locked)
+                       atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
+               else
+                       atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
 
                /* this has to be the last one */
                rb_free_aux(rb);
@@ -6947,7 +6949,7 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
 static int __perf_pmu_output_stop(void *info)
 {
        struct perf_event *event = info;
-       struct pmu *pmu = event->pmu;
+       struct pmu *pmu = event->ctx->pmu;
        struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
        struct remote_output ro = {
                .rb     = event->rb,
@@ -10633,7 +10635,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 
        attr->size = size;
 
-       if (attr->__reserved_1)
+       if (attr->__reserved_1 || attr->__reserved_2)
                return -EINVAL;
 
        if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
index aff79e4..5a0fc0b 100755 (executable)
@@ -71,10 +71,13 @@ done | cpio --quiet -pd $cpio_dir >/dev/null 2>&1
 find $cpio_dir -type f -print0 |
        xargs -0 -P8 -n1 perl -pi -e 'BEGIN {undef $/;}; s/\/\*((?!SPDX).)*?\*\///smg;'
 
-# Create archive and try to normalize metadata for reproducibility
-tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
-    --owner=0 --group=0 --sort=name --numeric-owner \
-    -Jcf $tarfile -C $cpio_dir/ . > /dev/null
+# Create archive and try to normalize metadata for reproducibility.
+# For compatibility with older versions of tar, files are fed to tar
+# pre-sorted, as --sort=name might not be available.
+find $cpio_dir -printf "./%P\n" | LC_ALL=C sort | \
+    tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
+    --owner=0 --group=0 --numeric-owner --no-recursion \
+    -Jcf $tarfile -C $cpio_dir/ -T - > /dev/null
 
 echo "$src_files_md5" >  kernel/kheaders.md5
 echo "$obj_files_md5" >> kernel/kheaders.md5
index 9568a2f..04e83fd 100644 (file)
@@ -650,3 +650,243 @@ static int __init pm_qos_power_init(void)
 }
 
 late_initcall(pm_qos_power_init);
+
+/* Definitions related to the frequency QoS below. */
+
+/**
+ * freq_constraints_init - Initialize frequency QoS constraints.
+ * @qos: Frequency QoS constraints to initialize.
+ */
+void freq_constraints_init(struct freq_constraints *qos)
+{
+       struct pm_qos_constraints *c;
+
+       c = &qos->min_freq;
+       plist_head_init(&c->list);
+       c->target_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+       c->default_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+       c->no_constraint_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+       c->type = PM_QOS_MAX;
+       c->notifiers = &qos->min_freq_notifiers;
+       BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
+
+       c = &qos->max_freq;
+       plist_head_init(&c->list);
+       c->target_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+       c->default_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+       c->no_constraint_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+       c->type = PM_QOS_MIN;
+       c->notifiers = &qos->max_freq_notifiers;
+       BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
+}
+
+/**
+ * freq_qos_read_value - Get frequency QoS constraint for a given list.
+ * @qos: Constraints to evaluate.
+ * @type: QoS request type.
+ */
+s32 freq_qos_read_value(struct freq_constraints *qos,
+                       enum freq_qos_req_type type)
+{
+       s32 ret;
+
+       switch (type) {
+       case FREQ_QOS_MIN:
+               ret = IS_ERR_OR_NULL(qos) ?
+                       FREQ_QOS_MIN_DEFAULT_VALUE :
+                       pm_qos_read_value(&qos->min_freq);
+               break;
+       case FREQ_QOS_MAX:
+               ret = IS_ERR_OR_NULL(qos) ?
+                       FREQ_QOS_MAX_DEFAULT_VALUE :
+                       pm_qos_read_value(&qos->max_freq);
+               break;
+       default:
+               WARN_ON(1);
+               ret = 0;
+       }
+
+       return ret;
+}
+
+/**
+ * freq_qos_apply - Add/modify/remove frequency QoS request.
+ * @req: Constraint request to apply.
+ * @action: Action to perform (add/update/remove).
+ * @value: Value to assign to the QoS request.
+ */
+static int freq_qos_apply(struct freq_qos_request *req,
+                         enum pm_qos_req_action action, s32 value)
+{
+       int ret;
+
+       switch(req->type) {
+       case FREQ_QOS_MIN:
+               ret = pm_qos_update_target(&req->qos->min_freq, &req->pnode,
+                                          action, value);
+               break;
+       case FREQ_QOS_MAX:
+               ret = pm_qos_update_target(&req->qos->max_freq, &req->pnode,
+                                          action, value);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
+/**
+ * freq_qos_add_request - Insert new frequency QoS request into a given list.
+ * @qos: Constraints to update.
+ * @req: Preallocated request object.
+ * @type: Request type.
+ * @value: Request value.
+ *
+ * Insert a new entry into the @qos list of requests, recompute the effective
+ * QoS constraint value for that list and initialize the @req object.  The
+ * caller needs to save that object for later use in updates and removal.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_add_request(struct freq_constraints *qos,
+                        struct freq_qos_request *req,
+                        enum freq_qos_req_type type, s32 value)
+{
+       int ret;
+
+       if (IS_ERR_OR_NULL(qos) || !req)
+               return -EINVAL;
+
+       if (WARN(freq_qos_request_active(req),
+                "%s() called for active request\n", __func__))
+               return -EINVAL;
+
+       req->qos = qos;
+       req->type = type;
+       ret = freq_qos_apply(req, PM_QOS_ADD_REQ, value);
+       if (ret < 0) {
+               req->qos = NULL;
+               req->type = 0;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_add_request);
+
+/**
+ * freq_qos_update_request - Modify existing frequency QoS request.
+ * @req: Request to modify.
+ * @new_value: New request value.
+ *
+ * Update an existing frequency QoS request along with the effective constraint
+ * value for the list of requests it belongs to.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_update_request(struct freq_qos_request *req, s32 new_value)
+{
+       if (!req)
+               return -EINVAL;
+
+       if (WARN(!freq_qos_request_active(req),
+                "%s() called for unknown object\n", __func__))
+               return -EINVAL;
+
+       if (req->pnode.prio == new_value)
+               return 0;
+
+       return freq_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
+}
+EXPORT_SYMBOL_GPL(freq_qos_update_request);
+
+/**
+ * freq_qos_remove_request - Remove frequency QoS request from its list.
+ * @req: Request to remove.
+ *
+ * Remove the given frequency QoS request from the list of constraints it
+ * belongs to and recompute the effective constraint value for that list.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_remove_request(struct freq_qos_request *req)
+{
+       if (!req)
+               return -EINVAL;
+
+       if (WARN(!freq_qos_request_active(req),
+                "%s() called for unknown object\n", __func__))
+               return -EINVAL;
+
+       return freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+}
+EXPORT_SYMBOL_GPL(freq_qos_remove_request);
+
+/**
+ * freq_qos_add_notifier - Add frequency QoS change notifier.
+ * @qos: List of requests to add the notifier to.
+ * @type: Request type.
+ * @notifier: Notifier block to add.
+ */
+int freq_qos_add_notifier(struct freq_constraints *qos,
+                         enum freq_qos_req_type type,
+                         struct notifier_block *notifier)
+{
+       int ret;
+
+       if (IS_ERR_OR_NULL(qos) || !notifier)
+               return -EINVAL;
+
+       switch (type) {
+       case FREQ_QOS_MIN:
+               ret = blocking_notifier_chain_register(qos->min_freq.notifiers,
+                                                      notifier);
+               break;
+       case FREQ_QOS_MAX:
+               ret = blocking_notifier_chain_register(qos->max_freq.notifiers,
+                                                      notifier);
+               break;
+       default:
+               WARN_ON(1);
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_add_notifier);
+
+/**
+ * freq_qos_remove_notifier - Remove frequency QoS change notifier.
+ * @qos: List of requests to remove the notifier from.
+ * @type: Request type.
+ * @notifier: Notifier block to remove.
+ */
+int freq_qos_remove_notifier(struct freq_constraints *qos,
+                            enum freq_qos_req_type type,
+                            struct notifier_block *notifier)
+{
+       int ret;
+
+       if (IS_ERR_OR_NULL(qos) || !notifier)
+               return -EINVAL;
+
+       switch (type) {
+       case FREQ_QOS_MIN:
+               ret = blocking_notifier_chain_unregister(qos->min_freq.notifiers,
+                                                        notifier);
+               break;
+       case FREQ_QOS_MAX:
+               ret = blocking_notifier_chain_unregister(qos->max_freq.notifiers,
+                                                        notifier);
+               break;
+       default:
+               WARN_ON(1);
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_remove_notifier);
index b5667a2..49b835f 100644 (file)
@@ -1948,7 +1948,7 @@ next_level:
 static int
 build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
 {
-       enum s_alloc alloc_state;
+       enum s_alloc alloc_state = sa_none;
        struct sched_domain *sd;
        struct s_data d;
        struct rq *rq = NULL;
@@ -1956,6 +1956,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
        struct sched_domain_topology_level *tl_asym;
        bool has_asym = false;
 
+       if (WARN_ON(cpumask_empty(cpu_map)))
+               goto error;
+
        alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
        if (alloc_state != sa_rootdomain)
                goto error;
@@ -2026,7 +2029,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
        rcu_read_unlock();
 
        if (has_asym)
-               static_branch_enable_cpuslocked(&sched_asym_cpucapacity);
+               static_branch_inc_cpuslocked(&sched_asym_cpucapacity);
 
        if (rq && sched_debug_enabled) {
                pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
@@ -2121,8 +2124,12 @@ int sched_init_domains(const struct cpumask *cpu_map)
  */
 static void detach_destroy_domains(const struct cpumask *cpu_map)
 {
+       unsigned int cpu = cpumask_any(cpu_map);
        int i;
 
+       if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
+               static_branch_dec_cpuslocked(&sched_asym_cpucapacity);
+
        rcu_read_lock();
        for_each_cpu(i, cpu_map)
                cpu_attach_domain(NULL, &def_root_domain, i);
index c7031a2..998d50e 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright (C) 2010          SUSE Linux Products GmbH
  * Copyright (C) 2010          Tejun Heo <tj@kernel.org>
  */
+#include <linux/compiler.h>
 #include <linux/completion.h>
 #include <linux/cpu.h>
 #include <linux/init.h>
@@ -167,7 +168,7 @@ static void set_state(struct multi_stop_data *msdata,
        /* Reset ack counter. */
        atomic_set(&msdata->thread_ack, msdata->num_threads);
        smp_wmb();
-       msdata->state = newstate;
+       WRITE_ONCE(msdata->state, newstate);
 }
 
 /* Last one to ack a state moves to the next state. */
@@ -186,7 +187,7 @@ void __weak stop_machine_yield(const struct cpumask *cpumask)
 static int multi_cpu_stop(void *data)
 {
        struct multi_stop_data *msdata = data;
-       enum multi_stop_state curstate = MULTI_STOP_NONE;
+       enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
        int cpu = smp_processor_id(), err = 0;
        const struct cpumask *cpumask;
        unsigned long flags;
@@ -210,8 +211,9 @@ static int multi_cpu_stop(void *data)
        do {
                /* Chill out and ensure we re-read multi_stop_state. */
                stop_machine_yield(cpumask);
-               if (msdata->state != curstate) {
-                       curstate = msdata->state;
+               newstate = READ_ONCE(msdata->state);
+               if (newstate != curstate) {
+                       curstate = newstate;
                        switch (curstate) {
                        case MULTI_STOP_DISABLE_IRQ:
                                local_irq_disable();
index 0d4dc24..6560553 100644 (file)
@@ -164,7 +164,7 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
        struct hrtimer_clock_base *base;
 
        for (;;) {
-               base = timer->base;
+               base = READ_ONCE(timer->base);
                if (likely(base != &migration_base)) {
                        raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
                        if (likely(base == timer->base))
@@ -244,7 +244,7 @@ again:
                        return base;
 
                /* See the comment in lock_hrtimer_base() */
-               timer->base = &migration_base;
+               WRITE_ONCE(timer->base, &migration_base);
                raw_spin_unlock(&base->cpu_base->lock);
                raw_spin_lock(&new_base->cpu_base->lock);
 
@@ -253,10 +253,10 @@ again:
                        raw_spin_unlock(&new_base->cpu_base->lock);
                        raw_spin_lock(&base->cpu_base->lock);
                        new_cpu_base = this_cpu_base;
-                       timer->base = base;
+                       WRITE_ONCE(timer->base, base);
                        goto again;
                }
-               timer->base = new_base;
+               WRITE_ONCE(timer->base, new_base);
        } else {
                if (new_cpu_base != this_cpu_base &&
                    hrtimer_check_target(timer, new_base)) {
index 92a4319..42d512f 100644 (file)
@@ -266,7 +266,7 @@ static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
 /**
  * thread_group_sample_cputime - Sample cputime for a given task
  * @tsk:       Task for which cputime needs to be started
- * @iimes:     Storage for time samples
+ * @samples:   Storage for time samples
  *
  * Called from sys_getitimer() to calculate the expiry time of an active
  * timer. That means group cputime accounting is already active. Called
@@ -1038,12 +1038,12 @@ unlock:
  * member of @pct->bases[CLK].nextevt. False otherwise
  */
 static inline bool
-task_cputimers_expired(const u64 *sample, struct posix_cputimers *pct)
+task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
 {
        int i;
 
        for (i = 0; i < CPUCLOCK_MAX; i++) {
-               if (sample[i] >= pct->bases[i].nextevt)
+               if (samples[i] >= pct->bases[i].nextevt)
                        return true;
        }
        return false;
index 142b076..dbd6905 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/seqlock.h>
 #include <linux/bitops.h>
 
+#include "timekeeping.h"
+
 /**
  * struct clock_read_data - data required to read from sched_clock()
  *
index 0892e38..a9dfa04 100644 (file)
@@ -272,9 +272,11 @@ int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
                goto out;
        }
 
+       mutex_lock(&event_mutex);
        ret = perf_trace_event_init(tp_event, p_event);
        if (ret)
                destroy_local_trace_kprobe(tp_event);
+       mutex_unlock(&event_mutex);
 out:
        kfree(func);
        return ret;
@@ -282,8 +284,10 @@ out:
 
 void perf_kprobe_destroy(struct perf_event *p_event)
 {
+       mutex_lock(&event_mutex);
        perf_trace_event_close(p_event);
        perf_trace_event_unreg(p_event);
+       mutex_unlock(&event_mutex);
 
        destroy_local_trace_kprobe(p_event->tp_event);
 }
index 57648c5..7482a14 100644 (file)
@@ -679,6 +679,8 @@ static bool synth_field_signed(char *type)
 {
        if (str_has_prefix(type, "u"))
                return false;
+       if (strcmp(type, "gfp_t") == 0)
+               return false;
 
        return true;
 }
index e630e7f..45f57fd 100644 (file)
@@ -214,9 +214,10 @@ int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
                return -1;
        }
 
-       res->tv_sec = 0;
-       res->tv_nsec = ns;
-
+       if (likely(res)) {
+               res->tv_sec = 0;
+               res->tv_nsec = ns;
+       }
        return 0;
 }
 
@@ -245,7 +246,7 @@ __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
                ret = clock_getres_fallback(clock, &ts);
 #endif
 
-       if (likely(!ret)) {
+       if (likely(!ret && res)) {
                res->tv_sec = ts.tv_sec;
                res->tv_nsec = ts.tv_nsec;
        }
index 54728d2..d4bcfd8 100644 (file)
@@ -172,7 +172,6 @@ int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
        if (err < 0)
                goto out_uninit_mvrp;
 
-       vlan->nest_level = dev_get_nest_level(real_dev) + 1;
        err = register_netdevice(dev);
        if (err < 0)
                goto out_uninit_mvrp;
index 93eadf1..e5bff5c 100644 (file)
@@ -489,36 +489,6 @@ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
        dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 }
 
-/*
- * vlan network devices have devices nesting below it, and are a special
- * "super class" of normal network devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key vlan_netdev_xmit_lock_key;
-static struct lock_class_key vlan_netdev_addr_lock_key;
-
-static void vlan_dev_set_lockdep_one(struct net_device *dev,
-                                    struct netdev_queue *txq,
-                                    void *_subclass)
-{
-       lockdep_set_class_and_subclass(&txq->_xmit_lock,
-                                      &vlan_netdev_xmit_lock_key,
-                                      *(int *)_subclass);
-}
-
-static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
-{
-       lockdep_set_class_and_subclass(&dev->addr_list_lock,
-                                      &vlan_netdev_addr_lock_key,
-                                      subclass);
-       netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
-}
-
-static int vlan_dev_get_lock_subclass(struct net_device *dev)
-{
-       return vlan_dev_priv(dev)->nest_level;
-}
-
 static const struct header_ops vlan_header_ops = {
        .create  = vlan_dev_hard_header,
        .parse   = eth_header_parse,
@@ -609,8 +579,6 @@ static int vlan_dev_init(struct net_device *dev)
 
        SET_NETDEV_DEVTYPE(dev, &vlan_type);
 
-       vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
-
        vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
        if (!vlan->vlan_pcpu_stats)
                return -ENOMEM;
@@ -812,7 +780,6 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_netpoll_cleanup    = vlan_dev_netpoll_cleanup,
 #endif
        .ndo_fix_features       = vlan_dev_fix_features,
-       .ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
        .ndo_get_iflink         = vlan_dev_get_iflink,
 };
 
index b7528e7..0ce530a 100644 (file)
@@ -668,7 +668,7 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
                mask |= EPOLLHUP;
 
        /* readable? */
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* writable? */
index d78938e..5b0b20e 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/pkt_sched.h>
@@ -193,14 +195,18 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
        unsigned char *ogm_buff;
        u32 random_seqno;
 
+       mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
        /* randomize initial seqno to avoid collision */
        get_random_bytes(&random_seqno, sizeof(random_seqno));
        atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
 
        hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
        ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
-       if (!ogm_buff)
+       if (!ogm_buff) {
+               mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
                return -ENOMEM;
+       }
 
        hard_iface->bat_iv.ogm_buff = ogm_buff;
 
@@ -212,35 +218,59 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
        batadv_ogm_packet->reserved = 0;
        batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
 
+       mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+
        return 0;
 }
 
 static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
 {
+       mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
        kfree(hard_iface->bat_iv.ogm_buff);
        hard_iface->bat_iv.ogm_buff = NULL;
+
+       mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
 }
 
 static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
 {
        struct batadv_ogm_packet *batadv_ogm_packet;
-       unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
+       void *ogm_buff;
 
-       batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
+       mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
+       ogm_buff = hard_iface->bat_iv.ogm_buff;
+       if (!ogm_buff)
+               goto unlock;
+
+       batadv_ogm_packet = ogm_buff;
        ether_addr_copy(batadv_ogm_packet->orig,
                        hard_iface->net_dev->dev_addr);
        ether_addr_copy(batadv_ogm_packet->prev_sender,
                        hard_iface->net_dev->dev_addr);
+
+unlock:
+       mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
 }
 
 static void
 batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
 {
        struct batadv_ogm_packet *batadv_ogm_packet;
-       unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
+       void *ogm_buff;
 
-       batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
+       mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
+       ogm_buff = hard_iface->bat_iv.ogm_buff;
+       if (!ogm_buff)
+               goto unlock;
+
+       batadv_ogm_packet = ogm_buff;
        batadv_ogm_packet->ttl = BATADV_TTL;
+
+unlock:
+       mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
 }
 
 /* when do we schedule our own ogm to be sent */
@@ -742,7 +772,11 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
        }
 }
 
-static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
+/**
+ * batadv_iv_ogm_schedule_buff() - schedule submission of hardif ogm buffer
+ * @hard_iface: interface whose ogm buffer should be transmitted
+ */
+static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
 {
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff;
@@ -753,9 +787,7 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
        u16 tvlv_len = 0;
        unsigned long send_time;
 
-       if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
-           hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
-               return;
+       lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex);
 
        /* the interface gets activated here to avoid race conditions between
         * the moment of activating the interface in
@@ -823,6 +855,17 @@ out:
                batadv_hardif_put(primary_if);
 }
 
+static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
+{
+       if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
+           hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
+               return;
+
+       mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+       batadv_iv_ogm_schedule_buff(hard_iface);
+       mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+}
+
 /**
  * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over iterface
  * @orig_node: originator which reproadcasted the OGMs directly
index dc4f743..8033f24 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/lockdep.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/random.h>
 #include <linux/rculist.h>
@@ -256,14 +257,12 @@ static void batadv_v_ogm_queue_on_if(struct sk_buff *skb,
 }
 
 /**
- * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
- * @work: work queue item
+ * batadv_v_ogm_send_softif() - periodic worker broadcasting the own OGM
+ * @bat_priv: the bat priv with all the soft interface information
  */
-static void batadv_v_ogm_send(struct work_struct *work)
+static void batadv_v_ogm_send_softif(struct batadv_priv *bat_priv)
 {
        struct batadv_hard_iface *hard_iface;
-       struct batadv_priv_bat_v *bat_v;
-       struct batadv_priv *bat_priv;
        struct batadv_ogm2_packet *ogm_packet;
        struct sk_buff *skb, *skb_tmp;
        unsigned char *ogm_buff;
@@ -271,8 +270,7 @@ static void batadv_v_ogm_send(struct work_struct *work)
        u16 tvlv_len = 0;
        int ret;
 
-       bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
-       bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
+       lockdep_assert_held(&bat_priv->bat_v.ogm_buff_mutex);
 
        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
                goto out;
@@ -363,6 +361,23 @@ out:
        return;
 }
 
+/**
+ * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
+ * @work: work queue item
+ */
+static void batadv_v_ogm_send(struct work_struct *work)
+{
+       struct batadv_priv_bat_v *bat_v;
+       struct batadv_priv *bat_priv;
+
+       bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
+       bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
+
+       mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
+       batadv_v_ogm_send_softif(bat_priv);
+       mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
+}
+
 /**
  * batadv_v_ogm_aggr_work() - OGM queue periodic task per interface
  * @work: work queue item
@@ -424,11 +439,15 @@ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
        struct batadv_priv *bat_priv = netdev_priv(primary_iface->soft_iface);
        struct batadv_ogm2_packet *ogm_packet;
 
+       mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
        if (!bat_priv->bat_v.ogm_buff)
-               return;
+               goto unlock;
 
        ogm_packet = (struct batadv_ogm2_packet *)bat_priv->bat_v.ogm_buff;
        ether_addr_copy(ogm_packet->orig, primary_iface->net_dev->dev_addr);
+
+unlock:
+       mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
 }
 
 /**
@@ -1050,6 +1069,8 @@ int batadv_v_ogm_init(struct batadv_priv *bat_priv)
        atomic_set(&bat_priv->bat_v.ogm_seqno, random_seqno);
        INIT_DELAYED_WORK(&bat_priv->bat_v.ogm_wq, batadv_v_ogm_send);
 
+       mutex_init(&bat_priv->bat_v.ogm_buff_mutex);
+
        return 0;
 }
 
@@ -1061,7 +1082,11 @@ void batadv_v_ogm_free(struct batadv_priv *bat_priv)
 {
        cancel_delayed_work_sync(&bat_priv->bat_v.ogm_wq);
 
+       mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
+
        kfree(bat_priv->bat_v.ogm_buff);
        bat_priv->bat_v.ogm_buff = NULL;
        bat_priv->bat_v.ogm_buff_len = 0;
+
+       mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
 }
index c90e473..afb5228 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/kref.h>
 #include <linux/limits.h>
 #include <linux/list.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
 #include <linux/rculist.h>
@@ -929,6 +930,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
        INIT_LIST_HEAD(&hard_iface->list);
        INIT_HLIST_HEAD(&hard_iface->neigh_list);
 
+       mutex_init(&hard_iface->bat_iv.ogm_buff_mutex);
        spin_lock_init(&hard_iface->neigh_list_lock);
        kref_init(&hard_iface->refcount);
 
index 9cbed6f..5ee8e9a 100644 (file)
@@ -740,36 +740,6 @@ static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
        return 0;
 }
 
-/* batman-adv network devices have devices nesting below it and are a special
- * "super class" of normal network devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key batadv_netdev_xmit_lock_key;
-static struct lock_class_key batadv_netdev_addr_lock_key;
-
-/**
- * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
- * @dev: device which owns the tx queue
- * @txq: tx queue to modify
- * @_unused: always NULL
- */
-static void batadv_set_lockdep_class_one(struct net_device *dev,
-                                        struct netdev_queue *txq,
-                                        void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
-}
-
-/**
- * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
- * @dev: network device to modify
- */
-static void batadv_set_lockdep_class(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
-       netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
-}
-
 /**
  * batadv_softif_init_late() - late stage initialization of soft interface
  * @dev: registered network device to modify
@@ -783,8 +753,6 @@ static int batadv_softif_init_late(struct net_device *dev)
        int ret;
        size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM;
 
-       batadv_set_lockdep_class(dev);
-
        bat_priv = netdev_priv(dev);
        bat_priv->soft_iface = dev;
 
index be7c02a..4d7f1ba 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/if.h>
 #include <linux/if_ether.h>
 #include <linux/kref.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/sched.h> /* for linux/wait.h */
@@ -81,6 +82,9 @@ struct batadv_hard_iface_bat_iv {
 
        /** @ogm_seqno: OGM sequence number - used to identify each OGM */
        atomic_t ogm_seqno;
+
+       /** @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len */
+       struct mutex ogm_buff_mutex;
 };
 
 /**
@@ -1539,6 +1543,9 @@ struct batadv_priv_bat_v {
        /** @ogm_seqno: OGM sequence number - used to identify each OGM */
        atomic_t ogm_seqno;
 
+       /** @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len */
+       struct mutex ogm_buff_mutex;
+
        /** @ogm_wq: workqueue used to schedule OGM transmissions */
        struct delayed_work ogm_wq;
 };
index bb55d92..4febc82 100644 (file)
@@ -571,15 +571,7 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
        return err < 0 ? NET_XMIT_DROP : err;
 }
 
-static int bt_dev_init(struct net_device *dev)
-{
-       netdev_lockdep_set_classes(dev);
-
-       return 0;
-}
-
 static const struct net_device_ops netdev_ops = {
-       .ndo_init               = bt_dev_init,
        .ndo_start_xmit         = bt_xmit,
 };
 
index 94ddf19..5f508c5 100644 (file)
@@ -460,7 +460,7 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock,
        if (sk->sk_state == BT_LISTEN)
                return bt_accept_poll(sk);
 
-       if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+       if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
@@ -470,7 +470,7 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock,
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= EPOLLHUP;
 
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        if (sk->sk_state == BT_CLOSED)
index 681b728..e804a30 100644 (file)
@@ -24,8 +24,6 @@
 const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
 EXPORT_SYMBOL_GPL(nf_br_ops);
 
-static struct lock_class_key bridge_netdev_addr_lock_key;
-
 /* net device transmit always called with BH disabled */
 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -108,11 +106,6 @@ out:
        return NETDEV_TX_OK;
 }
 
-static void br_set_lockdep_class(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
-}
-
 static int br_dev_init(struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
@@ -150,7 +143,6 @@ static int br_dev_init(struct net_device *dev)
                br_mdb_hash_fini(br);
                br_fdb_hash_fini(br);
        }
-       br_set_lockdep_class(dev);
 
        return err;
 }
index b1d3248..284b366 100644 (file)
@@ -75,8 +75,9 @@ static inline unsigned long hold_time(const struct net_bridge *br)
 static inline int has_expired(const struct net_bridge *br,
                                  const struct net_bridge_fdb_entry *fdb)
 {
-       return !fdb->is_static && !fdb->added_by_external_learn &&
-               time_before_eq(fdb->updated + hold_time(br), jiffies);
+       return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
+              !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
+              time_before_eq(fdb->updated + hold_time(br), jiffies);
 }
 
 static void fdb_rcu_free(struct rcu_head *head)
@@ -197,7 +198,7 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f,
 {
        trace_fdb_delete(br, f);
 
-       if (f->is_static)
+       if (test_bit(BR_FDB_STATIC, &f->flags))
                fdb_del_hw_addr(br, f->key.addr.addr);
 
        hlist_del_init_rcu(&f->fdb_node);
@@ -224,7 +225,7 @@ static void fdb_delete_local(struct net_bridge *br,
                if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
                    (!vid || br_vlan_find(vg, vid))) {
                        f->dst = op;
-                       f->added_by_user = 0;
+                       clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
                        return;
                }
        }
@@ -235,7 +236,7 @@ static void fdb_delete_local(struct net_bridge *br,
        if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
            (!vid || (v && br_vlan_should_use(v)))) {
                f->dst = NULL;
-               f->added_by_user = 0;
+               clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
                return;
        }
 
@@ -250,7 +251,8 @@ void br_fdb_find_delete_local(struct net_bridge *br,
 
        spin_lock_bh(&br->hash_lock);
        f = br_fdb_find(br, addr, vid);
-       if (f && f->is_local && !f->added_by_user && f->dst == p)
+       if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
+           !test_bit(BR_FDB_ADDED_BY_USER, &f->flags) && f->dst == p)
                fdb_delete_local(br, p, f);
        spin_unlock_bh(&br->hash_lock);
 }
@@ -265,7 +267,8 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
        spin_lock_bh(&br->hash_lock);
        vg = nbp_vlan_group(p);
        hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
-               if (f->dst == p && f->is_local && !f->added_by_user) {
+               if (f->dst == p && test_bit(BR_FDB_LOCAL, &f->flags) &&
+                   !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) {
                        /* delete old one */
                        fdb_delete_local(br, p, f);
 
@@ -306,7 +309,8 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
 
        /* If old entry was unassociated with any port, then delete it. */
        f = br_fdb_find(br, br->dev->dev_addr, 0);
-       if (f && f->is_local && !f->dst && !f->added_by_user)
+       if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
+           !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
                fdb_delete_local(br, NULL, f);
 
        fdb_insert(br, NULL, newaddr, 0);
@@ -321,7 +325,8 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
                if (!br_vlan_should_use(v))
                        continue;
                f = br_fdb_find(br, br->dev->dev_addr, v->vid);
-               if (f && f->is_local && !f->dst && !f->added_by_user)
+               if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
+                   !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
                        fdb_delete_local(br, NULL, f);
                fdb_insert(br, NULL, newaddr, v->vid);
        }
@@ -346,7 +351,8 @@ void br_fdb_cleanup(struct work_struct *work)
        hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
                unsigned long this_timer;
 
-               if (f->is_static || f->added_by_external_learn)
+               if (test_bit(BR_FDB_STATIC, &f->flags) ||
+                   test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags))
                        continue;
                this_timer = f->updated + delay;
                if (time_after(this_timer, now)) {
@@ -373,7 +379,7 @@ void br_fdb_flush(struct net_bridge *br)
 
        spin_lock_bh(&br->hash_lock);
        hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
-               if (!f->is_static)
+               if (!test_bit(BR_FDB_STATIC, &f->flags))
                        fdb_delete(br, f, true);
        }
        spin_unlock_bh(&br->hash_lock);
@@ -397,10 +403,11 @@ void br_fdb_delete_by_port(struct net_bridge *br,
                        continue;
 
                if (!do_all)
-                       if (f->is_static || (vid && f->key.vlan_id != vid))
+                       if (test_bit(BR_FDB_STATIC, &f->flags) ||
+                           (vid && f->key.vlan_id != vid))
                                continue;
 
-               if (f->is_local)
+               if (test_bit(BR_FDB_LOCAL, &f->flags))
                        fdb_delete_local(br, p, f);
                else
                        fdb_delete(br, f, true);
@@ -469,8 +476,8 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
                fe->port_no = f->dst->port_no;
                fe->port_hi = f->dst->port_no >> 8;
 
-               fe->is_local = f->is_local;
-               if (!f->is_static)
+               fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
+               if (!test_bit(BR_FDB_STATIC, &f->flags))
                        fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
                ++fe;
                ++num;
@@ -484,8 +491,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
                                               struct net_bridge_port *source,
                                               const unsigned char *addr,
                                               __u16 vid,
-                                              unsigned char is_local,
-                                              unsigned char is_static)
+                                              unsigned long flags)
 {
        struct net_bridge_fdb_entry *fdb;
 
@@ -494,12 +500,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
                memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
                fdb->dst = source;
                fdb->key.vlan_id = vid;
-               fdb->is_local = is_local;
-               fdb->is_static = is_static;
-               fdb->added_by_user = 0;
-               fdb->added_by_external_learn = 0;
-               fdb->offloaded = 0;
-               fdb->is_sticky = 0;
+               fdb->flags = flags;
                fdb->updated = fdb->used = jiffies;
                if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
                                                  &fdb->rhnode,
@@ -526,14 +527,15 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                /* it is okay to have multiple ports with same
                 * address, just use the first one.
                 */
-               if (fdb->is_local)
+               if (test_bit(BR_FDB_LOCAL, &fdb->flags))
                        return 0;
                br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
                       source ? source->dev->name : br->dev->name, addr, vid);
                fdb_delete(br, fdb, true);
        }
 
-       fdb = fdb_create(br, source, addr, vid, 1, 1);
+       fdb = fdb_create(br, source, addr, vid,
+                        BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC));
        if (!fdb)
                return -ENOMEM;
 
@@ -555,7 +557,7 @@ int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 }
 
 void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
-                  const unsigned char *addr, u16 vid, bool added_by_user)
+                  const unsigned char *addr, u16 vid, unsigned long flags)
 {
        struct net_bridge_fdb_entry *fdb;
        bool fdb_modified = false;
@@ -572,7 +574,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
        fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
        if (likely(fdb)) {
                /* attempt to update an entry for a local interface */
-               if (unlikely(fdb->is_local)) {
+               if (unlikely(test_bit(BR_FDB_LOCAL, &fdb->flags))) {
                        if (net_ratelimit())
                                br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
                                        source->dev->name, addr, vid);
@@ -580,30 +582,30 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
                        unsigned long now = jiffies;
 
                        /* fastpath: update of existing entry */
-                       if (unlikely(source != fdb->dst && !fdb->is_sticky)) {
+                       if (unlikely(source != fdb->dst &&
+                                    !test_bit(BR_FDB_STICKY, &fdb->flags))) {
                                fdb->dst = source;
                                fdb_modified = true;
                                /* Take over HW learned entry */
-                               if (unlikely(fdb->added_by_external_learn))
-                                       fdb->added_by_external_learn = 0;
+                               if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN,
+                                                     &fdb->flags)))
+                                       clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
+                                                 &fdb->flags);
                        }
                        if (now != fdb->updated)
                                fdb->updated = now;
-                       if (unlikely(added_by_user))
-                               fdb->added_by_user = 1;
+                       if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
+                               set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
                        if (unlikely(fdb_modified)) {
-                               trace_br_fdb_update(br, source, addr, vid, added_by_user);
+                               trace_br_fdb_update(br, source, addr, vid, flags);
                                fdb_notify(br, fdb, RTM_NEWNEIGH, true);
                        }
                }
        } else {
                spin_lock(&br->hash_lock);
-               fdb = fdb_create(br, source, addr, vid, 0, 0);
+               fdb = fdb_create(br, source, addr, vid, flags);
                if (fdb) {
-                       if (unlikely(added_by_user))
-                               fdb->added_by_user = 1;
-                       trace_br_fdb_update(br, source, addr, vid,
-                                           added_by_user);
+                       trace_br_fdb_update(br, source, addr, vid, flags);
                        fdb_notify(br, fdb, RTM_NEWNEIGH, true);
                }
                /* else  we lose race and someone else inserts
@@ -616,9 +618,9 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 static int fdb_to_nud(const struct net_bridge *br,
                      const struct net_bridge_fdb_entry *fdb)
 {
-       if (fdb->is_local)
+       if (test_bit(BR_FDB_LOCAL, &fdb->flags))
                return NUD_PERMANENT;
-       else if (fdb->is_static)
+       else if (test_bit(BR_FDB_STATIC, &fdb->flags))
                return NUD_NOARP;
        else if (has_expired(br, fdb))
                return NUD_STALE;
@@ -648,11 +650,11 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
        ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
        ndm->ndm_state   = fdb_to_nud(br, fdb);
 
-       if (fdb->offloaded)
+       if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
                ndm->ndm_flags |= NTF_OFFLOADED;
-       if (fdb->added_by_external_learn)
+       if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
                ndm->ndm_flags |= NTF_EXT_LEARNED;
-       if (fdb->is_sticky)
+       if (test_bit(BR_FDB_STICKY, &fdb->flags))
                ndm->ndm_flags |= NTF_STICKY;
 
        if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
@@ -799,7 +801,7 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
                         const u8 *addr, u16 state, u16 flags, u16 vid,
                         u8 ndm_flags)
 {
-       u8 is_sticky = !!(ndm_flags & NTF_STICKY);
+       bool is_sticky = !!(ndm_flags & NTF_STICKY);
        struct net_bridge_fdb_entry *fdb;
        bool modified = false;
 
@@ -823,7 +825,7 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
                if (!(flags & NLM_F_CREATE))
                        return -ENOENT;
 
-               fdb = fdb_create(br, source, addr, vid, 0, 0);
+               fdb = fdb_create(br, source, addr, vid, 0);
                if (!fdb)
                        return -ENOMEM;
 
@@ -840,34 +842,28 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
 
        if (fdb_to_nud(br, fdb) != state) {
                if (state & NUD_PERMANENT) {
-                       fdb->is_local = 1;
-                       if (!fdb->is_static) {
-                               fdb->is_static = 1;
+                       set_bit(BR_FDB_LOCAL, &fdb->flags);
+                       if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
                                fdb_add_hw_addr(br, addr);
-                       }
                } else if (state & NUD_NOARP) {
-                       fdb->is_local = 0;
-                       if (!fdb->is_static) {
-                               fdb->is_static = 1;
+                       clear_bit(BR_FDB_LOCAL, &fdb->flags);
+                       if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
                                fdb_add_hw_addr(br, addr);
-                       }
                } else {
-                       fdb->is_local = 0;
-                       if (fdb->is_static) {
-                               fdb->is_static = 0;
+                       clear_bit(BR_FDB_LOCAL, &fdb->flags);
+                       if (test_and_clear_bit(BR_FDB_STATIC, &fdb->flags))
                                fdb_del_hw_addr(br, addr);
-                       }
                }
 
                modified = true;
        }
 
-       if (is_sticky != fdb->is_sticky) {
-               fdb->is_sticky = is_sticky;
+       if (is_sticky != test_bit(BR_FDB_STICKY, &fdb->flags)) {
+               change_bit(BR_FDB_STICKY, &fdb->flags);
                modified = true;
        }
 
-       fdb->added_by_user = 1;
+       set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
 
        fdb->used = jiffies;
        if (modified) {
@@ -892,7 +888,7 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
                }
                local_bh_disable();
                rcu_read_lock();
-               br_fdb_update(br, p, addr, vid, true);
+               br_fdb_update(br, p, addr, vid, BIT(BR_FDB_ADDED_BY_USER));
                rcu_read_unlock();
                local_bh_enable();
        } else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
@@ -1064,7 +1060,7 @@ int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
        rcu_read_lock();
        hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
                /* We only care for static entries */
-               if (!f->is_static)
+               if (!test_bit(BR_FDB_STATIC, &f->flags))
                        continue;
                err = dev_uc_add(p->dev, f->key.addr.addr);
                if (err)
@@ -1078,7 +1074,7 @@ done:
 rollback:
        hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
                /* We only care for static entries */
-               if (!tmp->is_static)
+               if (!test_bit(BR_FDB_STATIC, &tmp->flags))
                        continue;
                if (tmp == f)
                        break;
@@ -1097,7 +1093,7 @@ void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
        rcu_read_lock();
        hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
                /* We only care for static entries */
-               if (!f->is_static)
+               if (!test_bit(BR_FDB_STATIC, &f->flags))
                        continue;
 
                dev_uc_del(p->dev, f->key.addr.addr);
@@ -1119,14 +1115,15 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
 
        fdb = br_fdb_find(br, addr, vid);
        if (!fdb) {
-               fdb = fdb_create(br, p, addr, vid, 0, 0);
+               unsigned long flags = BIT(BR_FDB_ADDED_BY_EXT_LEARN);
+
+               if (swdev_notify)
+                       flags |= BIT(BR_FDB_ADDED_BY_USER);
+               fdb = fdb_create(br, p, addr, vid, flags);
                if (!fdb) {
                        err = -ENOMEM;
                        goto err_unlock;
                }
-               if (swdev_notify)
-                       fdb->added_by_user = 1;
-               fdb->added_by_external_learn = 1;
                fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
        } else {
                fdb->updated = jiffies;
@@ -1136,17 +1133,17 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
                        modified = true;
                }
 
-               if (fdb->added_by_external_learn) {
+               if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
                        /* Refresh entry */
                        fdb->used = jiffies;
-               } else if (!fdb->added_by_user) {
+               } else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
                        /* Take over SW learned entry */
-                       fdb->added_by_external_learn = 1;
+                       set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
                        modified = true;
                }
 
                if (swdev_notify)
-                       fdb->added_by_user = 1;
+                       set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
 
                if (modified)
                        fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
@@ -1168,7 +1165,7 @@ int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
        spin_lock_bh(&br->hash_lock);
 
        fdb = br_fdb_find(br, addr, vid);
-       if (fdb && fdb->added_by_external_learn)
+       if (fdb && test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
                fdb_delete(br, fdb, swdev_notify);
        else
                err = -ENOENT;
@@ -1186,8 +1183,8 @@ void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
        spin_lock_bh(&br->hash_lock);
 
        fdb = br_fdb_find(br, addr, vid);
-       if (fdb)
-               fdb->offloaded = offloaded;
+       if (fdb && offloaded != test_bit(BR_FDB_OFFLOADED, &fdb->flags))
+               change_bit(BR_FDB_OFFLOADED, &fdb->flags);
 
        spin_unlock_bh(&br->hash_lock);
 }
@@ -1206,7 +1203,7 @@ void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
        spin_lock_bh(&p->br->hash_lock);
        hlist_for_each_entry(f, &p->br->fdb_list, fdb_node) {
                if (f->dst == p && f->key.vlan_id == vid)
-                       f->offloaded = 0;
+                       clear_bit(BR_FDB_OFFLOADED, &f->flags);
        }
        spin_unlock_bh(&p->br->hash_lock);
 }
index 09b1dd8..f37b050 100644 (file)
@@ -88,7 +88,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
        /* insert into forwarding database after filtering to avoid spoofing */
        br = p->br;
        if (p->flags & BR_LEARNING)
-               br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
+               br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0);
 
        local_rcv = !!(br->dev->flags & IFF_PROMISC);
        if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
@@ -151,7 +151,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
        if (dst) {
                unsigned long now = jiffies;
 
-               if (dst->is_local)
+               if (test_bit(BR_FDB_LOCAL, &dst->flags))
                        return br_pass_frame_up(skb);
 
                if (now != dst->used)
@@ -184,7 +184,7 @@ static void __br_handle_local_finish(struct sk_buff *skb)
        if ((p->flags & BR_LEARNING) &&
            !br_opt_get(p->br, BROPT_NO_LL_LEARN) &&
            br_should_learn(p, skb, &vid))
-               br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
+               br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, 0);
 }
 
 /* note: already called with rcu_read_lock */
index ce2ab14..08742bf 100644 (file)
@@ -172,6 +172,16 @@ struct net_bridge_vlan_group {
        u16                             pvid;
 };
 
+/* bridge fdb flags */
+enum {
+       BR_FDB_LOCAL,
+       BR_FDB_STATIC,
+       BR_FDB_STICKY,
+       BR_FDB_ADDED_BY_USER,
+       BR_FDB_ADDED_BY_EXT_LEARN,
+       BR_FDB_OFFLOADED,
+};
+
 struct net_bridge_fdb_key {
        mac_addr addr;
        u16 vlan_id;
@@ -183,12 +193,7 @@ struct net_bridge_fdb_entry {
 
        struct net_bridge_fdb_key       key;
        struct hlist_node               fdb_node;
-       unsigned char                   is_local:1,
-                                       is_static:1,
-                                       is_sticky:1,
-                                       added_by_user:1,
-                                       added_by_external_learn:1,
-                                       offloaded:1;
+       unsigned long                   flags;
 
        /* write-heavy members should not affect lookups */
        unsigned long                   updated ____cacheline_aligned_in_smp;
@@ -566,7 +571,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf, unsigned long count,
 int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                  const unsigned char *addr, u16 vid);
 void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
-                  const unsigned char *addr, u16 vid, bool added_by_user);
+                  const unsigned char *addr, u16 vid, unsigned long flags);
 
 int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
                  struct net_device *dev, const unsigned char *addr, u16 vid);
index 921310d..015209b 100644 (file)
@@ -129,15 +129,19 @@ br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
                br_switchdev_fdb_call_notifiers(false, fdb->key.addr.addr,
                                                fdb->key.vlan_id,
                                                fdb->dst->dev,
-                                               fdb->added_by_user,
-                                               fdb->offloaded);
+                                               test_bit(BR_FDB_ADDED_BY_USER,
+                                                        &fdb->flags),
+                                               test_bit(BR_FDB_OFFLOADED,
+                                                        &fdb->flags));
                break;
        case RTM_NEWNEIGH:
                br_switchdev_fdb_call_notifiers(true, fdb->key.addr.addr,
                                                fdb->key.vlan_id,
                                                fdb->dst->dev,
-                                               fdb->added_by_user,
-                                               fdb->offloaded);
+                                               test_bit(BR_FDB_ADDED_BY_USER,
+                                                        &fdb->flags),
+                                               test_bit(BR_FDB_OFFLOADED,
+                                                        &fdb->flags));
                break;
        }
 }
index 506d614..8096732 100644 (file)
@@ -95,7 +95,7 @@ slow_path:
         * This may also be a clone skbuff, we could preserve the geometry for
         * the copies but probably not worth the effort.
         */
-       ip_frag_init(skb, hlen, ll_rs, frag_max_size, &state);
+       ip_frag_init(skb, hlen, ll_rs, frag_max_size, false, &state);
 
        while (state.left > 0) {
                struct sk_buff *skb2;
index 13ea920..ef14da5 100644 (file)
@@ -953,7 +953,7 @@ static __poll_t caif_poll(struct file *file,
                mask |= EPOLLRDHUP;
 
        /* readable? */
-       if (!skb_queue_empty(&sk->sk_receive_queue) ||
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
                (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= EPOLLIN | EPOLLRDNORM;
 
index c210fc1..da3c24e 100644 (file)
@@ -97,7 +97,7 @@ int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
        if (error)
                goto out_err;
 
-       if (sk->sk_receive_queue.prev != skb)
+       if (READ_ONCE(sk->sk_receive_queue.prev) != skb)
                goto out;
 
        /* Socket shut down? */
@@ -278,7 +278,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
                        break;
 
                sk_busy_loop(sk, flags & MSG_DONTWAIT);
-       } while (sk->sk_receive_queue.prev != *last);
+       } while (READ_ONCE(sk->sk_receive_queue.prev) != *last);
 
        error = -EAGAIN;
 
@@ -767,7 +767,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
        mask = 0;
 
        /* exceptional events? */
-       if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+       if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
@@ -777,7 +777,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
                mask |= EPOLLHUP;
 
        /* readable? */
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Connection-based need to check for termination and startup */
index 74f5939..bb15800 100644 (file)
 #include "net-sysfs.h"
 
 #define MAX_GRO_SKBS 8
+#define MAX_NEST_DEV 8
 
 /* This should be increased if a protocol with a bigger head is added. */
 #define GRO_MAX_HEAD (MAX_HEADER + 128)
@@ -392,88 +393,6 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 EXPORT_PER_CPU_SYMBOL(softnet_data);
 
-#ifdef CONFIG_LOCKDEP
-/*
- * register_netdevice() inits txq->_xmit_lock and sets lockdep class
- * according to dev->type
- */
-static const unsigned short netdev_lock_type[] = {
-        ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
-        ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
-        ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
-        ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
-        ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
-        ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
-        ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
-        ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
-        ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
-        ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
-        ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
-        ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
-        ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
-        ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
-        ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
-
-static const char *const netdev_lock_name[] = {
-       "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
-       "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
-       "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
-       "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
-       "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
-       "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
-       "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
-       "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
-       "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
-       "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
-       "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
-       "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
-       "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
-       "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
-       "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
-
-static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
-static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
-
-static inline unsigned short netdev_lock_pos(unsigned short dev_type)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
-               if (netdev_lock_type[i] == dev_type)
-                       return i;
-       /* the last key is used by default */
-       return ARRAY_SIZE(netdev_lock_type) - 1;
-}
-
-static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
-                                                unsigned short dev_type)
-{
-       int i;
-
-       i = netdev_lock_pos(dev_type);
-       lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
-                                  netdev_lock_name[i]);
-}
-
-static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
-{
-       int i;
-
-       i = netdev_lock_pos(dev->type);
-       lockdep_set_class_and_name(&dev->addr_list_lock,
-                                  &netdev_addr_lock_key[i],
-                                  netdev_lock_name[i]);
-}
-#else
-static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
-                                                unsigned short dev_type)
-{
-}
-static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
-{
-}
-#endif
-
 /*******************************************************************************
  *
  *             Protocol management and registration routines
@@ -6711,6 +6630,9 @@ struct netdev_adjacent {
        /* upper master flag, there can only be one master device per list */
        bool master;
 
+       /* lookup ignore flag */
+       bool ignore;
+
        /* counter for the number of times this device was added to us */
        u16 ref_nr;
 
@@ -6733,7 +6655,7 @@ static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
        return NULL;
 }
 
-static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
+static int ____netdev_has_upper_dev(struct net_device *upper_dev, void *data)
 {
        struct net_device *dev = data;
 
@@ -6754,7 +6676,7 @@ bool netdev_has_upper_dev(struct net_device *dev,
 {
        ASSERT_RTNL();
 
-       return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
+       return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
                                             upper_dev);
 }
 EXPORT_SYMBOL(netdev_has_upper_dev);
@@ -6772,7 +6694,7 @@ EXPORT_SYMBOL(netdev_has_upper_dev);
 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
                                  struct net_device *upper_dev)
 {
-       return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
+       return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
                                               upper_dev);
 }
 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
@@ -6816,6 +6738,22 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_master_upper_dev_get);
 
+static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
+{
+       struct netdev_adjacent *upper;
+
+       ASSERT_RTNL();
+
+       if (list_empty(&dev->adj_list.upper))
+               return NULL;
+
+       upper = list_first_entry(&dev->adj_list.upper,
+                                struct netdev_adjacent, list);
+       if (likely(upper->master) && !upper->ignore)
+               return upper->dev;
+       return NULL;
+}
+
 /**
  * netdev_has_any_lower_dev - Check if device is linked to some device
  * @dev: device
@@ -6866,6 +6804,23 @@ struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
 
+static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
+                                                 struct list_head **iter,
+                                                 bool *ignore)
+{
+       struct netdev_adjacent *upper;
+
+       upper = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+       if (&upper->list == &dev->adj_list.upper)
+               return NULL;
+
+       *iter = &upper->list;
+       *ignore = upper->ignore;
+
+       return upper->dev;
+}
+
 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
                                                    struct list_head **iter)
 {
@@ -6883,34 +6838,111 @@ static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
        return upper->dev;
 }
 
+static int __netdev_walk_all_upper_dev(struct net_device *dev,
+                                      int (*fn)(struct net_device *dev,
+                                                void *data),
+                                      void *data)
+{
+       struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
+       bool ignore;
+
+       now = dev;
+       iter = &dev->adj_list.upper;
+
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       udev = __netdev_next_upper_dev(now, &iter, &ignore);
+                       if (!udev)
+                               break;
+                       if (ignore)
+                               continue;
+
+                       next = udev;
+                       niter = &udev->adj_list.upper;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
+       }
+
+       return 0;
+}
+
 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
                                  int (*fn)(struct net_device *dev,
                                            void *data),
                                  void *data)
 {
-       struct net_device *udev;
-       struct list_head *iter;
-       int ret;
+       struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
 
-       for (iter = &dev->adj_list.upper,
-            udev = netdev_next_upper_dev_rcu(dev, &iter);
-            udev;
-            udev = netdev_next_upper_dev_rcu(dev, &iter)) {
-               /* first is the upper device itself */
-               ret = fn(udev, data);
-               if (ret)
-                       return ret;
+       now = dev;
+       iter = &dev->adj_list.upper;
 
-               /* then look at all of its upper devices */
-               ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
-               if (ret)
-                       return ret;
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       udev = netdev_next_upper_dev_rcu(now, &iter);
+                       if (!udev)
+                               break;
+
+                       next = udev;
+                       niter = &udev->adj_list.upper;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
        }
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
 
+static bool __netdev_has_upper_dev(struct net_device *dev,
+                                  struct net_device *upper_dev)
+{
+       ASSERT_RTNL();
+
+       return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
+                                          upper_dev);
+}
+
 /**
  * netdev_lower_get_next_private - Get the next ->private from the
  *                                lower neighbour list
@@ -7007,34 +7039,119 @@ static struct net_device *netdev_next_lower_dev(struct net_device *dev,
        return lower->dev;
 }
 
+static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
+                                                 struct list_head **iter,
+                                                 bool *ignore)
+{
+       struct netdev_adjacent *lower;
+
+       lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+       if (&lower->list == &dev->adj_list.lower)
+               return NULL;
+
+       *iter = &lower->list;
+       *ignore = lower->ignore;
+
+       return lower->dev;
+}
+
 int netdev_walk_all_lower_dev(struct net_device *dev,
                              int (*fn)(struct net_device *dev,
                                        void *data),
                              void *data)
 {
-       struct net_device *ldev;
-       struct list_head *iter;
-       int ret;
+       struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
 
-       for (iter = &dev->adj_list.lower,
-            ldev = netdev_next_lower_dev(dev, &iter);
-            ldev;
-            ldev = netdev_next_lower_dev(dev, &iter)) {
-               /* first is the lower device itself */
-               ret = fn(ldev, data);
-               if (ret)
-                       return ret;
+       now = dev;
+       iter = &dev->adj_list.lower;
 
-               /* then look at all of its lower devices */
-               ret = netdev_walk_all_lower_dev(ldev, fn, data);
-               if (ret)
-                       return ret;
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       ldev = netdev_next_lower_dev(now, &iter);
+                       if (!ldev)
+                               break;
+
+                       next = ldev;
+                       niter = &ldev->adj_list.lower;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
        }
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
 
+static int __netdev_walk_all_lower_dev(struct net_device *dev,
+                                      int (*fn)(struct net_device *dev,
+                                                void *data),
+                                      void *data)
+{
+       struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
+       bool ignore;
+
+       now = dev;
+       iter = &dev->adj_list.lower;
+
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       ldev = __netdev_next_lower_dev(now, &iter, &ignore);
+                       if (!ldev)
+                               break;
+                       if (ignore)
+                               continue;
+
+                       next = ldev;
+                       niter = &ldev->adj_list.lower;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
+       }
+
+       return 0;
+}
+
 static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
                                                    struct list_head **iter)
 {
@@ -7049,28 +7166,99 @@ static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
        return lower->dev;
 }
 
-int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
-                                 int (*fn)(struct net_device *dev,
-                                           void *data),
-                                 void *data)
+static u8 __netdev_upper_depth(struct net_device *dev)
+{
+       struct net_device *udev;
+       struct list_head *iter;
+       u8 max_depth = 0;
+       bool ignore;
+
+       for (iter = &dev->adj_list.upper,
+            udev = __netdev_next_upper_dev(dev, &iter, &ignore);
+            udev;
+            udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
+               if (ignore)
+                       continue;
+               if (max_depth < udev->upper_level)
+                       max_depth = udev->upper_level;
+       }
+
+       return max_depth;
+}
+
+static u8 __netdev_lower_depth(struct net_device *dev)
 {
        struct net_device *ldev;
        struct list_head *iter;
-       int ret;
+       u8 max_depth = 0;
+       bool ignore;
 
        for (iter = &dev->adj_list.lower,
-            ldev = netdev_next_lower_dev_rcu(dev, &iter);
+            ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
             ldev;
-            ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
-               /* first is the lower device itself */
-               ret = fn(ldev, data);
-               if (ret)
-                       return ret;
+            ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
+               if (ignore)
+                       continue;
+               if (max_depth < ldev->lower_level)
+                       max_depth = ldev->lower_level;
+       }
 
-               /* then look at all of its lower devices */
-               ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
-               if (ret)
-                       return ret;
+       return max_depth;
+}
+
+static int __netdev_update_upper_level(struct net_device *dev, void *data)
+{
+       dev->upper_level = __netdev_upper_depth(dev) + 1;
+       return 0;
+}
+
+static int __netdev_update_lower_level(struct net_device *dev, void *data)
+{
+       dev->lower_level = __netdev_lower_depth(dev) + 1;
+       return 0;
+}
+
+int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
+                                 int (*fn)(struct net_device *dev,
+                                           void *data),
+                                 void *data)
+{
+       struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
+
+       now = dev;
+       iter = &dev->adj_list.lower;
+
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       ldev = netdev_next_lower_dev_rcu(now, &iter);
+                       if (!ldev)
+                               break;
+
+                       next = ldev;
+                       niter = &ldev->adj_list.lower;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
        }
 
        return 0;
@@ -7174,6 +7362,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
        adj->master = master;
        adj->ref_nr = 1;
        adj->private = private;
+       adj->ignore = false;
        dev_hold(adj_dev);
 
        pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
@@ -7324,14 +7513,17 @@ static int __netdev_upper_dev_link(struct net_device *dev,
                return -EBUSY;
 
        /* To prevent loops, check if dev is not upper device to upper_dev. */
-       if (netdev_has_upper_dev(upper_dev, dev))
+       if (__netdev_has_upper_dev(upper_dev, dev))
                return -EBUSY;
 
+       if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
+               return -EMLINK;
+
        if (!master) {
-               if (netdev_has_upper_dev(dev, upper_dev))
+               if (__netdev_has_upper_dev(dev, upper_dev))
                        return -EEXIST;
        } else {
-               master_dev = netdev_master_upper_dev_get(dev);
+               master_dev = __netdev_master_upper_dev_get(dev);
                if (master_dev)
                        return master_dev == upper_dev ? -EEXIST : -EBUSY;
        }
@@ -7353,6 +7545,13 @@ static int __netdev_upper_dev_link(struct net_device *dev,
        if (ret)
                goto rollback;
 
+       __netdev_update_upper_level(dev, NULL);
+       __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
+
+       __netdev_update_lower_level(upper_dev, NULL);
+       __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
+                                   NULL);
+
        return 0;
 
 rollback:
@@ -7435,9 +7634,96 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 
        call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
                                      &changeupper_info.info);
+
+       __netdev_update_upper_level(dev, NULL);
+       __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
+
+       __netdev_update_lower_level(upper_dev, NULL);
+       __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
+                                   NULL);
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
+static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
+                                     struct net_device *lower_dev,
+                                     bool val)
+{
+       struct netdev_adjacent *adj;
+
+       adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
+       if (adj)
+               adj->ignore = val;
+
+       adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
+       if (adj)
+               adj->ignore = val;
+}
+
+static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
+                                       struct net_device *lower_dev)
+{
+       __netdev_adjacent_dev_set(upper_dev, lower_dev, true);
+}
+
+static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
+                                      struct net_device *lower_dev)
+{
+       __netdev_adjacent_dev_set(upper_dev, lower_dev, false);
+}
+
+int netdev_adjacent_change_prepare(struct net_device *old_dev,
+                                  struct net_device *new_dev,
+                                  struct net_device *dev,
+                                  struct netlink_ext_ack *extack)
+{
+       int err;
+
+       if (!new_dev)
+               return 0;
+
+       if (old_dev && new_dev != old_dev)
+               netdev_adjacent_dev_disable(dev, old_dev);
+
+       err = netdev_upper_dev_link(new_dev, dev, extack);
+       if (err) {
+               if (old_dev && new_dev != old_dev)
+                       netdev_adjacent_dev_enable(dev, old_dev);
+               return err;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(netdev_adjacent_change_prepare);
+
+void netdev_adjacent_change_commit(struct net_device *old_dev,
+                                  struct net_device *new_dev,
+                                  struct net_device *dev)
+{
+       if (!new_dev || !old_dev)
+               return;
+
+       if (new_dev == old_dev)
+               return;
+
+       netdev_adjacent_dev_enable(dev, old_dev);
+       netdev_upper_dev_unlink(old_dev, dev);
+}
+EXPORT_SYMBOL(netdev_adjacent_change_commit);
+
+void netdev_adjacent_change_abort(struct net_device *old_dev,
+                                 struct net_device *new_dev,
+                                 struct net_device *dev)
+{
+       if (!new_dev)
+               return;
+
+       if (old_dev && new_dev != old_dev)
+               netdev_adjacent_dev_enable(dev, old_dev);
+
+       netdev_upper_dev_unlink(new_dev, dev);
+}
+EXPORT_SYMBOL(netdev_adjacent_change_abort);
+
 /**
  * netdev_bonding_info_change - Dispatch event about slave change
  * @dev: device
@@ -7551,25 +7837,6 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
 
-int dev_get_nest_level(struct net_device *dev)
-{
-       struct net_device *lower = NULL;
-       struct list_head *iter;
-       int max_nest = -1;
-       int nest;
-
-       ASSERT_RTNL();
-
-       netdev_for_each_lower_dev(dev, lower, iter) {
-               nest = dev_get_nest_level(lower);
-               if (max_nest < nest)
-                       max_nest = nest;
-       }
-
-       return max_nest + 1;
-}
-EXPORT_SYMBOL(dev_get_nest_level);
-
 /**
  * netdev_lower_change - Dispatch event about lower device state change
  * @lower_dev: device
@@ -8376,7 +8643,8 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
                        return -EINVAL;
                }
 
-               if (prog->aux->id == prog_id) {
+               /* prog->aux->id may be 0 for orphaned device-bound progs */
+               if (prog->aux->id && prog->aux->id == prog_id) {
                        bpf_prog_put(prog);
                        return 0;
                }
@@ -8844,7 +9112,7 @@ static void netdev_init_one_queue(struct net_device *dev,
 {
        /* Initialize queue lock */
        spin_lock_init(&queue->_xmit_lock);
-       netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
+       lockdep_set_class(&queue->_xmit_lock, &dev->qdisc_xmit_lock_key);
        queue->xmit_lock_owner = -1;
        netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
        queue->dev = dev;
@@ -8891,6 +9159,43 @@ void netif_tx_stop_all_queues(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_tx_stop_all_queues);
 
+static void netdev_register_lockdep_key(struct net_device *dev)
+{
+       lockdep_register_key(&dev->qdisc_tx_busylock_key);
+       lockdep_register_key(&dev->qdisc_running_key);
+       lockdep_register_key(&dev->qdisc_xmit_lock_key);
+       lockdep_register_key(&dev->addr_list_lock_key);
+}
+
+static void netdev_unregister_lockdep_key(struct net_device *dev)
+{
+       lockdep_unregister_key(&dev->qdisc_tx_busylock_key);
+       lockdep_unregister_key(&dev->qdisc_running_key);
+       lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
+       lockdep_unregister_key(&dev->addr_list_lock_key);
+}
+
+void netdev_update_lockdep_key(struct net_device *dev)
+{
+       struct netdev_queue *queue;
+       int i;
+
+       lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
+       lockdep_unregister_key(&dev->addr_list_lock_key);
+
+       lockdep_register_key(&dev->qdisc_xmit_lock_key);
+       lockdep_register_key(&dev->addr_list_lock_key);
+
+       lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               queue = netdev_get_tx_queue(dev, i);
+
+               lockdep_set_class(&queue->_xmit_lock,
+                                 &dev->qdisc_xmit_lock_key);
+       }
+}
+EXPORT_SYMBOL(netdev_update_lockdep_key);
+
 /**
  *     register_netdevice      - register a network device
  *     @dev: device to register
@@ -8925,7 +9230,7 @@ int register_netdevice(struct net_device *dev)
        BUG_ON(!net);
 
        spin_lock_init(&dev->addr_list_lock);
-       netdev_set_addr_lockdep_class(dev);
+       lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
 
        ret = dev_get_valid_name(net, dev, dev->name);
        if (ret < 0)
@@ -9442,8 +9747,12 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
        dev_net_set(dev, &init_net);
 
+       netdev_register_lockdep_key(dev);
+
        dev->gso_max_size = GSO_MAX_SIZE;
        dev->gso_max_segs = GSO_MAX_SEGS;
+       dev->upper_level = 1;
+       dev->lower_level = 1;
 
        INIT_LIST_HEAD(&dev->napi_list);
        INIT_LIST_HEAD(&dev->unreg_list);
@@ -9524,6 +9833,8 @@ void free_netdev(struct net_device *dev)
        free_percpu(dev->pcpu_refcnt);
        dev->pcpu_refcnt = NULL;
 
+       netdev_unregister_lockdep_key(dev);
+
        /*  Compatibility with error handling in drivers */
        if (dev->reg_state == NETREG_UNINITIALIZED) {
                netdev_freemem(dev);
@@ -9692,7 +10003,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
        rcu_barrier();
 
-       new_nsid = peernet2id_alloc(dev_net(dev), net);
+       new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
        /* If there is an ifindex conflict assign a new one */
        if (__dev_get_by_index(net, dev->ifindex))
                new_ifindex = dev_new_index(net);
index 6393ba9..2f949b5 100644 (file)
@@ -637,7 +637,7 @@ int dev_uc_sync(struct net_device *to, struct net_device *from)
        if (to->addr_len != from->addr_len)
                return -EINVAL;
 
-       netif_addr_lock_nested(to);
+       netif_addr_lock(to);
        err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
        if (!err)
                __dev_set_rx_mode(to);
@@ -667,7 +667,7 @@ int dev_uc_sync_multiple(struct net_device *to, struct net_device *from)
        if (to->addr_len != from->addr_len)
                return -EINVAL;
 
-       netif_addr_lock_nested(to);
+       netif_addr_lock(to);
        err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
        if (!err)
                __dev_set_rx_mode(to);
@@ -691,7 +691,7 @@ void dev_uc_unsync(struct net_device *to, struct net_device *from)
                return;
 
        netif_addr_lock_bh(from);
-       netif_addr_lock_nested(to);
+       netif_addr_lock(to);
        __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
        __dev_set_rx_mode(to);
        netif_addr_unlock(to);
@@ -858,7 +858,7 @@ int dev_mc_sync(struct net_device *to, struct net_device *from)
        if (to->addr_len != from->addr_len)
                return -EINVAL;
 
-       netif_addr_lock_nested(to);
+       netif_addr_lock(to);
        err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
        if (!err)
                __dev_set_rx_mode(to);
@@ -888,7 +888,7 @@ int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
        if (to->addr_len != from->addr_len)
                return -EINVAL;
 
-       netif_addr_lock_nested(to);
+       netif_addr_lock(to);
        err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
        if (!err)
                __dev_set_rx_mode(to);
@@ -912,7 +912,7 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
                return;
 
        netif_addr_lock_bh(from);
-       netif_addr_lock_nested(to);
+       netif_addr_lock(to);
        __hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
        __dev_set_rx_mode(to);
        netif_addr_unlock(to);
index c763106..cd9bc67 100644 (file)
@@ -1396,11 +1396,13 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr)
 
 static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
 {
-       struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+       struct ethtool_wolinfo wol;
 
        if (!dev->ethtool_ops->get_wol)
                return -EOPNOTSUPP;
 
+       memset(&wol, 0, sizeof(struct ethtool_wolinfo));
+       wol.cmd = ETHTOOL_GWOL;
        dev->ethtool_ops->get_wol(dev, &wol);
 
        if (copy_to_user(useraddr, &wol, sizeof(wol)))
index dbf502c..ca87165 100644 (file)
@@ -178,27 +178,6 @@ int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
        mutex_unlock(&flow_dissector_mutex);
        return 0;
 }
-/**
- * skb_flow_get_be16 - extract be16 entity
- * @skb: sk_buff to extract from
- * @poff: offset to extract at
- * @data: raw buffer pointer to the packet
- * @hlen: packet header length
- *
- * The function will try to retrieve a be32 entity at
- * offset poff
- */
-static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
-                               void *data, int hlen)
-{
-       __be16 *u, _u;
-
-       u = __skb_header_pointer(skb, poff, sizeof(_u), data, hlen, &_u);
-       if (u)
-               return *u;
-
-       return 0;
-}
 
 /**
  * __skb_flow_get_ports - extract the upper layer ports and return them
@@ -234,6 +213,72 @@ __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
 }
 EXPORT_SYMBOL(__skb_flow_get_ports);
 
+static bool icmp_has_id(u8 type)
+{
+       switch (type) {
+       case ICMP_ECHO:
+       case ICMP_ECHOREPLY:
+       case ICMP_TIMESTAMP:
+       case ICMP_TIMESTAMPREPLY:
+       case ICMPV6_ECHO_REQUEST:
+       case ICMPV6_ECHO_REPLY:
+               return true;
+       }
+
+       return false;
+}
+
+/**
+ * skb_flow_get_icmp_tci - extract ICMP(6) Type, Code and Identifier fields
+ * @skb: sk_buff to extract from
+ * @key_icmp: struct flow_dissector_key_icmp to fill
+ * @data: raw buffer pointer to the packet
+ * @thoff: offset to extract at
+ * @hlen: packet header length
+ */
+void skb_flow_get_icmp_tci(const struct sk_buff *skb,
+                          struct flow_dissector_key_icmp *key_icmp,
+                          void *data, int thoff, int hlen)
+{
+       struct icmphdr *ih, _ih;
+
+       ih = __skb_header_pointer(skb, thoff, sizeof(_ih), data, hlen, &_ih);
+       if (!ih)
+               return;
+
+       key_icmp->type = ih->type;
+       key_icmp->code = ih->code;
+
+       /* As we use 0 to signal that the Id field is not present,
+        * avoid confusion with packets without such field
+        */
+       if (icmp_has_id(ih->type))
+               key_icmp->id = ih->un.echo.id ? : 1;
+       else
+               key_icmp->id = 0;
+}
+EXPORT_SYMBOL(skb_flow_get_icmp_tci);
+
+/* If FLOW_DISSECTOR_KEY_ICMP is set, dissect an ICMP packet
+ * using skb_flow_get_icmp_tci().
+ */
+static void __skb_flow_dissect_icmp(const struct sk_buff *skb,
+                                   struct flow_dissector *flow_dissector,
+                                   void *target_container,
+                                   void *data, int thoff, int hlen)
+{
+       struct flow_dissector_key_icmp *key_icmp;
+
+       if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ICMP))
+               return;
+
+       key_icmp = skb_flow_dissector_target(flow_dissector,
+                                            FLOW_DISSECTOR_KEY_ICMP,
+                                            target_container);
+
+       skb_flow_get_icmp_tci(skb, key_icmp, data, thoff, hlen);
+}
+
 void skb_flow_dissect_meta(const struct sk_buff *skb,
                           struct flow_dissector *flow_dissector,
                           void *target_container)
@@ -884,7 +929,6 @@ bool __skb_flow_dissect(const struct net *net,
        struct flow_dissector_key_basic *key_basic;
        struct flow_dissector_key_addrs *key_addrs;
        struct flow_dissector_key_ports *key_ports;
-       struct flow_dissector_key_icmp *key_icmp;
        struct flow_dissector_key_tags *key_tags;
        struct flow_dissector_key_vlan *key_vlan;
        struct bpf_prog *attached = NULL;
@@ -1329,6 +1373,12 @@ ip_proto_again:
                                       data, nhoff, hlen);
                break;
 
+       case IPPROTO_ICMP:
+       case IPPROTO_ICMPV6:
+               __skb_flow_dissect_icmp(skb, flow_dissector, target_container,
+                                       data, nhoff, hlen);
+               break;
+
        default:
                break;
        }
@@ -1342,14 +1392,6 @@ ip_proto_again:
                                                        data, hlen);
        }
 
-       if (dissector_uses_key(flow_dissector,
-                              FLOW_DISSECTOR_KEY_ICMP)) {
-               key_icmp = skb_flow_dissector_target(flow_dissector,
-                                                    FLOW_DISSECTOR_KEY_ICMP,
-                                                    target_container);
-               key_icmp->icmp = skb_flow_get_be16(skb, nhoff, data, hlen);
-       }
-
        /* Process result of IP proto processing */
        switch (fdret) {
        case FLOW_DISSECT_RET_PROTO_AGAIN:
@@ -1384,32 +1426,23 @@ out_bad:
 }
 EXPORT_SYMBOL(__skb_flow_dissect);
 
-static u32 hashrnd __read_mostly;
+static siphash_key_t hashrnd __read_mostly;
 static __always_inline void __flow_hash_secret_init(void)
 {
        net_get_random_once(&hashrnd, sizeof(hashrnd));
 }
 
-static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
-                                            u32 keyval)
+static const void *flow_keys_hash_start(const struct flow_keys *flow)
 {
-       return jhash2(words, length, keyval);
-}
-
-static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
-{
-       const void *p = flow;
-
-       BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
-       return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
+       BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT);
+       return &flow->FLOW_KEYS_HASH_START_FIELD;
 }
 
 static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
 {
        size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
+
        BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
-       BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
-                    sizeof(*flow) - sizeof(flow->addrs));
 
        switch (flow->control.addr_type) {
        case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
@@ -1422,7 +1455,7 @@ static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
                diff -= sizeof(flow->addrs.tipckey);
                break;
        }
-       return (sizeof(*flow) - diff) / sizeof(u32);
+       return sizeof(*flow) - diff;
 }
 
 __be32 flow_get_u32_src(const struct flow_keys *flow)
@@ -1455,6 +1488,9 @@ __be32 flow_get_u32_dst(const struct flow_keys *flow)
 }
 EXPORT_SYMBOL(flow_get_u32_dst);
 
+/* Sort the source and destination IP (and the ports if the IPs are the same),
+ * to have consistent hash within the two directions
+ */
 static inline void __flow_hash_consistentify(struct flow_keys *keys)
 {
        int addr_diff, i;
@@ -1488,14 +1524,15 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
        }
 }
 
-static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
+static inline u32 __flow_hash_from_keys(struct flow_keys *keys,
+                                       const siphash_key_t *keyval)
 {
        u32 hash;
 
        __flow_hash_consistentify(keys);
 
-       hash = __flow_hash_words(flow_keys_hash_start(keys),
-                                flow_keys_hash_length(keys), keyval);
+       hash = siphash(flow_keys_hash_start(keys),
+                      flow_keys_hash_length(keys), keyval);
        if (!hash)
                hash = 1;
 
@@ -1505,12 +1542,13 @@ static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
 u32 flow_hash_from_keys(struct flow_keys *keys)
 {
        __flow_hash_secret_init();
-       return __flow_hash_from_keys(keys, hashrnd);
+       return __flow_hash_from_keys(keys, &hashrnd);
 }
 EXPORT_SYMBOL(flow_hash_from_keys);
 
 static inline u32 ___skb_get_hash(const struct sk_buff *skb,
-                                 struct flow_keys *keys, u32 keyval)
+                                 struct flow_keys *keys,
+                                 const siphash_key_t *keyval)
 {
        skb_flow_dissect_flow_keys(skb, keys,
                                   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
@@ -1558,7 +1596,7 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
                           &keys, NULL, 0, 0, 0,
                           FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
-       return __flow_hash_from_keys(&keys, hashrnd);
+       return __flow_hash_from_keys(&keys, &hashrnd);
 }
 EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
 
@@ -1578,13 +1616,14 @@ void __skb_get_hash(struct sk_buff *skb)
 
        __flow_hash_secret_init();
 
-       hash = ___skb_get_hash(skb, &keys, hashrnd);
+       hash = ___skb_get_hash(skb, &keys, &hashrnd);
 
        __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
 }
 EXPORT_SYMBOL(__skb_get_hash);
 
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
+__u32 skb_get_hash_perturb(const struct sk_buff *skb,
+                          const siphash_key_t *perturb)
 {
        struct flow_keys keys;
 
index f93785e..74cfb8b 100644 (file)
@@ -88,11 +88,16 @@ static int bpf_lwt_input_reroute(struct sk_buff *skb)
        int err = -EINVAL;
 
        if (skb->protocol == htons(ETH_P_IP)) {
+               struct net_device *dev = skb_dst(skb)->dev;
                struct iphdr *iph = ip_hdr(skb);
 
+               dev_hold(dev);
+               skb_dst_drop(skb);
                err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
-                                          iph->tos, skb_dst(skb)->dev);
+                                          iph->tos, dev);
+               dev_put(dev);
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               skb_dst_drop(skb);
                err = ipv6_stub->ipv6_route_input(skb);
        } else {
                err = -EAFNOSUPPORT;
index 6d3e482..3940284 100644 (file)
@@ -246,11 +246,11 @@ static int __peernet2id(struct net *net, struct net *peer)
 }
 
 static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
-                             struct nlmsghdr *nlh);
+                             struct nlmsghdr *nlh, gfp_t gfp);
 /* This function returns the id of a peer netns. If no id is assigned, one will
  * be allocated and returned.
  */
-int peernet2id_alloc(struct net *net, struct net *peer)
+int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
 {
        bool alloc = false, alive = false;
        int id;
@@ -269,7 +269,7 @@ int peernet2id_alloc(struct net *net, struct net *peer)
        id = __peernet2id_alloc(net, peer, &alloc);
        spin_unlock_bh(&net->nsid_lock);
        if (alloc && id >= 0)
-               rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL);
+               rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);
        if (alive)
                put_net(peer);
        return id;
@@ -479,6 +479,7 @@ struct net *copy_net_ns(unsigned long flags,
 
        if (rv < 0) {
 put_userns:
+               key_remove_domain(net->key_domain);
                put_user_ns(user_ns);
                net_drop_ns(net);
 dec_ucounts:
@@ -533,7 +534,8 @@ static void unhash_nsid(struct net *net, struct net *last)
                        idr_remove(&tmp->netns_ids, id);
                spin_unlock_bh(&tmp->nsid_lock);
                if (id >= 0)
-                       rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL);
+                       rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
+                                         GFP_KERNEL);
                if (tmp == last)
                        break;
        }
@@ -766,7 +768,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
        spin_unlock_bh(&net->nsid_lock);
        if (err >= 0) {
                rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
-                                 nlh);
+                                 nlh, GFP_KERNEL);
                err = 0;
        } else if (err == -ENOSPC && nsid >= 0) {
                err = -EEXIST;
@@ -1054,7 +1056,7 @@ end:
 }
 
 static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
-                             struct nlmsghdr *nlh)
+                             struct nlmsghdr *nlh, gfp_t gfp)
 {
        struct net_fill_args fillargs = {
                .portid = portid,
@@ -1065,7 +1067,7 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
        struct sk_buff *msg;
        int err = -ENOMEM;
 
-       msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
+       msg = nlmsg_new(rtnl_net_get_size(), gfp);
        if (!msg)
                goto out;
 
@@ -1073,7 +1075,7 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
        if (err < 0)
                goto err_out;
 
-       rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, 0);
+       rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
        return;
 
 err_out:
index 49fa910..000eddb 100644 (file)
@@ -1537,7 +1537,7 @@ static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
 
 static int rtnl_fill_link_netnsid(struct sk_buff *skb,
                                  const struct net_device *dev,
-                                 struct net *src_net)
+                                 struct net *src_net, gfp_t gfp)
 {
        bool put_iflink = false;
 
@@ -1545,7 +1545,7 @@ static int rtnl_fill_link_netnsid(struct sk_buff *skb,
                struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
 
                if (!net_eq(dev_net(dev), link_net)) {
-                       int id = peernet2id_alloc(src_net, link_net);
+                       int id = peernet2id_alloc(src_net, link_net, gfp);
 
                        if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
                                return -EMSGSIZE;
@@ -1639,7 +1639,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
                            int type, u32 pid, u32 seq, u32 change,
                            unsigned int flags, u32 ext_filter_mask,
                            u32 event, int *new_nsid, int new_ifindex,
-                           int tgt_netnsid)
+                           int tgt_netnsid, gfp_t gfp)
 {
        struct ifinfomsg *ifm;
        struct nlmsghdr *nlh;
@@ -1731,7 +1731,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
                        goto nla_put_failure;
        }
 
-       if (rtnl_fill_link_netnsid(skb, dev, src_net))
+       if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
                goto nla_put_failure;
 
        if (new_nsid &&
@@ -2057,7 +2057,7 @@ walk_entries:
                                               NETLINK_CB(cb->skb).portid,
                                               nlh->nlmsg_seq, 0, flags,
                                               ext_filter_mask, 0, NULL, 0,
-                                              netnsid);
+                                              netnsid, GFP_KERNEL);
 
                        if (err < 0) {
                                if (likely(skb->len))
@@ -2411,6 +2411,7 @@ static int do_set_master(struct net_device *dev, int ifindex,
                        err = ops->ndo_del_slave(upper_dev, dev);
                        if (err)
                                return err;
+                       netdev_update_lockdep_key(dev);
                } else {
                        return -EOPNOTSUPP;
                }
@@ -3428,7 +3429,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
        err = rtnl_fill_ifinfo(nskb, dev, net,
                               RTM_NEWLINK, NETLINK_CB(skb).portid,
                               nlh->nlmsg_seq, 0, 0, ext_filter_mask,
-                              0, NULL, 0, netnsid);
+                              0, NULL, 0, netnsid, GFP_KERNEL);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in if_nlmsg_size */
                WARN_ON(err == -EMSGSIZE);
@@ -3634,7 +3635,7 @@ struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
 
        err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
                               type, 0, 0, change, 0, 0, event,
-                              new_nsid, new_ifindex, -1);
+                              new_nsid, new_ifindex, -1, flags);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in if_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
@@ -4079,7 +4080,7 @@ static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
        ndm = nlmsg_data(nlh);
        if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
            ndm->ndm_flags || ndm->ndm_type) {
-               NL_SET_ERR_MSG(extack, "Invalid values in header for fbd dump request");
+               NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
                return -EINVAL;
        }
 
index 5cb567e..71787f7 100644 (file)
@@ -1125,7 +1125,7 @@ set_rcvbuf:
                break;
                }
        case SO_INCOMING_CPU:
-               sk->sk_incoming_cpu = val;
+               WRITE_ONCE(sk->sk_incoming_cpu, val);
                break;
 
        case SO_CNX_ADVICE:
@@ -1474,7 +1474,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                break;
 
        case SO_INCOMING_CPU:
-               v.val = sk->sk_incoming_cpu;
+               v.val = READ_ONCE(sk->sk_incoming_cpu);
                break;
 
        case SO_MEMINFO:
@@ -3013,7 +3013,7 @@ int sock_gettstamp(struct socket *sock, void __user *userstamp,
                return -ENOENT;
        if (ts.tv_sec == 0) {
                ktime_t kt = ktime_get_real();
-               sock_write_timestamp(sk, kt);;
+               sock_write_timestamp(sk, kt);
                ts = ktime_to_timespec64(kt);
        }
 
@@ -3598,7 +3598,7 @@ bool sk_busy_loop_end(void *p, unsigned long start_time)
 {
        struct sock *sk = p;
 
-       return !skb_queue_empty(&sk->sk_receive_queue) ||
+       return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
               sk_busy_loop_timeout(sk, start_time);
 }
 EXPORT_SYMBOL(sk_busy_loop_end);
index d9b4200..0d8f782 100644 (file)
@@ -117,7 +117,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                                                    inet->inet_daddr,
                                                    inet->inet_sport,
                                                    inet->inet_dport);
-       inet->inet_id = dp->dccps_iss ^ jiffies;
+       inet->inet_id = prandom_u32();
 
        err = dccp_connect(sk);
        rt = NULL;
index 0ea7528..3349ea8 100644 (file)
@@ -1205,7 +1205,7 @@ static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table  *wai
        struct dn_scp *scp = DN_SK(sk);
        __poll_t mask = datagram_poll(file, sock, wait);
 
-       if (!skb_queue_empty(&scp->other_receive_queue))
+       if (!skb_queue_empty_lockless(&scp->other_receive_queue))
                mask |= EPOLLRDBAND;
 
        return mask;
index a554576..db1c1c7 100644 (file)
@@ -331,6 +331,54 @@ int call_dsa_notifiers(unsigned long val, struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(call_dsa_notifiers);
 
+int dsa_devlink_param_get(struct devlink *dl, u32 id,
+                         struct devlink_param_gset_ctx *ctx)
+{
+       struct dsa_devlink_priv *dl_priv;
+       struct dsa_switch *ds;
+
+       dl_priv = devlink_priv(dl);
+       ds = dl_priv->ds;
+
+       if (!ds->ops->devlink_param_get)
+               return -EOPNOTSUPP;
+
+       return ds->ops->devlink_param_get(ds, id, ctx);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_param_get);
+
+int dsa_devlink_param_set(struct devlink *dl, u32 id,
+                         struct devlink_param_gset_ctx *ctx)
+{
+       struct dsa_devlink_priv *dl_priv;
+       struct dsa_switch *ds;
+
+       dl_priv = devlink_priv(dl);
+       ds = dl_priv->ds;
+
+       if (!ds->ops->devlink_param_set)
+               return -EOPNOTSUPP;
+
+       return ds->ops->devlink_param_set(ds, id, ctx);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_param_set);
+
+int dsa_devlink_params_register(struct dsa_switch *ds,
+                               const struct devlink_param *params,
+                               size_t params_count)
+{
+       return devlink_params_register(ds->devlink, params, params_count);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_params_register);
+
+void dsa_devlink_params_unregister(struct dsa_switch *ds,
+                                  const struct devlink_param *params,
+                                  size_t params_count)
+{
+       devlink_params_unregister(ds->devlink, params, params_count);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_params_unregister);
+
 static int __init dsa_init_module(void)
 {
        int rc;
index 1e3ac9b..ff2fa39 100644 (file)
@@ -45,6 +45,8 @@ static struct dsa_switch_tree *dsa_tree_alloc(int index)
 
        dst->index = index;
 
+       INIT_LIST_HEAD(&dst->rtable);
+
        INIT_LIST_HEAD(&dst->ports);
 
        INIT_LIST_HEAD(&dst->list);
@@ -122,6 +124,31 @@ static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
        return NULL;
 }
 
+struct dsa_link *dsa_link_touch(struct dsa_port *dp, struct dsa_port *link_dp)
+{
+       struct dsa_switch *ds = dp->ds;
+       struct dsa_switch_tree *dst;
+       struct dsa_link *dl;
+
+       dst = ds->dst;
+
+       list_for_each_entry(dl, &dst->rtable, list)
+               if (dl->dp == dp && dl->link_dp == link_dp)
+                       return dl;
+
+       dl = kzalloc(sizeof(*dl), GFP_KERNEL);
+       if (!dl)
+               return NULL;
+
+       dl->dp = dp;
+       dl->link_dp = link_dp;
+
+       INIT_LIST_HEAD(&dl->list);
+       list_add_tail(&dl->list, &dst->rtable);
+
+       return dl;
+}
+
 static bool dsa_port_setup_routing_table(struct dsa_port *dp)
 {
        struct dsa_switch *ds = dp->ds;
@@ -129,6 +156,7 @@ static bool dsa_port_setup_routing_table(struct dsa_port *dp)
        struct device_node *dn = dp->dn;
        struct of_phandle_iterator it;
        struct dsa_port *link_dp;
+       struct dsa_link *dl;
        int err;
 
        of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
@@ -138,24 +166,23 @@ static bool dsa_port_setup_routing_table(struct dsa_port *dp)
                        return false;
                }
 
-               ds->rtable[link_dp->ds->index] = dp->index;
+               dl = dsa_link_touch(dp, link_dp);
+               if (!dl) {
+                       of_node_put(it.node);
+                       return false;
+               }
        }
 
        return true;
 }
 
-static bool dsa_switch_setup_routing_table(struct dsa_switch *ds)
+static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
 {
-       struct dsa_switch_tree *dst = ds->dst;
        bool complete = true;
        struct dsa_port *dp;
-       int i;
-
-       for (i = 0; i < DSA_MAX_SWITCHES; i++)
-               ds->rtable[i] = DSA_RTABLE_NONE;
 
        list_for_each_entry(dp, &dst->ports, list) {
-               if (dp->ds == ds && dsa_port_is_dsa(dp)) {
+               if (dsa_port_is_dsa(dp)) {
                        complete = dsa_port_setup_routing_table(dp);
                        if (!complete)
                                break;
@@ -165,25 +192,6 @@ static bool dsa_switch_setup_routing_table(struct dsa_switch *ds)
        return complete;
 }
 
-static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
-{
-       struct dsa_switch *ds;
-       bool complete = true;
-       int device;
-
-       for (device = 0; device < DSA_MAX_SWITCHES; device++) {
-               ds = dst->ds[device];
-               if (!ds)
-                       continue;
-
-               complete = dsa_switch_setup_routing_table(ds);
-               if (!complete)
-                       break;
-       }
-
-       return complete;
-}
-
 static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
 {
        struct dsa_port *dp;
@@ -349,6 +357,7 @@ static void dsa_port_teardown(struct dsa_port *dp)
 
 static int dsa_switch_setup(struct dsa_switch *ds)
 {
+       struct dsa_devlink_priv *dl_priv;
        int err;
 
        if (ds->setup)
@@ -364,9 +373,11 @@ static int dsa_switch_setup(struct dsa_switch *ds)
        /* Add the switch to devlink before calling setup, so that setup can
         * add dpipe tables
         */
-       ds->devlink = devlink_alloc(&dsa_devlink_ops, 0);
+       ds->devlink = devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv));
        if (!ds->devlink)
                return -ENOMEM;
+       dl_priv = devlink_priv(ds->devlink);
+       dl_priv->ds = ds;
 
        err = devlink_register(ds->devlink, ds->dev);
        if (err)
@@ -380,6 +391,8 @@ static int dsa_switch_setup(struct dsa_switch *ds)
        if (err < 0)
                goto unregister_notifier;
 
+       devlink_params_publish(ds->devlink);
+
        if (!ds->slave_mii_bus && ds->ops->phy_read) {
                ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
                if (!ds->slave_mii_bus) {
@@ -539,6 +552,8 @@ teardown_default_cpu:
 
 static void dsa_tree_teardown(struct dsa_switch_tree *dst)
 {
+       struct dsa_link *dl, *next;
+
        if (!dst->setup)
                return;
 
@@ -548,41 +563,16 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
 
        dsa_tree_teardown_default_cpu(dst);
 
+       list_for_each_entry_safe(dl, next, &dst->rtable, list) {
+               list_del(&dl->list);
+               kfree(dl);
+       }
+
        pr_info("DSA: tree %d torn down\n", dst->index);
 
        dst->setup = false;
 }
 
-static void dsa_tree_remove_switch(struct dsa_switch_tree *dst,
-                                  unsigned int index)
-{
-       dsa_tree_teardown(dst);
-
-       dst->ds[index] = NULL;
-       dsa_tree_put(dst);
-}
-
-static int dsa_tree_add_switch(struct dsa_switch_tree *dst,
-                              struct dsa_switch *ds)
-{
-       unsigned int index = ds->index;
-       int err;
-
-       if (dst->ds[index])
-               return -EBUSY;
-
-       dsa_tree_get(dst);
-       dst->ds[index] = ds;
-
-       err = dsa_tree_setup(dst);
-       if (err) {
-               dst->ds[index] = NULL;
-               dsa_tree_put(dst);
-       }
-
-       return err;
-}
-
 static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
 {
        struct dsa_switch_tree *dst = ds->dst;
@@ -721,8 +711,6 @@ static int dsa_switch_parse_member_of(struct dsa_switch *ds,
                return sz;
 
        ds->index = m[1];
-       if (ds->index >= DSA_MAX_SWITCHES)
-               return -EINVAL;
 
        ds->dst = dsa_tree_touch(m[0]);
        if (!ds->dst)
@@ -833,22 +821,19 @@ static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
        return dsa_switch_parse_ports(ds, cd);
 }
 
-static int dsa_switch_add(struct dsa_switch *ds)
-{
-       struct dsa_switch_tree *dst = ds->dst;
-
-       return dsa_tree_add_switch(dst, ds);
-}
-
 static int dsa_switch_probe(struct dsa_switch *ds)
 {
-       struct dsa_chip_data *pdata = ds->dev->platform_data;
-       struct device_node *np = ds->dev->of_node;
+       struct dsa_switch_tree *dst;
+       struct dsa_chip_data *pdata;
+       struct device_node *np;
        int err;
 
        if (!ds->dev)
                return -ENODEV;
 
+       pdata = ds->dev->platform_data;
+       np = ds->dev->of_node;
+
        if (!ds->num_ports)
                return -EINVAL;
 
@@ -862,7 +847,13 @@ static int dsa_switch_probe(struct dsa_switch *ds)
        if (err)
                return err;
 
-       return dsa_switch_add(ds);
+       dst = ds->dst;
+       dsa_tree_get(dst);
+       err = dsa_tree_setup(dst);
+       if (err)
+               dsa_tree_put(dst);
+
+       return err;
 }
 
 int dsa_register_switch(struct dsa_switch *ds)
@@ -881,7 +872,6 @@ EXPORT_SYMBOL_GPL(dsa_register_switch);
 static void dsa_switch_remove(struct dsa_switch *ds)
 {
        struct dsa_switch_tree *dst = ds->dst;
-       unsigned int index = ds->index;
        struct dsa_port *dp, *next;
 
        list_for_each_entry_safe(dp, next, &dst->ports, list) {
@@ -889,7 +879,8 @@ static void dsa_switch_remove(struct dsa_switch *ds)
                kfree(dp);
        }
 
-       dsa_tree_remove_switch(dst, index);
+       dsa_tree_teardown(dst);
+       dsa_tree_put(dst);
 }
 
 void dsa_unregister_switch(struct dsa_switch *ds)
index a8e52c9..3255dfc 100644 (file)
@@ -310,8 +310,6 @@ static void dsa_master_reset_mtu(struct net_device *dev)
        rtnl_unlock();
 }
 
-static struct lock_class_key dsa_master_addr_list_lock_key;
-
 int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 {
        int ret;
@@ -325,9 +323,6 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
        wmb();
 
        dev->dsa_ptr = cpu_dp;
-       lockdep_set_class(&dev->addr_list_lock,
-                         &dsa_master_addr_list_lock_key);
-
        ret = dsa_master_ethtool_setup(dev);
        if (ret)
                return ret;
index 75d5822..d187616 100644 (file)
@@ -789,6 +789,22 @@ static int dsa_slave_set_link_ksettings(struct net_device *dev,
        return phylink_ethtool_ksettings_set(dp->pl, cmd);
 }
 
+static void dsa_slave_get_pauseparam(struct net_device *dev,
+                                    struct ethtool_pauseparam *pause)
+{
+       struct dsa_port *dp = dsa_slave_to_port(dev);
+
+       phylink_ethtool_get_pauseparam(dp->pl, pause);
+}
+
+static int dsa_slave_set_pauseparam(struct net_device *dev,
+                                   struct ethtool_pauseparam *pause)
+{
+       struct dsa_port *dp = dsa_slave_to_port(dev);
+
+       return phylink_ethtool_set_pauseparam(dp->pl, pause);
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static int dsa_slave_netpoll_setup(struct net_device *dev,
                                   struct netpoll_info *ni)
@@ -1192,6 +1208,8 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
        .get_eee                = dsa_slave_get_eee,
        .get_link_ksettings     = dsa_slave_get_link_ksettings,
        .set_link_ksettings     = dsa_slave_set_link_ksettings,
+       .get_pauseparam         = dsa_slave_get_pauseparam,
+       .set_pauseparam         = dsa_slave_set_pauseparam,
        .get_rxnfc              = dsa_slave_get_rxnfc,
        .set_rxnfc              = dsa_slave_set_rxnfc,
        .get_ts_info            = dsa_slave_get_ts_info,
@@ -1341,15 +1359,6 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
        return ret;
 }
 
-static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
-static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
-                                           struct netdev_queue *txq,
-                                           void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock,
-                         &dsa_slave_netdev_xmit_lock_key);
-}
-
 int dsa_slave_suspend(struct net_device *slave_dev)
 {
        struct dsa_port *dp = dsa_slave_to_port(slave_dev);
@@ -1433,9 +1442,6 @@ int dsa_slave_create(struct dsa_port *port)
        slave_dev->max_mtu = ETH_MAX_MTU;
        SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
 
-       netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
-                                NULL);
-
        SET_NETDEV_DEV(slave_dev, port->ds->dev);
        slave_dev->dev.of_node = port->dn;
        slave_dev->vlan_features = master->vlan_features;
index bf91fc5..bc5cb91 100644 (file)
  *     Must be transmitted as zero and ignored on receive.
  *
  * SWITCH_ID - VID[8:6]:
- *     Index of switch within DSA tree. Must be between 0 and
- *     DSA_MAX_SWITCHES - 1.
+ *     Index of switch within DSA tree. Must be between 0 and 7.
  *
  * RSV - VID[5:4]:
  *     To be used for further expansion of PORT or for other purposes.
  *     Must be transmitted as zero and ignored on receive.
  *
  * PORT - VID[3:0]:
- *     Index of switch port. Must be between 0 and DSA_MAX_PORTS - 1.
+ *     Index of switch port. Must be between 0 and 15.
  */
 
 #define DSA_8021Q_DIR_SHIFT            10
index 3297e7f..c0b107c 100644 (file)
@@ -58,13 +58,6 @@ static const struct header_ops lowpan_header_ops = {
        .create = lowpan_header_create,
 };
 
-static int lowpan_dev_init(struct net_device *ldev)
-{
-       netdev_lockdep_set_classes(ldev);
-
-       return 0;
-}
-
 static int lowpan_open(struct net_device *dev)
 {
        if (!open_count)
@@ -96,7 +89,6 @@ static int lowpan_get_iflink(const struct net_device *dev)
 }
 
 static const struct net_device_ops lowpan_netdev_ops = {
-       .ndo_init               = lowpan_dev_init,
        .ndo_start_xmit         = lowpan_xmit,
        .ndo_open               = lowpan_open,
        .ndo_stop               = lowpan_stop,
index 9a0fe0c..4a8550c 100644 (file)
@@ -73,7 +73,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
        reuseport_has_conns(sk, true);
        sk->sk_state = TCP_ESTABLISHED;
        sk_set_txhash(sk);
-       inet->inet_id = jiffies;
+       inet->inet_id = prandom_u32();
 
        sk_dst_set(sk, &rt->dst);
        err = 0;
index dde77f7..71c78d2 100644 (file)
@@ -1148,7 +1148,7 @@ void fib_modify_prefix_metric(struct in_ifaddr *ifa, u32 new_metric)
        if (!(dev->flags & IFF_UP) ||
            ifa->ifa_flags & (IFA_F_SECONDARY | IFA_F_NOPREFIXROUTE) ||
            ipv4_is_zeronet(prefix) ||
-           prefix == ifa->ifa_local || ifa->ifa_prefixlen == 32)
+           (prefix == ifa->ifa_local && ifa->ifa_prefixlen == 32))
                return;
 
        /* add the new */
index 9782486..83fb001 100644 (file)
@@ -240,7 +240,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
                        return -1;
 
                score = sk->sk_family == PF_INET ? 2 : 1;
-               if (sk->sk_incoming_cpu == raw_smp_processor_id())
+               if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                        score++;
        }
        return score;
index 52690bb..10636fb 100644 (file)
@@ -509,9 +509,9 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
        key = &tun_info->key;
        if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
                goto err_free_skb;
-       md = ip_tunnel_info_opts(tun_info);
-       if (!md)
+       if (tun_info->options_len < sizeof(*md))
                goto err_free_skb;
+       md = ip_tunnel_info_opts(tun_info);
 
        /* ERSPAN has fixed 8 byte GRE header */
        version = md->version;
index c59a78a..24a9512 100644 (file)
@@ -611,5 +611,6 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt,
                list_add_tail(&skb->list, &sublist);
        }
        /* dispatch final sublist */
-       ip_sublist_rcv(&sublist, curr_dev, curr_net);
+       if (!list_empty(&sublist))
+               ip_sublist_rcv(&sublist, curr_dev, curr_net);
 }
index 814b9b8..3d8baaa 100644 (file)
@@ -645,11 +645,12 @@ void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
 EXPORT_SYMBOL(ip_fraglist_prepare);
 
 void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
-                 unsigned int ll_rs, unsigned int mtu,
+                 unsigned int ll_rs, unsigned int mtu, bool DF,
                  struct ip_frag_state *state)
 {
        struct iphdr *iph = ip_hdr(skb);
 
+       state->DF = DF;
        state->hlen = hlen;
        state->ll_rs = ll_rs;
        state->mtu = mtu;
@@ -668,9 +669,6 @@ static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
        /* Copy the flags to each fragment. */
        IPCB(to)->flags = IPCB(from)->flags;
 
-       if (IPCB(from)->flags & IPSKB_FRAG_PMTU)
-               state->iph->frag_off |= htons(IP_DF);
-
        /* ANK: dirty, but effective trick. Upgrade options only if
         * the segment to be fragmented was THE FIRST (otherwise,
         * options are already fixed) and make it ONCE
@@ -738,6 +736,8 @@ struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
         */
        iph = ip_hdr(skb2);
        iph->frag_off = htons((state->offset >> 3));
+       if (state->DF)
+               iph->frag_off |= htons(IP_DF);
 
        /*
         *      Added AC : If we are fragmenting a fragment that's not the
@@ -883,7 +883,8 @@ slow_path:
         *      Fragment the datagram.
         */
 
-       ip_frag_init(skb, hlen, ll_rs, mtu, &state);
+       ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
+                    &state);
 
        /*
         *      Keep copying data until we run out.
index 8fc1e8b..1dd2518 100644 (file)
@@ -584,7 +584,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
        }
        /* This barrier is coupled with smp_wmb() in tcp_reset() */
        smp_rmb();
-       if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+       if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR;
 
        return mask;
@@ -1964,7 +1964,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
        if (unlikely(flags & MSG_ERRQUEUE))
                return inet_recv_error(sk, msg, len, addr_len);
 
-       if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
+       if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) &&
            (sk->sk_state == TCP_ESTABLISHED))
                sk_busy_loop(sk, nonblock);
 
index c616f0a..899e100 100644 (file)
@@ -301,7 +301,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                                                 inet->inet_daddr);
        }
 
-       inet->inet_id = tp->write_seq ^ jiffies;
+       inet->inet_id = prandom_u32();
 
        if (tcp_fastopen_defer_connect(sk, &err))
                return err;
@@ -1448,7 +1448,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (inet_opt)
                inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
-       newinet->inet_id = newtp->write_seq ^ jiffies;
+       newinet->inet_id = prandom_u32();
 
        if (!dst) {
                dst = inet_csk_route_child_sock(sk, newsk, req);
@@ -2679,7 +2679,7 @@ static int __net_init tcp_sk_init(struct net *net)
        net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
        net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
 
-       net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
+       net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
        net->ipv4.sysctl_tcp_sack = 1;
        net->ipv4.sysctl_tcp_window_scaling = 1;
        net->ipv4.sysctl_tcp_timestamps = 1;
index 14bc654..1d58ce8 100644 (file)
@@ -388,7 +388,7 @@ static int compute_score(struct sock *sk, struct net *net,
                return -1;
        score += 4;
 
-       if (sk->sk_incoming_cpu == raw_smp_processor_id())
+       if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                score++;
        return score;
 }
@@ -1316,6 +1316,20 @@ static void udp_set_dev_scratch(struct sk_buff *skb)
                scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
 }
 
+static void udp_skb_csum_unnecessary_set(struct sk_buff *skb)
+{
+       /* We come here after udp_lib_checksum_complete() returned 0.
+        * This means that __skb_checksum_complete() might have
+        * set skb->csum_valid to 1.
+        * On 64bit platforms, we can set csum_unnecessary
+        * to true, but only if the skb is not shared.
+        */
+#if BITS_PER_LONG == 64
+       if (!skb_shared(skb))
+               udp_skb_scratch(skb)->csum_unnecessary = true;
+#endif
+}
+
 static int udp_skb_truesize(struct sk_buff *skb)
 {
        return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
@@ -1550,10 +1564,7 @@ static struct sk_buff *__first_packet_length(struct sock *sk,
                        *total += skb->truesize;
                        kfree_skb(skb);
                } else {
-                       /* the csum related bits could be changed, refresh
-                        * the scratch area
-                        */
-                       udp_set_dev_scratch(skb);
+                       udp_skb_csum_unnecessary_set(skb);
                        break;
                }
        }
@@ -1577,7 +1588,7 @@ static int first_packet_length(struct sock *sk)
 
        spin_lock_bh(&rcvq->lock);
        skb = __first_packet_length(sk, rcvq, &total);
-       if (!skb && !skb_queue_empty(sk_queue)) {
+       if (!skb && !skb_queue_empty_lockless(sk_queue)) {
                spin_lock(&sk_queue->lock);
                skb_queue_splice_tail_init(sk_queue, rcvq);
                spin_unlock(&sk_queue->lock);
@@ -1650,7 +1661,7 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
                                return skb;
                        }
 
-                       if (skb_queue_empty(sk_queue)) {
+                       if (skb_queue_empty_lockless(sk_queue)) {
                                spin_unlock_bh(&queue->lock);
                                goto busy_check;
                        }
@@ -1676,7 +1687,7 @@ busy_check:
                                break;
 
                        sk_busy_loop(sk, flags & MSG_DONTWAIT);
-               } while (!skb_queue_empty(sk_queue));
+               } while (!skb_queue_empty_lockless(sk_queue));
 
                /* sk_queue is empty, reader_queue may contain peeked packets */
        } while (timeo &&
@@ -2712,7 +2723,7 @@ __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
        __poll_t mask = datagram_poll(file, sock, wait);
        struct sock *sk = sock->sk;
 
-       if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
+       if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Check for false positives due to checksum errors */
index 783f3c1..2fc0792 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/export.h>
 #include <net/ipv6.h>
 #include <net/ipv6_stubs.h>
+#include <net/addrconf.h>
 #include <net/ip.h>
 
 /* if ipv6 module registers this function is used by xfrm to force all
index cf60fae..fbe9d42 100644 (file)
@@ -105,7 +105,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
                        return -1;
 
                score = 1;
-               if (sk->sk_incoming_cpu == raw_smp_processor_id())
+               if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                        score++;
        }
        return score;
index 787d9f2..923034c 100644 (file)
@@ -980,9 +980,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
                dsfield = key->tos;
                if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
                        goto tx_err;
-               md = ip_tunnel_info_opts(tun_info);
-               if (!md)
+               if (tun_info->options_len < sizeof(*md))
                        goto tx_err;
+               md = ip_tunnel_info_opts(tun_info);
 
                tun_id = tunnel_id_to_key32(key->tun_id);
                if (md->version == 1) {
index 3d71c7d..ef7f707 100644 (file)
@@ -325,7 +325,8 @@ void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
                list_add_tail(&skb->list, &sublist);
        }
        /* dispatch final sublist */
-       ip6_sublist_rcv(&sublist, curr_dev, curr_net);
+       if (!list_empty(&sublist))
+               ip6_sublist_rcv(&sublist, curr_dev, curr_net);
 }
 
 INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));
index 6324d3a..9fec580 100644 (file)
@@ -135,7 +135,7 @@ static int compute_score(struct sock *sk, struct net *net,
                return -1;
        score++;
 
-       if (sk->sk_incoming_cpu == raw_smp_processor_id())
+       if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                score++;
 
        return score;
index fd5ac27..d3b520b 100644 (file)
@@ -56,7 +56,6 @@ static int l2tp_eth_dev_init(struct net_device *dev)
 {
        eth_hw_addr_random(dev);
        eth_broadcast_addr(dev->broadcast);
-       netdev_lockdep_set_classes(dev);
 
        return 0;
 }
index 4515056..f9b16f2 100644 (file)
@@ -193,21 +193,29 @@ struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *
 
        mutex_lock(&__ip_vs_app_mutex);
 
+       /* increase the module use count */
+       if (!ip_vs_use_count_inc()) {
+               err = -ENOENT;
+               goto out_unlock;
+       }
+
        list_for_each_entry(a, &ipvs->app_list, a_list) {
                if (!strcmp(app->name, a->name)) {
                        err = -EEXIST;
+                       /* decrease the module use count */
+                       ip_vs_use_count_dec();
                        goto out_unlock;
                }
        }
        a = kmemdup(app, sizeof(*app), GFP_KERNEL);
        if (!a) {
                err = -ENOMEM;
+               /* decrease the module use count */
+               ip_vs_use_count_dec();
                goto out_unlock;
        }
        INIT_LIST_HEAD(&a->incs_list);
        list_add(&a->a_list, &ipvs->app_list);
-       /* increase the module use count */
-       ip_vs_use_count_inc();
 
 out_unlock:
        mutex_unlock(&__ip_vs_app_mutex);
index 153c77b..3be7398 100644 (file)
@@ -93,7 +93,6 @@ static bool __ip_vs_addr_is_local_v6(struct net *net,
 static void update_defense_level(struct netns_ipvs *ipvs)
 {
        struct sysinfo i;
-       static int old_secure_tcp = 0;
        int availmem;
        int nomem;
        int to_change = -1;
@@ -174,35 +173,35 @@ static void update_defense_level(struct netns_ipvs *ipvs)
        spin_lock(&ipvs->securetcp_lock);
        switch (ipvs->sysctl_secure_tcp) {
        case 0:
-               if (old_secure_tcp >= 2)
+               if (ipvs->old_secure_tcp >= 2)
                        to_change = 0;
                break;
        case 1:
                if (nomem) {
-                       if (old_secure_tcp < 2)
+                       if (ipvs->old_secure_tcp < 2)
                                to_change = 1;
                        ipvs->sysctl_secure_tcp = 2;
                } else {
-                       if (old_secure_tcp >= 2)
+                       if (ipvs->old_secure_tcp >= 2)
                                to_change = 0;
                }
                break;
        case 2:
                if (nomem) {
-                       if (old_secure_tcp < 2)
+                       if (ipvs->old_secure_tcp < 2)
                                to_change = 1;
                } else {
-                       if (old_secure_tcp >= 2)
+                       if (ipvs->old_secure_tcp >= 2)
                                to_change = 0;
                        ipvs->sysctl_secure_tcp = 1;
                }
                break;
        case 3:
-               if (old_secure_tcp < 2)
+               if (ipvs->old_secure_tcp < 2)
                        to_change = 1;
                break;
        }
-       old_secure_tcp = ipvs->sysctl_secure_tcp;
+       ipvs->old_secure_tcp = ipvs->sysctl_secure_tcp;
        if (to_change >= 0)
                ip_vs_protocol_timeout_change(ipvs,
                                              ipvs->sysctl_secure_tcp > 1);
@@ -1275,7 +1274,8 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
        struct ip_vs_service *svc = NULL;
 
        /* increase the module use count */
-       ip_vs_use_count_inc();
+       if (!ip_vs_use_count_inc())
+               return -ENOPROTOOPT;
 
        /* Lookup the scheduler by 'u->sched_name' */
        if (strcmp(u->sched_name, "none")) {
@@ -2441,9 +2441,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
        if (copy_from_user(arg, user, len) != 0)
                return -EFAULT;
 
-       /* increase the module use count */
-       ip_vs_use_count_inc();
-
        /* Handle daemons since they have another lock */
        if (cmd == IP_VS_SO_SET_STARTDAEMON ||
            cmd == IP_VS_SO_SET_STOPDAEMON) {
@@ -2456,13 +2453,13 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
                        ret = -EINVAL;
                        if (strscpy(cfg.mcast_ifn, dm->mcast_ifn,
                                    sizeof(cfg.mcast_ifn)) <= 0)
-                               goto out_dec;
+                               return ret;
                        cfg.syncid = dm->syncid;
                        ret = start_sync_thread(ipvs, &cfg, dm->state);
                } else {
                        ret = stop_sync_thread(ipvs, dm->state);
                }
-               goto out_dec;
+               return ret;
        }
 
        mutex_lock(&__ip_vs_mutex);
@@ -2557,10 +2554,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 
   out_unlock:
        mutex_unlock(&__ip_vs_mutex);
-  out_dec:
-       /* decrease the module use count */
-       ip_vs_use_count_dec();
-
        return ret;
 }
 
index 78b074c..c03066f 100644 (file)
@@ -5,7 +5,7 @@
  * Authors:     Raducu Deaconu <rhadoo_io@yahoo.com>
  *
  * Scheduler implements "overflow" loadbalancing according to number of active
- * connections , will keep all conections to the node with the highest weight
+ * connections , will keep all connections to the node with the highest weight
  * and overflow to the next node if the number of connections exceeds the node's
  * weight.
  * Note that this scheduler might not be suitable for UDP because it only uses
index 8e104df..166c669 100644 (file)
@@ -68,7 +68,8 @@ int register_ip_vs_pe(struct ip_vs_pe *pe)
        struct ip_vs_pe *tmp;
 
        /* increase the module use count */
-       ip_vs_use_count_inc();
+       if (!ip_vs_use_count_inc())
+               return -ENOENT;
 
        mutex_lock(&ip_vs_pe_mutex);
        /* Make sure that the pe with this name doesn't exist
index 2f9d5cd..d490372 100644 (file)
@@ -179,7 +179,8 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
        }
 
        /* increase the module use count */
-       ip_vs_use_count_inc();
+       if (!ip_vs_use_count_inc())
+               return -ENOENT;
 
        mutex_lock(&ip_vs_sched_mutex);
 
index a4a78c4..8dc892a 100644 (file)
@@ -1762,6 +1762,10 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
        IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n",
                  sizeof(struct ip_vs_sync_conn_v0));
 
+       /* increase the module use count */
+       if (!ip_vs_use_count_inc())
+               return -ENOPROTOOPT;
+
        /* Do not hold one mutex and then to block on another */
        for (;;) {
                rtnl_lock();
@@ -1892,9 +1896,6 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
        mutex_unlock(&ipvs->sync_mutex);
        rtnl_unlock();
 
-       /* increase the module use count */
-       ip_vs_use_count_inc();
-
        return 0;
 
 out:
@@ -1924,11 +1925,17 @@ out:
                }
                kfree(ti);
        }
+
+       /* decrease the module use count */
+       ip_vs_use_count_dec();
        return result;
 
 out_early:
        mutex_unlock(&ipvs->sync_mutex);
        rtnl_unlock();
+
+       /* decrease the module use count */
+       ip_vs_use_count_dec();
        return result;
 }
 
index 132f522..128245e 100644 (file)
@@ -202,6 +202,8 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 {
        int err;
 
+       flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+
        err = rhashtable_insert_fast(&flow_table->rhashtable,
                                     &flow->tuplehash[0].node,
                                     nf_flow_offload_rhash_params);
@@ -218,7 +220,6 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
                return err;
        }
 
-       flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
        return 0;
 }
 EXPORT_SYMBOL_GPL(flow_offload_add);
index 4e0625c..93e27a6 100644 (file)
@@ -410,7 +410,7 @@ int nft_flow_rule_offload_commit(struct net *net)
 
                        policy = nft_trans_chain_policy(trans);
                        err = nft_flow_offload_chain(trans->ctx.chain, &policy,
-                                                    FLOW_BLOCK_BIND);
+                                                    FLOW_BLOCK_UNBIND);
                        break;
                case NFT_MSG_NEWRULE:
                        if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
index 22a80eb..5cb2d89 100644 (file)
@@ -161,13 +161,21 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct ethhdr, h_source):
+               if (priv->len != ETH_ALEN)
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
                                  src, ETH_ALEN, reg);
                break;
        case offsetof(struct ethhdr, h_dest):
+               if (priv->len != ETH_ALEN)
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
                                  dst, ETH_ALEN, reg);
                break;
+       default:
+               return -EOPNOTSUPP;
        }
 
        return 0;
@@ -181,14 +189,23 @@ static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct iphdr, saddr):
+               if (priv->len != sizeof(struct in_addr))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
                                  sizeof(struct in_addr), reg);
                break;
        case offsetof(struct iphdr, daddr):
+               if (priv->len != sizeof(struct in_addr))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
                                  sizeof(struct in_addr), reg);
                break;
        case offsetof(struct iphdr, protocol):
+               if (priv->len != sizeof(__u8))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
                                  sizeof(__u8), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
@@ -208,14 +225,23 @@ static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct ipv6hdr, saddr):
+               if (priv->len != sizeof(struct in6_addr))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
                                  sizeof(struct in6_addr), reg);
                break;
        case offsetof(struct ipv6hdr, daddr):
+               if (priv->len != sizeof(struct in6_addr))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
                                  sizeof(struct in6_addr), reg);
                break;
        case offsetof(struct ipv6hdr, nexthdr):
+               if (priv->len != sizeof(__u8))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
                                  sizeof(__u8), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
@@ -255,10 +281,16 @@ static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct tcphdr, source):
+               if (priv->len != sizeof(__be16))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
                                  sizeof(__be16), reg);
                break;
        case offsetof(struct tcphdr, dest):
+               if (priv->len != sizeof(__be16))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
                                  sizeof(__be16), reg);
                break;
@@ -277,10 +309,16 @@ static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct udphdr, source):
+               if (priv->len != sizeof(__be16))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
                                  sizeof(__be16), reg);
                break;
        case offsetof(struct udphdr, dest):
+               if (priv->len != sizeof(__be16))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
                                  sizeof(__be16), reg);
                break;
index c4f54ad..58d5373 100644 (file)
@@ -63,28 +63,6 @@ static DEFINE_SPINLOCK(nr_list_lock);
 
 static const struct proto_ops nr_proto_ops;
 
-/*
- * NETROM network devices are virtual network devices encapsulating NETROM
- * frames into AX.25 which will be sent through an AX.25 device, so form a
- * special "super class" of normal net devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key nr_netdev_xmit_lock_key;
-static struct lock_class_key nr_netdev_addr_lock_key;
-
-static void nr_set_lockdep_one(struct net_device *dev,
-                              struct netdev_queue *txq,
-                              void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key);
-}
-
-static void nr_set_lockdep_key(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key);
-       netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
-}
-
 /*
  *     Socket removal during an interrupt is now safe.
  */
@@ -1414,7 +1392,6 @@ static int __init nr_proto_init(void)
                        free_netdev(dev);
                        goto fail;
                }
-               nr_set_lockdep_key(dev);
                dev_nr[i] = dev;
        }
 
index ccdd790..2860441 100644 (file)
@@ -554,11 +554,11 @@ static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
        if (sk->sk_state == LLCP_LISTEN)
                return llcp_accept_poll(sk);
 
-       if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+       if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        if (sk->sk_state == LLCP_CLOSED)
index f30e406..d8c364d 100644 (file)
@@ -1881,7 +1881,7 @@ static struct genl_family dp_datapath_genl_family __ro_after_init = {
 /* Called with ovs_mutex or RCU read lock. */
 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
                                   struct net *net, u32 portid, u32 seq,
-                                  u32 flags, u8 cmd)
+                                  u32 flags, u8 cmd, gfp_t gfp)
 {
        struct ovs_header *ovs_header;
        struct ovs_vport_stats vport_stats;
@@ -1902,7 +1902,7 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
                goto nla_put_failure;
 
        if (!net_eq(net, dev_net(vport->dev))) {
-               int id = peernet2id_alloc(net, dev_net(vport->dev));
+               int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);
 
                if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
                        goto nla_put_failure;
@@ -1943,11 +1943,12 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
        struct sk_buff *skb;
        int retval;
 
-       skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+       skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!skb)
                return ERR_PTR(-ENOMEM);
 
-       retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd);
+       retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd,
+                                        GFP_KERNEL);
        BUG_ON(retval < 0);
 
        return skb;
@@ -2089,7 +2090,7 @@ restart:
 
        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
                                      info->snd_portid, info->snd_seq, 0,
-                                     OVS_VPORT_CMD_NEW);
+                                     OVS_VPORT_CMD_NEW, GFP_KERNEL);
 
        new_headroom = netdev_get_fwd_headroom(vport->dev);
 
@@ -2150,7 +2151,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
 
        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
                                      info->snd_portid, info->snd_seq, 0,
-                                     OVS_VPORT_CMD_SET);
+                                     OVS_VPORT_CMD_SET, GFP_KERNEL);
        BUG_ON(err < 0);
 
        ovs_unlock();
@@ -2190,7 +2191,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 
        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
                                      info->snd_portid, info->snd_seq, 0,
-                                     OVS_VPORT_CMD_DEL);
+                                     OVS_VPORT_CMD_DEL, GFP_KERNEL);
        BUG_ON(err < 0);
 
        /* the vport deletion may trigger dp headroom update */
@@ -2237,7 +2238,7 @@ static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
                goto exit_unlock_free;
        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
                                      info->snd_portid, info->snd_seq, 0,
-                                     OVS_VPORT_CMD_GET);
+                                     OVS_VPORT_CMD_GET, GFP_ATOMIC);
        BUG_ON(err < 0);
        rcu_read_unlock();
 
@@ -2273,7 +2274,8 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                                    NETLINK_CB(cb->skb).portid,
                                                    cb->nlh->nlmsg_seq,
                                                    NLM_F_MULTI,
-                                                   OVS_VPORT_CMD_GET) < 0)
+                                                   OVS_VPORT_CMD_GET,
+                                                   GFP_ATOMIC) < 0)
                                goto out;
 
                        j++;
index 21c90d3..58a7b83 100644 (file)
@@ -137,7 +137,7 @@ static void do_setup(struct net_device *netdev)
        netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
                              IFF_NO_QUEUE;
        netdev->needs_free_netdev = true;
-       netdev->priv_destructor = internal_dev_destructor;
+       netdev->priv_destructor = NULL;
        netdev->ethtool_ops = &internal_dev_ethtool_ops;
        netdev->rtnl_link_ops = &internal_dev_link_ops;
 
@@ -159,7 +159,6 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
        struct internal_dev *internal_dev;
        struct net_device *dev;
        int err;
-       bool free_vport = true;
 
        vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
        if (IS_ERR(vport)) {
@@ -190,10 +189,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 
        rtnl_lock();
        err = register_netdevice(vport->dev);
-       if (err) {
-               free_vport = false;
+       if (err)
                goto error_unlock;
-       }
+       vport->dev->priv_destructor = internal_dev_destructor;
 
        dev_set_promiscuity(vport->dev, 1);
        rtnl_unlock();
@@ -207,8 +205,7 @@ error_unlock:
 error_free_netdev:
        free_netdev(dev);
 error_free_vport:
-       if (free_vport)
-               ovs_vport_free(vport);
+       ovs_vport_free(vport);
 error:
        return ERR_PTR(err);
 }
index 96ea9f2..76d499f 100644 (file)
@@ -338,9 +338,9 @@ static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
 
        if (sk->sk_state == TCP_CLOSE)
                return EPOLLERR;
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
-       if (!skb_queue_empty(&pn->ctrlreq_queue))
+       if (!skb_queue_empty_lockless(&pn->ctrlreq_queue))
                mask |= EPOLLPRI;
        if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
                return EPOLLHUP;
index e35869e..15ce9b6 100644 (file)
@@ -111,15 +111,11 @@ static __poll_t qrtr_tun_poll(struct file *filp, poll_table *wait)
 static int qrtr_tun_release(struct inode *inode, struct file *filp)
 {
        struct qrtr_tun *tun = filp->private_data;
-       struct sk_buff *skb;
 
        qrtr_endpoint_unregister(&tun->ep);
 
        /* Discard all SKBs */
-       while (!skb_queue_empty(&tun->queue)) {
-               skb = skb_dequeue(&tun->queue);
-               kfree_skb(skb);
-       }
+       skb_queue_purge(&tun->queue);
 
        kfree(tun);
 
index f0e9ccf..6a0df7c 100644 (file)
@@ -64,28 +64,6 @@ static const struct proto_ops rose_proto_ops;
 
 ax25_address rose_callsign;
 
-/*
- * ROSE network devices are virtual network devices encapsulating ROSE
- * frames into AX.25 which will be sent through an AX.25 device, so form a
- * special "super class" of normal net devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key rose_netdev_xmit_lock_key;
-static struct lock_class_key rose_netdev_addr_lock_key;
-
-static void rose_set_lockdep_one(struct net_device *dev,
-                                struct netdev_queue *txq,
-                                void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
-}
-
-static void rose_set_lockdep_key(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
-       netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
-}
-
 /*
  *     Convert a ROSE address into text.
  */
@@ -1533,7 +1511,6 @@ static int __init rose_proto_init(void)
                        free_netdev(dev);
                        goto fail;
                }
-               rose_set_lockdep_key(dev);
                dev_rose[i] = dev;
        }
 
index ecc17da..7c7d10f 100644 (file)
@@ -601,6 +601,7 @@ struct rxrpc_call {
        int                     debug_id;       /* debug ID for printks */
        unsigned short          rx_pkt_offset;  /* Current recvmsg packet offset */
        unsigned short          rx_pkt_len;     /* Current recvmsg packet len */
+       bool                    rx_pkt_last;    /* Current recvmsg packet is last */
 
        /* Rx/Tx circular buffer, depending on phase.
         *
index a409079..8578c39 100644 (file)
@@ -267,11 +267,13 @@ static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
  */
 static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
                             u8 *_annotation,
-                            unsigned int *_offset, unsigned int *_len)
+                            unsigned int *_offset, unsigned int *_len,
+                            bool *_last)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        unsigned int offset = sizeof(struct rxrpc_wire_header);
        unsigned int len;
+       bool last = false;
        int ret;
        u8 annotation = *_annotation;
        u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
@@ -281,6 +283,8 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
        len = skb->len - offset;
        if (subpacket < sp->nr_subpackets - 1)
                len = RXRPC_JUMBO_DATALEN;
+       else if (sp->rx_flags & RXRPC_SKB_INCL_LAST)
+               last = true;
 
        if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
                ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
@@ -291,6 +295,7 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
 
        *_offset = offset;
        *_len = len;
+       *_last = last;
        call->security->locate_data(call, skb, _offset, _len);
        return 0;
 }
@@ -309,7 +314,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
        rxrpc_serial_t serial;
        rxrpc_seq_t hard_ack, top, seq;
        size_t remain;
-       bool last;
+       bool rx_pkt_last;
        unsigned int rx_pkt_offset, rx_pkt_len;
        int ix, copy, ret = -EAGAIN, ret2;
 
@@ -319,6 +324,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
 
        rx_pkt_offset = call->rx_pkt_offset;
        rx_pkt_len = call->rx_pkt_len;
+       rx_pkt_last = call->rx_pkt_last;
 
        if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
                seq = call->rx_hard_ack;
@@ -329,6 +335,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
        /* Barriers against rxrpc_input_data(). */
        hard_ack = call->rx_hard_ack;
        seq = hard_ack + 1;
+
        while (top = smp_load_acquire(&call->rx_top),
               before_eq(seq, top)
               ) {
@@ -356,7 +363,8 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
                if (rx_pkt_offset == 0) {
                        ret2 = rxrpc_locate_data(call, skb,
                                                 &call->rxtx_annotations[ix],
-                                                &rx_pkt_offset, &rx_pkt_len);
+                                                &rx_pkt_offset, &rx_pkt_len,
+                                                &rx_pkt_last);
                        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_next, seq,
                                            rx_pkt_offset, rx_pkt_len, ret2);
                        if (ret2 < 0) {
@@ -396,13 +404,12 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
                }
 
                /* The whole packet has been transferred. */
-               last = sp->hdr.flags & RXRPC_LAST_PACKET;
                if (!(flags & MSG_PEEK))
                        rxrpc_rotate_rx_window(call);
                rx_pkt_offset = 0;
                rx_pkt_len = 0;
 
-               if (last) {
+               if (rx_pkt_last) {
                        ASSERTCMP(seq, ==, READ_ONCE(call->rx_top));
                        ret = 1;
                        goto out;
@@ -415,6 +422,7 @@ out:
        if (!(flags & MSG_PEEK)) {
                call->rx_pkt_offset = rx_pkt_offset;
                call->rx_pkt_len = rx_pkt_len;
+               call->rx_pkt_last = rx_pkt_last;
        }
 done:
        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_data_return, seq,
index 69d4676..6284c55 100644 (file)
@@ -399,7 +399,7 @@ static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
 
 int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
                   struct tc_action **a, const struct tc_action_ops *ops,
-                  int bind, bool cpustats)
+                  int bind, bool cpustats, u32 flags)
 {
        struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
        struct tcf_idrinfo *idrinfo = tn->idrinfo;
@@ -427,6 +427,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
        p->tcfa_tm.install = jiffies;
        p->tcfa_tm.lastuse = jiffies;
        p->tcfa_tm.firstuse = 0;
+       p->tcfa_flags = flags;
        if (est) {
                err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
                                        &p->tcfa_rate_est,
@@ -451,6 +452,17 @@ err1:
 }
 EXPORT_SYMBOL(tcf_idr_create);
 
+int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
+                             struct nlattr *est, struct tc_action **a,
+                             const struct tc_action_ops *ops, int bind,
+                             u32 flags)
+{
+       /* Set cpustats according to actions flags. */
+       return tcf_idr_create(tn, index, est, a, ops, bind,
+                             !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
+}
+EXPORT_SYMBOL(tcf_idr_create_from_flags);
+
 void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
 {
        struct tcf_idrinfo *idrinfo = tn->idrinfo;
@@ -773,6 +785,14 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
        }
        rcu_read_unlock();
 
+       if (a->tcfa_flags) {
+               struct nla_bitfield32 flags = { a->tcfa_flags,
+                                               a->tcfa_flags, };
+
+               if (nla_put(skb, TCA_ACT_FLAGS, sizeof(flags), &flags))
+                       goto nla_put_failure;
+       }
+
        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
@@ -831,12 +851,15 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
        return c;
 }
 
+static const u32 tca_act_flags_allowed = TCA_ACT_FLAGS_NO_PERCPU_STATS;
 static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
        [TCA_ACT_KIND]          = { .type = NLA_STRING },
        [TCA_ACT_INDEX]         = { .type = NLA_U32 },
        [TCA_ACT_COOKIE]        = { .type = NLA_BINARY,
                                    .len = TC_COOKIE_MAX_SIZE },
        [TCA_ACT_OPTIONS]       = { .type = NLA_NESTED },
+       [TCA_ACT_FLAGS]         = { .type = NLA_BITFIELD32,
+                                   .validation_data = &tca_act_flags_allowed },
 };
 
 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
@@ -845,6 +868,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
                                    bool rtnl_held,
                                    struct netlink_ext_ack *extack)
 {
+       struct nla_bitfield32 flags = { 0, 0 };
        struct tc_action *a;
        struct tc_action_ops *a_o;
        struct tc_cookie *cookie = NULL;
@@ -876,6 +900,8 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
                                goto err_out;
                        }
                }
+               if (tb[TCA_ACT_FLAGS])
+                       flags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
        } else {
                if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) {
                        NL_SET_ERR_MSG(extack, "TC action name too long");
@@ -914,10 +940,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        /* backward compatibility for policer */
        if (name == NULL)
                err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
-                               rtnl_held, tp, extack);
+                               rtnl_held, tp, flags.value, extack);
        else
                err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
-                               tp, extack);
+                               tp, flags.value, extack);
        if (err < 0)
                goto err_mod;
 
@@ -989,6 +1015,29 @@ err:
        return err;
 }
 
+void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets,
+                            bool drop, bool hw)
+{
+       if (a->cpu_bstats) {
+               _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
+
+               if (drop)
+                       this_cpu_ptr(a->cpu_qstats)->drops += packets;
+
+               if (hw)
+                       _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
+                                          bytes, packets);
+               return;
+       }
+
+       _bstats_update(&a->tcfa_bstats, bytes, packets);
+       if (drop)
+               a->tcfa_qstats.drops += packets;
+       if (hw)
+               _bstats_update(&a->tcfa_bstats_hw, bytes, packets);
+}
+EXPORT_SYMBOL(tcf_action_update_stats);
+
 int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
                          int compat_mode)
 {
index 04b7bd4..46f47e5 100644 (file)
@@ -275,7 +275,8 @@ static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
 static int tcf_bpf_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **act,
                        int replace, int bind, bool rtnl_held,
-                       struct tcf_proto *tp, struct netlink_ext_ack *extack)
+                       struct tcf_proto *tp, u32 flags,
+                       struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, bpf_net_id);
        struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
@@ -303,7 +304,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
        ret = tcf_idr_check_alloc(tn, &index, act, bind);
        if (!ret) {
                ret = tcf_idr_create(tn, index, est, act,
-                                    &act_bpf_ops, bind, true);
+                                    &act_bpf_ops, bind, true, 0);
                if (ret < 0) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
index 2b43cac..43a2430 100644 (file)
@@ -94,7 +94,7 @@ static const struct nla_policy connmark_policy[TCA_CONNMARK_MAX + 1] = {
 static int tcf_connmark_init(struct net *net, struct nlattr *nla,
                             struct nlattr *est, struct tc_action **a,
                             int ovr, int bind, bool rtnl_held,
-                            struct tcf_proto *tp,
+                            struct tcf_proto *tp, u32 flags,
                             struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, connmark_net_id);
@@ -121,7 +121,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
        ret = tcf_idr_check_alloc(tn, &index, a, bind);
        if (!ret) {
                ret = tcf_idr_create(tn, index, est, a,
-                                    &act_connmark_ops, bind, false);
+                                    &act_connmark_ops, bind, false, 0);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
index d3cfad8..16e67e1 100644 (file)
@@ -43,7 +43,7 @@ static struct tc_action_ops act_csum_ops;
 static int tcf_csum_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a, int ovr,
                         int bind, bool rtnl_held, struct tcf_proto *tp,
-                        struct netlink_ext_ack *extack)
+                        u32 flags, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, csum_net_id);
        struct tcf_csum_params *params_new;
@@ -68,8 +68,8 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
        index = parm->index;
        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (!err) {
-               ret = tcf_idr_create(tn, index, est, a,
-                                    &act_csum_ops, bind, true);
+               ret = tcf_idr_create_from_flags(tn, index, est, a,
+                                               &act_csum_ops, bind, flags);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
@@ -580,7 +580,7 @@ static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
        params = rcu_dereference_bh(p->params);
 
        tcf_lastuse_update(&p->tcf_tm);
-       bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);
+       tcf_action_update_bstats(&p->common, skb);
 
        action = READ_ONCE(p->tcf_action);
        if (unlikely(action == TC_ACT_SHOT))
@@ -624,7 +624,7 @@ out:
        return action;
 
 drop:
-       qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
+       tcf_action_inc_drop_qstats(&p->common);
        action = TC_ACT_SHOT;
        goto out;
 }
index fcc4602..68d6af5 100644 (file)
@@ -465,11 +465,11 @@ out_push:
        skb_push_rcsum(skb, nh_ofs);
 
 out:
-       bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), skb);
+       tcf_action_update_bstats(&c->common, skb);
        return retval;
 
 drop:
-       qstats_drop_inc(this_cpu_ptr(a->cpu_qstats));
+       tcf_action_inc_drop_qstats(&c->common);
        return TC_ACT_SHOT;
 }
 
@@ -656,7 +656,7 @@ static int tcf_ct_fill_params(struct net *net,
 static int tcf_ct_init(struct net *net, struct nlattr *nla,
                       struct nlattr *est, struct tc_action **a,
                       int replace, int bind, bool rtnl_held,
-                      struct tcf_proto *tp,
+                      struct tcf_proto *tp, u32 flags,
                       struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, ct_net_id);
@@ -688,8 +688,8 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
                return err;
 
        if (!err) {
-               err = tcf_idr_create(tn, index, est, a,
-                                    &act_ct_ops, bind, true);
+               err = tcf_idr_create_from_flags(tn, index, est, a,
+                                               &act_ct_ops, bind, flags);
                if (err) {
                        tcf_idr_cleanup(tn, index);
                        return err;
@@ -905,11 +905,7 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
 {
        struct tcf_ct *c = to_ct(a);
 
-       _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
-
-       if (hw)
-               _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
-                                  bytes, packets);
+       tcf_action_update_stats(a, bytes, packets, false, hw);
        c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
 }
 
index 0dbcfd1..b1e6010 100644 (file)
@@ -153,7 +153,7 @@ static const struct nla_policy ctinfo_policy[TCA_CTINFO_MAX + 1] = {
 static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
                           int ovr, int bind, bool rtnl_held,
-                          struct tcf_proto *tp,
+                          struct tcf_proto *tp, u32 flags,
                           struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, ctinfo_net_id);
@@ -210,7 +210,7 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (!err) {
                ret = tcf_idr_create(tn, index, est, a,
-                                    &act_ctinfo_ops, bind, false);
+                                    &act_ctinfo_ops, bind, false, 0);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
index 324f1d1..4160657 100644 (file)
@@ -53,7 +53,8 @@ static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
 static int tcf_gact_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
                         int ovr, int bind, bool rtnl_held,
-                        struct tcf_proto *tp, struct netlink_ext_ack *extack)
+                        struct tcf_proto *tp, u32 flags,
+                        struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, gact_net_id);
        struct nlattr *tb[TCA_GACT_MAX + 1];
@@ -98,8 +99,8 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
 
        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (!err) {
-               ret = tcf_idr_create(tn, index, est, a,
-                                    &act_gact_ops, bind, true);
+               ret = tcf_idr_create_from_flags(tn, index, est, a,
+                                               &act_gact_ops, bind, flags);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
@@ -161,9 +162,9 @@ static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a,
                action = gact_rand[ptype](gact);
        }
 #endif
-       bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), skb);
+       tcf_action_update_bstats(&gact->common, skb);
        if (action == TC_ACT_SHOT)
-               qstats_drop_inc(this_cpu_ptr(gact->common.cpu_qstats));
+               tcf_action_inc_drop_qstats(&gact->common);
 
        tcf_lastuse_update(&gact->tcf_tm);
 
@@ -177,15 +178,7 @@ static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
        int action = READ_ONCE(gact->tcf_action);
        struct tcf_t *tm = &gact->tcf_tm;
 
-       _bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), bytes,
-                          packets);
-       if (action == TC_ACT_SHOT)
-               this_cpu_ptr(gact->common.cpu_qstats)->drops += packets;
-
-       if (hw)
-               _bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats_hw),
-                                  bytes, packets);
-
+       tcf_action_update_stats(a, bytes, packets, action == TC_ACT_SHOT, hw);
        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 }
 
index 3a31e24..d562c88 100644 (file)
@@ -465,7 +465,8 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
 static int tcf_ife_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **a,
                        int ovr, int bind, bool rtnl_held,
-                       struct tcf_proto *tp, struct netlink_ext_ack *extack)
+                       struct tcf_proto *tp, u32 flags,
+                       struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, ife_net_id);
        struct nlattr *tb[TCA_IFE_MAX + 1];
@@ -522,7 +523,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
 
        if (!exists) {
                ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
-                                    bind, true);
+                                    bind, true, 0);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        kfree(p);
index 214a03d..400a2cf 100644 (file)
@@ -95,7 +95,7 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
 static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
                          struct nlattr *est, struct tc_action **a,
                          const struct tc_action_ops *ops, int ovr, int bind,
-                         struct tcf_proto *tp)
+                         struct tcf_proto *tp, u32 flags)
 {
        struct tc_action_net *tn = net_generic(net, id);
        struct nlattr *tb[TCA_IPT_MAX + 1];
@@ -144,7 +144,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
 
        if (!exists) {
                ret = tcf_idr_create(tn, index, est, a, ops, bind,
-                                    false);
+                                    false, 0);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
@@ -205,19 +205,19 @@ err1:
 static int tcf_ipt_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **a, int ovr,
                        int bind, bool rtnl_held, struct tcf_proto *tp,
-                       struct netlink_ext_ack *extack)
+                       u32 flags, struct netlink_ext_ack *extack)
 {
        return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
-                             bind, tp);
+                             bind, tp, flags);
 }
 
 static int tcf_xt_init(struct net *net, struct nlattr *nla,
                       struct nlattr *est, struct tc_action **a, int ovr,
                       int bind, bool unlocked, struct tcf_proto *tp,
-                      struct netlink_ext_ack *extack)
+                      u32 flags, struct netlink_ext_ack *extack)
 {
        return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
-                             bind, tp);
+                             bind, tp, flags);
 }
 
 static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a,
index 08923b2..b6e1b5b 100644 (file)
@@ -93,7 +93,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
                           int ovr, int bind, bool rtnl_held,
                           struct tcf_proto *tp,
-                          struct netlink_ext_ack *extack)
+                          u32 flags, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, mirred_net_id);
        struct nlattr *tb[TCA_MIRRED_MAX + 1];
@@ -148,8 +148,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                        NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
                        return -EINVAL;
                }
-               ret = tcf_idr_create(tn, index, est, a,
-                                    &act_mirred_ops, bind, true);
+               ret = tcf_idr_create_from_flags(tn, index, est, a,
+                                               &act_mirred_ops, bind, flags);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
@@ -231,7 +231,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
        }
 
        tcf_lastuse_update(&m->tcf_tm);
-       bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
+       tcf_action_update_bstats(&m->common, skb);
 
        m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
        m_eaction = READ_ONCE(m->tcfm_eaction);
@@ -289,8 +289,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
                /* let's the caller reinsert the packet, if possible */
                if (use_reinsert) {
                        res->ingress = want_ingress;
-                       res->qstats = this_cpu_ptr(m->common.cpu_qstats);
-                       skb_tc_reinsert(skb, res);
+                       if (skb_tc_reinsert(skb, res))
+                               tcf_action_inc_overlimit_qstats(&m->common);
                        __this_cpu_dec(mirred_rec_level);
                        return TC_ACT_CONSUMED;
                }
@@ -303,7 +303,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
 
        if (err) {
 out:
-               qstats_overlimit_inc(this_cpu_ptr(m->common.cpu_qstats));
+               tcf_action_inc_overlimit_qstats(&m->common);
                if (tcf_mirred_is_act_redirect(m_eaction))
                        retval = TC_ACT_SHOT;
        }
@@ -318,10 +318,7 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
        struct tcf_mirred *m = to_mirred(a);
        struct tcf_t *tm = &m->tcf_tm;
 
-       _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
-       if (hw)
-               _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
-                                  bytes, packets);
+       tcf_action_update_stats(a, bytes, packets, false, hw);
        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 }
 
index 4cf6c55..4d8c822 100644 (file)
@@ -131,7 +131,8 @@ static const struct nla_policy mpls_policy[TCA_MPLS_MAX + 1] = {
 static int tcf_mpls_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
                         int ovr, int bind, bool rtnl_held,
-                        struct tcf_proto *tp, struct netlink_ext_ack *extack)
+                        struct tcf_proto *tp, u32 flags,
+                        struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, mpls_net_id);
        struct nlattr *tb[TCA_MPLS_MAX + 1];
@@ -224,7 +225,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
 
        if (!exists) {
                ret = tcf_idr_create(tn, index, est, a,
-                                    &act_mpls_ops, bind, true);
+                                    &act_mpls_ops, bind, true, 0);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
index ea4c535..88a1b79 100644 (file)
@@ -36,7 +36,7 @@ static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
 static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
                        struct tc_action **a, int ovr, int bind,
                        bool rtnl_held, struct tcf_proto *tp,
-                       struct netlink_ext_ack *extack)
+                       u32 flags, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, nat_net_id);
        struct nlattr *tb[TCA_NAT_MAX + 1];
@@ -61,7 +61,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (!err) {
                ret = tcf_idr_create(tn, index, est, a,
-                                    &act_nat_ops, bind, false);
+                                    &act_nat_ops, bind, false, 0);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
index cdfaa79..d5eff6a 100644 (file)
@@ -137,7 +137,8 @@ nla_failure:
 static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                          struct nlattr *est, struct tc_action **a,
                          int ovr, int bind, bool rtnl_held,
-                         struct tcf_proto *tp, struct netlink_ext_ack *extack)
+                         struct tcf_proto *tp, u32 flags,
+                         struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, pedit_net_id);
        struct nlattr *tb[TCA_PEDIT_MAX + 1];
@@ -190,7 +191,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                        goto out_free;
                }
                ret = tcf_idr_create(tn, index, est, a,
-                                    &act_pedit_ops, bind, false);
+                                    &act_pedit_ops, bind, false, 0);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        goto out_free;
index 981a9ec..d962715 100644 (file)
@@ -47,7 +47,7 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
 static int tcf_police_init(struct net *net, struct nlattr *nla,
                               struct nlattr *est, struct tc_action **a,
                               int ovr, int bind, bool rtnl_held,
-                              struct tcf_proto *tp,
+                              struct tcf_proto *tp, u32 flags,
                               struct netlink_ext_ack *extack)
 {
        int ret = 0, tcfp_result = TC_ACT_OK, err, size;
@@ -87,7 +87,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
 
        if (!exists) {
                ret = tcf_idr_create(tn, index, NULL, a,
-                                    &act_police_ops, bind, true);
+                                    &act_police_ops, bind, true, 0);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
@@ -294,10 +294,7 @@ static void tcf_police_stats_update(struct tc_action *a,
        struct tcf_police *police = to_police(a);
        struct tcf_t *tm = &police->tcf_tm;
 
-       _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
-       if (hw)
-               _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
-                                  bytes, packets);
+       tcf_action_update_stats(a, bytes, packets, false, hw);
        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 }
 
index 514456a..29b23bf 100644 (file)
@@ -36,7 +36,7 @@ static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = {
 static int tcf_sample_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a, int ovr,
                           int bind, bool rtnl_held, struct tcf_proto *tp,
-                          struct netlink_ext_ack *extack)
+                          u32 flags, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, sample_net_id);
        struct nlattr *tb[TCA_SAMPLE_MAX + 1];
@@ -69,7 +69,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 
        if (!exists) {
                ret = tcf_idr_create(tn, index, est, a,
-                                    &act_sample_ops, bind, true);
+                                    &act_sample_ops, bind, true, 0);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
index 6120e56..97639b2 100644 (file)
@@ -86,7 +86,8 @@ static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = {
 static int tcf_simp_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
                         int ovr, int bind, bool rtnl_held,
-                        struct tcf_proto *tp, struct netlink_ext_ack *extack)
+                        struct tcf_proto *tp, u32 flags,
+                        struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, simp_net_id);
        struct nlattr *tb[TCA_DEF_MAX + 1];
@@ -127,7 +128,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
 
        if (!exists) {
                ret = tcf_idr_create(tn, index, est, a,
-                                    &act_simp_ops, bind, false);
+                                    &act_simp_ops, bind, false, 0);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
index 6a8d333..5f7ca7f 100644 (file)
@@ -86,7 +86,7 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
 static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                            struct nlattr *est, struct tc_action **a,
                            int ovr, int bind, bool rtnl_held,
-                           struct tcf_proto *tp,
+                           struct tcf_proto *tp, u32 act_flags,
                            struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, skbedit_net_id);
@@ -165,7 +165,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 
        if (!exists) {
                ret = tcf_idr_create(tn, index, est, a,
-                                    &act_skbedit_ops, bind, true);
+                                    &act_skbedit_ops, bind, true, 0);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
index 888437f..39e6d94 100644 (file)
@@ -79,7 +79,7 @@ static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = {
 static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
                           int ovr, int bind, bool rtnl_held,
-                          struct tcf_proto *tp,
+                          struct tcf_proto *tp, u32 flags,
                           struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, skbmod_net_id);
@@ -143,7 +143,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
 
        if (!exists) {
                ret = tcf_idr_create(tn, index, est, a,
-                                    &act_skbmod_ops, bind, true);
+                                    &act_skbmod_ops, bind, true, 0);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
index 2f83a79..cb34e5d 100644 (file)
@@ -31,7 +31,7 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
        params = rcu_dereference_bh(t->params);
 
        tcf_lastuse_update(&t->tcf_tm);
-       bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb);
+       tcf_action_update_bstats(&t->common, skb);
        action = READ_ONCE(t->tcf_action);
 
        switch (params->tcft_action) {
@@ -208,7 +208,7 @@ static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
 static int tunnel_key_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
                           int ovr, int bind, bool rtnl_held,
-                          struct tcf_proto *tp,
+                          struct tcf_proto *tp, u32 act_flags,
                           struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
@@ -347,8 +347,9 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
        }
 
        if (!exists) {
-               ret = tcf_idr_create(tn, index, est, a,
-                                    &act_tunnel_key_ops, bind, true);
+               ret = tcf_idr_create_from_flags(tn, index, est, a,
+                                               &act_tunnel_key_ops, bind,
+                                               act_flags);
                if (ret) {
                        NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
                        goto release_tun_meta;
index 08aaf71..b6939ab 100644 (file)
@@ -29,7 +29,7 @@ static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
        u16 tci;
 
        tcf_lastuse_update(&v->tcf_tm);
-       bstats_cpu_update(this_cpu_ptr(v->common.cpu_bstats), skb);
+       tcf_action_update_bstats(&v->common, skb);
 
        /* Ensure 'data' points at mac_header prior calling vlan manipulating
         * functions.
@@ -88,7 +88,7 @@ out:
        return action;
 
 drop:
-       qstats_drop_inc(this_cpu_ptr(v->common.cpu_qstats));
+       tcf_action_inc_drop_qstats(&v->common);
        return TC_ACT_SHOT;
 }
 
@@ -102,7 +102,8 @@ static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
 static int tcf_vlan_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
                         int ovr, int bind, bool rtnl_held,
-                        struct tcf_proto *tp, struct netlink_ext_ack *extack)
+                        struct tcf_proto *tp, u32 flags,
+                        struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, vlan_net_id);
        struct nlattr *tb[TCA_VLAN_MAX + 1];
@@ -188,8 +189,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
        action = parm->v_action;
 
        if (!exists) {
-               ret = tcf_idr_create(tn, index, est, a,
-                                    &act_vlan_ops, bind, true);
+               ret = tcf_idr_create_from_flags(tn, index, est, a,
+                                               &act_vlan_ops, bind, flags);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
@@ -307,10 +308,7 @@ static void tcf_vlan_stats_update(struct tc_action *a, u64 bytes, u32 packets,
        struct tcf_vlan *v = to_vlan(a);
        struct tcf_t *tm = &v->tcf_tm;
 
-       _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
-       if (hw)
-               _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
-                                  bytes, packets);
+       tcf_action_update_stats(a, bytes, packets, false, hw);
        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 }
 
index bf10bda..8229ed4 100644 (file)
@@ -162,16 +162,20 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
        cls_bpf.name = obj->bpf_name;
        cls_bpf.exts_integrated = obj->exts_integrated;
 
-       if (oldprog)
+       if (oldprog && prog)
                err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
                                          skip_sw, &oldprog->gen_flags,
                                          &oldprog->in_hw_count,
                                          &prog->gen_flags, &prog->in_hw_count,
                                          true);
-       else
+       else if (prog)
                err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
                                      skip_sw, &prog->gen_flags,
                                      &prog->in_hw_count, true);
+       else
+               err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
+                                         skip_sw, &oldprog->gen_flags,
+                                         &oldprog->in_hw_count, true);
 
        if (prog && err) {
                cls_bpf_offload_cmd(tp, oldprog, prog, extack);
index 4c5dfcb..8561e82 100644 (file)
@@ -794,9 +794,6 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 };
 EXPORT_SYMBOL(pfifo_fast_ops);
 
-static struct lock_class_key qdisc_tx_busylock;
-static struct lock_class_key qdisc_running_key;
-
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          const struct Qdisc_ops *ops,
                          struct netlink_ext_ack *extack)
@@ -849,17 +846,9 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        }
 
        spin_lock_init(&sch->busylock);
-       lockdep_set_class(&sch->busylock,
-                         dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
-
        /* seqlock has the same scope of busylock, for NOLOCK qdisc */
        spin_lock_init(&sch->seqlock);
-       lockdep_set_class(&sch->busylock,
-                         dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
-
        seqcount_init(&sch->running);
-       lockdep_set_class(&sch->running,
-                         dev->qdisc_running_key ?: &qdisc_running_key);
 
        sch->ops = ops;
        sch->flags = ops->static_flags;
@@ -870,6 +859,12 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        dev_hold(dev);
        refcount_set(&sch->refcnt, 1);
 
+       if (sch != &noop_qdisc) {
+               lockdep_set_class(&sch->busylock, &dev->qdisc_tx_busylock_key);
+               lockdep_set_class(&sch->seqlock, &dev->qdisc_tx_busylock_key);
+               lockdep_set_class(&sch->running, &dev->qdisc_running_key);
+       }
+
        return sch;
 errout1:
        kfree(p);
index 23cd1c8..be35f03 100644 (file)
@@ -5,11 +5,11 @@
  * Copyright (C) 2013 Nandita Dukkipati <nanditad@google.com>
  */
 
-#include <linux/jhash.h>
 #include <linux/jiffies.h>
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/vmalloc.h>
+#include <linux/siphash.h>
 #include <net/pkt_sched.h>
 #include <net/sock.h>
 
@@ -126,7 +126,7 @@ struct wdrr_bucket {
 
 struct hhf_sched_data {
        struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
-       u32                perturbation;   /* hash perturbation */
+       siphash_key_t      perturbation;   /* hash perturbation */
        u32                quantum;        /* psched_mtu(qdisc_dev(sch)); */
        u32                drop_overlimit; /* number of times max qdisc packet
                                            * limit was hit
@@ -264,7 +264,7 @@ static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
        }
 
        /* Get hashed flow-id of the skb. */
-       hash = skb_get_hash_perturb(skb, q->perturbation);
+       hash = skb_get_hash_perturb(skb, &q->perturbation);
 
        /* Check if this packet belongs to an already established HH flow. */
        flow_pos = hash & HHF_BIT_MASK;
@@ -582,7 +582,7 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt,
 
        sch->limit = 1000;
        q->quantum = psched_mtu(qdisc_dev(sch));
-       q->perturbation = prandom_u32();
+       get_random_bytes(&q->perturbation, sizeof(q->perturbation));
        INIT_LIST_HEAD(&q->new_buckets);
        INIT_LIST_HEAD(&q->old_buckets);
 
index d448fe3..4074c50 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/errno.h>
 #include <linux/skbuff.h>
 #include <linux/random.h>
-#include <linux/jhash.h>
+#include <linux/siphash.h>
 #include <net/ip.h>
 #include <net/pkt_sched.h>
 #include <net/pkt_cls.h>
@@ -45,7 +45,7 @@ struct sfb_bucket {
  * (Section 4.4 of SFB reference : moving hash functions)
  */
 struct sfb_bins {
-       u32               perturbation; /* jhash perturbation */
+       siphash_key_t     perturbation; /* siphash key */
        struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
 };
 
@@ -217,7 +217,8 @@ static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_da
 
 static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
 {
-       q->bins[slot].perturbation = prandom_u32();
+       get_random_bytes(&q->bins[slot].perturbation,
+                        sizeof(q->bins[slot].perturbation));
 }
 
 static void sfb_swap_slot(struct sfb_sched_data *q)
@@ -314,9 +315,9 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                /* If using external classifiers, get result and record it. */
                if (!sfb_classify(skb, fl, &ret, &salt))
                        goto other_drop;
-               sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+               sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
        } else {
-               sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
+               sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
        }
 
 
@@ -352,7 +353,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                /* Inelastic flow */
                if (q->double_buffering) {
                        sfbhash = skb_get_hash_perturb(skb,
-                           q->bins[slot].perturbation);
+                           &q->bins[slot].perturbation);
                        if (!sfbhash)
                                sfbhash = 1;
                        sfb_skb_cb(skb)->hashes[slot] = sfbhash;
index 68404a9..c787d4d 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/skbuff.h>
-#include <linux/jhash.h>
+#include <linux/siphash.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <net/netlink.h>
@@ -117,7 +117,7 @@ struct sfq_sched_data {
        u8              headdrop;
        u8              maxdepth;       /* limit of packets per flow */
 
-       u32             perturbation;
+       siphash_key_t   perturbation;
        u8              cur_depth;      /* depth of longest slot */
        u8              flags;
        unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
@@ -157,7 +157,7 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
 static unsigned int sfq_hash(const struct sfq_sched_data *q,
                             const struct sk_buff *skb)
 {
-       return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
+       return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
 }
 
 static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
@@ -607,9 +607,11 @@ static void sfq_perturbation(struct timer_list *t)
        struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
        struct Qdisc *sch = q->sch;
        spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+       siphash_key_t nkey;
 
+       get_random_bytes(&nkey, sizeof(nkey));
        spin_lock(root_lock);
-       q->perturbation = prandom_u32();
+       q->perturbation = nkey;
        if (!q->filter_list && q->tail)
                sfq_rehash(sch);
        spin_unlock(root_lock);
@@ -688,7 +690,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
        del_timer(&q->perturb_timer);
        if (q->perturb_period) {
                mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
-               q->perturbation = prandom_u32();
+               get_random_bytes(&q->perturbation, sizeof(q->perturbation));
        }
        sch_tree_unlock(sch);
        kfree(p);
@@ -745,7 +747,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
        q->quantum = psched_mtu(qdisc_dev(sch));
        q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
        q->perturb_period = 0;
-       q->perturbation = prandom_u32();
+       get_random_bytes(&q->perturbation, sizeof(q->perturbation));
 
        if (opt) {
                int err = sfq_change(sch, opt);
index 6719a65..2121187 100644 (file)
@@ -1152,7 +1152,7 @@ EXPORT_SYMBOL_GPL(taprio_offload_free);
  * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
  * This is left as TODO.
  */
-void taprio_offload_config_changed(struct taprio_sched *q)
+static void taprio_offload_config_changed(struct taprio_sched *q)
 {
        struct sched_gate_list *oper, *admin;
 
index 5ca0ec0..ffd3262 100644 (file)
@@ -8476,7 +8476,7 @@ __poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
        mask = 0;
 
        /* Is there any exceptional events?  */
-       if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+       if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
        if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -8485,7 +8485,7 @@ __poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
                mask |= EPOLLHUP;
 
        /* Is it readable?  Reconsider this code with TCP-style support.  */
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* The association is either gone or not ready.  */
@@ -8871,7 +8871,7 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
                if (sk_can_busy_loop(sk)) {
                        sk_busy_loop(sk, noblock);
 
-                       if (!skb_queue_empty(&sk->sk_receive_queue))
+                       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                                continue;
                }
 
@@ -9306,7 +9306,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
        newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
        newinet->inet_dport = htons(asoc->peer.port);
        newinet->pmtudisc = inet->pmtudisc;
-       newinet->inet_id = asoc->next_tsn ^ jiffies;
+       newinet->inet_id = prandom_u32();
 
        newinet->uc_ttl = inet->uc_ttl;
        newinet->mc_loop = 1;
index 91ea098..b7d9fd2 100644 (file)
@@ -123,6 +123,12 @@ struct proto smc_proto6 = {
 };
 EXPORT_SYMBOL_GPL(smc_proto6);
 
+static void smc_restore_fallback_changes(struct smc_sock *smc)
+{
+       smc->clcsock->file->private_data = smc->sk.sk_socket;
+       smc->clcsock->file = NULL;
+}
+
 static int __smc_release(struct smc_sock *smc)
 {
        struct sock *sk = &smc->sk;
@@ -141,6 +147,7 @@ static int __smc_release(struct smc_sock *smc)
                }
                sk->sk_state = SMC_CLOSED;
                sk->sk_state_change(sk);
+               smc_restore_fallback_changes(smc);
        }
 
        sk->sk_prot->unhash(sk);
@@ -702,8 +709,6 @@ static int __smc_connect(struct smc_sock *smc)
        int smc_type;
        int rc = 0;
 
-       sock_hold(&smc->sk); /* sock put in passive closing */
-
        if (smc->use_fallback)
                return smc_connect_fallback(smc, smc->fallback_rsn);
 
@@ -848,6 +853,8 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
        rc = kernel_connect(smc->clcsock, addr, alen, flags);
        if (rc && rc != -EINPROGRESS)
                goto out;
+
+       sock_hold(&smc->sk); /* sock put in passive closing */
        if (flags & O_NONBLOCK) {
                if (schedule_work(&smc->connect_work))
                        smc->connect_nonblock = 1;
@@ -1295,8 +1302,8 @@ static void smc_listen_work(struct work_struct *work)
        /* check if RDMA is available */
        if (!ism_supported) { /* SMC_TYPE_R or SMC_TYPE_B */
                /* prepare RDMA check */
-               memset(&ini, 0, sizeof(ini));
                ini.is_smcd = false;
+               ini.ism_dev = NULL;
                ini.ib_lcl = &pclc->lcl;
                rc = smc_find_rdma_device(new_smc, &ini);
                if (rc) {
index ed02eac..0d92456 100644 (file)
@@ -646,7 +646,7 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
        }
 
        rtnl_lock();
-       nest_lvl = dev_get_nest_level(ndev);
+       nest_lvl = ndev->lower_level;
        for (i = 0; i < nest_lvl; i++) {
                struct list_head *lower = &ndev->adj_list.lower;
 
index 6b7799b..352ee2f 100644 (file)
@@ -718,7 +718,7 @@ static struct net_device *pnet_find_base_ndev(struct net_device *ndev)
        int i, nest_lvl;
 
        rtnl_lock();
-       nest_lvl = dev_get_nest_level(ndev);
+       nest_lvl = ndev->lower_level;
        for (i = 0; i < nest_lvl; i++) {
                struct list_head *lower = &ndev->adj_list.lower;
 
index 339e8c0..195b40c 100644 (file)
@@ -220,7 +220,7 @@ void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
                goto out;
 
        spin_lock_bh(&xprt->bc_pa_lock);
-       xprt->bc_alloc_max -= max_reqs;
+       xprt->bc_alloc_max -= min(max_reqs, xprt->bc_alloc_max);
        list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
                dprintk("RPC:        req=%p\n", req);
                list_del(&req->rq_bc_pa_list);
@@ -307,8 +307,8 @@ void xprt_free_bc_rqst(struct rpc_rqst *req)
                 */
                dprintk("RPC:       Last session removed req=%p\n", req);
                xprt_free_allocation(req);
-               return;
        }
+       xprt_put(xprt);
 }
 
 /*
@@ -339,7 +339,7 @@ found:
                spin_unlock(&xprt->bc_pa_lock);
                if (new) {
                        if (req != new)
-                               xprt_free_bc_rqst(new);
+                               xprt_free_allocation(new);
                        break;
                } else if (req)
                        break;
@@ -368,6 +368,7 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
        set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
 
        dprintk("RPC:       add callback request to list\n");
+       xprt_get(xprt);
        spin_lock(&bc_serv->sv_cb_lock);
        list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
        wake_up(&bc_serv->sv_cb_waitq);
index 8a45b3c..41df4c5 100644 (file)
@@ -1942,6 +1942,11 @@ static void xprt_destroy_cb(struct work_struct *work)
        rpc_destroy_wait_queue(&xprt->sending);
        rpc_destroy_wait_queue(&xprt->backlog);
        kfree(xprt->servername);
+       /*
+        * Destroy any existing back channel
+        */
+       xprt_destroy_backchannel(xprt, UINT_MAX);
+
        /*
         * Tear down transport state and free the rpc_xprt
         */
index 50e075f..b458bf5 100644 (file)
@@ -163,6 +163,7 @@ void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
        spin_lock(&xprt->bc_pa_lock);
        list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
        spin_unlock(&xprt->bc_pa_lock);
+       xprt_put(xprt);
 }
 
 static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
@@ -259,6 +260,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
 
        /* Queue rqst for ULP's callback service */
        bc_serv = xprt->bc_serv;
+       xprt_get(xprt);
        spin_lock(&bc_serv->sv_cb_lock);
        list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
        spin_unlock(&bc_serv->sv_cb_lock);
index 23cb379..ab648dd 100644 (file)
@@ -105,6 +105,15 @@ static void __net_exit tipc_exit_net(struct net *net)
        tipc_sk_rht_destroy(net);
 }
 
+static void __net_exit tipc_pernet_pre_exit(struct net *net)
+{
+       tipc_node_pre_cleanup_net(net);
+}
+
+static struct pernet_operations tipc_pernet_pre_exit_ops = {
+       .pre_exit = tipc_pernet_pre_exit,
+};
+
 static struct pernet_operations tipc_net_ops = {
        .init = tipc_init_net,
        .exit = tipc_exit_net,
@@ -151,6 +160,10 @@ static int __init tipc_init(void)
        if (err)
                goto out_pernet_topsrv;
 
+       err = register_pernet_subsys(&tipc_pernet_pre_exit_ops);
+       if (err)
+               goto out_register_pernet_subsys;
+
        err = tipc_bearer_setup();
        if (err)
                goto out_bearer;
@@ -158,6 +171,8 @@ static int __init tipc_init(void)
        pr_info("Started in single node mode\n");
        return 0;
 out_bearer:
+       unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
+out_register_pernet_subsys:
        unregister_pernet_device(&tipc_topsrv_net_ops);
 out_pernet_topsrv:
        tipc_socket_stop();
@@ -177,6 +192,7 @@ out_netlink:
 static void __exit tipc_exit(void)
 {
        tipc_bearer_cleanup();
+       unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
        unregister_pernet_device(&tipc_topsrv_net_ops);
        tipc_socket_stop();
        unregister_pernet_device(&tipc_net_ops);
index 60d8295..8776d32 100644 (file)
@@ -59,6 +59,7 @@
 #include <net/netns/generic.h>
 #include <linux/rhashtable.h>
 #include <net/genetlink.h>
+#include <net/netns/hash.h>
 
 struct tipc_node;
 struct tipc_bearer;
@@ -185,6 +186,11 @@ static inline int in_range(u16 val, u16 min, u16 max)
        return !less(val, min) && !more(val, max);
 }
 
+static inline u32 tipc_net_hash_mixes(struct net *net, int tn_rand)
+{
+       return net_hash_mix(&init_net) ^ net_hash_mix(net) ^ tn_rand;
+}
+
 #ifdef CONFIG_SYSCTL
 int tipc_register_sysctl(void);
 void tipc_unregister_sysctl(void);
index c138d68..b043e8c 100644 (file)
@@ -94,6 +94,7 @@ static void tipc_disc_init_msg(struct net *net, struct sk_buff *skb,
        msg_set_dest_domain(hdr, dest_domain);
        msg_set_bc_netid(hdr, tn->net_id);
        b->media->addr2msg(msg_media_addr(hdr), &b->addr);
+       msg_set_peer_net_hash(hdr, tipc_net_hash_mixes(net, tn->random));
        msg_set_node_id(hdr, tipc_own_id(net));
 }
 
@@ -242,7 +243,8 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
        if (!tipc_in_scope(legacy, b->domain, src))
                return;
        tipc_node_check_dest(net, src, peer_id, b, caps, signature,
-                            &maddr, &respond, &dupl_addr);
+                            msg_peer_net_hash(hdr), &maddr, &respond,
+                            &dupl_addr);
        if (dupl_addr)
                disc_dupl_alert(b, src, &maddr);
        if (!respond)
index 999eab5..7d7a661 100644 (file)
@@ -1873,7 +1873,7 @@ void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
 
        tipc_link_create_dummy_tnl_msg(tnl, xmitq);
 
-       /* This failover link enpoint was never established before,
+       /* This failover link endpoint was never established before,
         * so it has not received anything from peer.
         * Otherwise, it must be a normal failover situation or the
         * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
index 922d262..973795a 100644 (file)
@@ -190,6 +190,59 @@ err:
        return 0;
 }
 
+/**
+ * tipc_msg_append(): Append data to tail of an existing buffer queue
+ * @hdr: header to be used
+ * @m: the data to be appended
+ * @dlen: size of data to be appended
+ * @mss: max allowable size of buffer
+ * @txq: queue to append to
+ * Returns the number of 1k blocks appended or errno value
+ */
+int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
+                   int mss, struct sk_buff_head *txq)
+{
+       struct sk_buff *skb, *prev;
+       int accounted, total, curr;
+       int mlen, cpy, rem = dlen;
+       struct tipc_msg *hdr;
+
+       skb = skb_peek_tail(txq);
+       accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
+       total = accounted;
+
+       while (rem) {
+               if (!skb || skb->len >= mss) {
+                       prev = skb;
+                       skb = tipc_buf_acquire(mss, GFP_KERNEL);
+                       if (unlikely(!skb))
+                               return -ENOMEM;
+                       skb_orphan(skb);
+                       skb_trim(skb, MIN_H_SIZE);
+                       hdr = buf_msg(skb);
+                       skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
+                       msg_set_hdr_sz(hdr, MIN_H_SIZE);
+                       msg_set_size(hdr, MIN_H_SIZE);
+                       __skb_queue_tail(txq, skb);
+                       total += 1;
+                       if (prev)
+                               msg_set_ack_required(buf_msg(prev), 0);
+                       msg_set_ack_required(hdr, 1);
+               }
+               hdr = buf_msg(skb);
+               curr = msg_blocks(hdr);
+               mlen = msg_size(hdr);
+               cpy = min_t(int, rem, mss - mlen);
+               if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
+                       return -EFAULT;
+               msg_set_size(hdr, mlen + cpy);
+               skb_put(skb, cpy);
+               rem -= cpy;
+               total += msg_blocks(hdr) - curr;
+       }
+       return total - accounted;
+}
+
 /* tipc_msg_validate - validate basic format of received message
  *
  * This routine ensures a TIPC message has an acceptable header, and at least
index 0daa6f0..0435dda 100644 (file)
@@ -290,6 +290,16 @@ static inline void msg_set_src_droppable(struct tipc_msg *m, u32 d)
        msg_set_bits(m, 0, 18, 1, d);
 }
 
+static inline int msg_ack_required(struct tipc_msg *m)
+{
+       return msg_bits(m, 0, 18, 1);
+}
+
+static inline void msg_set_ack_required(struct tipc_msg *m, u32 d)
+{
+       msg_set_bits(m, 0, 18, 1, d);
+}
+
 static inline bool msg_is_rcast(struct tipc_msg *m)
 {
        return msg_bits(m, 0, 18, 0x1);
@@ -1026,6 +1036,20 @@ static inline bool msg_is_reset(struct tipc_msg *hdr)
        return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG);
 }
 
+/* Word 13
+ */
+static inline void msg_set_peer_net_hash(struct tipc_msg *m, u32 n)
+{
+       msg_set_word(m, 13, n);
+}
+
+static inline u32 msg_peer_net_hash(struct tipc_msg *m)
+{
+       return msg_word(m, 13);
+}
+
+/* Word 14
+ */
 static inline u32 msg_sugg_node_addr(struct tipc_msg *m)
 {
        return msg_word(m, 14);
@@ -1065,6 +1089,8 @@ int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
                      int pktmax, struct sk_buff_head *frags);
 int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
                   int offset, int dsz, int mtu, struct sk_buff_head *list);
+int tipc_msg_append(struct tipc_msg *hdr, struct msghdr *m, int dlen,
+                   int mss, struct sk_buff_head *txq);
 bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
 bool tipc_msg_assemble(struct sk_buff_head *list);
 bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq);
index 836e629..5feaf3b 100644 (file)
@@ -146,7 +146,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
        struct publication *publ;
        struct sk_buff *skb = NULL;
        struct distr_item *item = NULL;
-       u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
+       u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
                        ITEM_SIZE) * ITEM_SIZE;
        u32 msg_rem = msg_dsz;
 
index f2e3cf7..4b60928 100644 (file)
@@ -126,6 +126,8 @@ struct tipc_node {
        struct timer_list timer;
        struct rcu_head rcu;
        unsigned long delete_at;
+       struct net *peer_net;
+       u32 peer_hash_mix;
 };
 
 /* Node FSM states and events:
@@ -184,7 +186,7 @@ static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
        return n->links[bearer_id].link;
 }
 
-int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
+int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
 {
        struct tipc_node *n;
        int bearer_id;
@@ -194,6 +196,14 @@ int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
        if (unlikely(!n))
                return mtu;
 
+       /* Allow MAX_MSG_SIZE when building connection oriented message
+        * if they are in the same core network
+        */
+       if (n->peer_net && connected) {
+               tipc_node_put(n);
+               return mtu;
+       }
+
        bearer_id = n->active_links[sel & 1];
        if (likely(bearer_id != INVALID_BEARER_ID))
                mtu = n->links[bearer_id].mtu;
@@ -360,8 +370,37 @@ static void tipc_node_write_unlock(struct tipc_node *n)
        }
 }
 
+static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
+{
+       int net_id = tipc_netid(n->net);
+       struct tipc_net *tn_peer;
+       struct net *tmp;
+       u32 hash_chk;
+
+       if (n->peer_net)
+               return;
+
+       for_each_net_rcu(tmp) {
+               tn_peer = tipc_net(tmp);
+               if (!tn_peer)
+                       continue;
+               /* Integrity check: does this node really exist in the namespace? */
+               if (tn_peer->net_id != net_id)
+                       continue;
+               if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
+                       continue;
+               hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
+               if (hash_mixes ^ hash_chk)
+                       continue;
+               n->peer_net = tmp;
+               n->peer_hash_mix = hash_mixes;
+               break;
+       }
+}
+
 static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
-                                         u8 *peer_id, u16 capabilities)
+                                         u8 *peer_id, u16 capabilities,
+                                         u32 signature, u32 hash_mixes)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *n, *temp_node;
@@ -372,6 +411,8 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
        spin_lock_bh(&tn->node_list_lock);
        n = tipc_node_find(net, addr);
        if (n) {
+               if (n->peer_hash_mix ^ hash_mixes)
+                       tipc_node_assign_peer_net(n, hash_mixes);
                if (n->capabilities == capabilities)
                        goto exit;
                /* Same node may come back with new capabilities */
@@ -389,6 +430,7 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
                list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
                        tn->capabilities &= temp_node->capabilities;
                }
+
                goto exit;
        }
        n = kzalloc(sizeof(*n), GFP_ATOMIC);
@@ -399,6 +441,10 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
        n->addr = addr;
        memcpy(&n->peer_id, peer_id, 16);
        n->net = net;
+       n->peer_net = NULL;
+       n->peer_hash_mix = 0;
+       /* Assign kernel local namespace if exists */
+       tipc_node_assign_peer_net(n, hash_mixes);
        n->capabilities = capabilities;
        kref_init(&n->kref);
        rwlock_init(&n->lock);
@@ -426,6 +472,10 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
                                 tipc_bc_sndlink(net),
                                 &n->bc_entry.link)) {
                pr_warn("Broadcast rcv link creation failed, no memory\n");
+               if (n->peer_net) {
+                       n->peer_net = NULL;
+                       n->peer_hash_mix = 0;
+               }
                kfree(n);
                n = NULL;
                goto exit;
@@ -979,7 +1029,7 @@ u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
 
 void tipc_node_check_dest(struct net *net, u32 addr,
                          u8 *peer_id, struct tipc_bearer *b,
-                         u16 capabilities, u32 signature,
+                         u16 capabilities, u32 signature, u32 hash_mixes,
                          struct tipc_media_addr *maddr,
                          bool *respond, bool *dupl_addr)
 {
@@ -998,7 +1048,8 @@ void tipc_node_check_dest(struct net *net, u32 addr,
        *dupl_addr = false;
        *respond = false;
 
-       n = tipc_node_create(net, addr, peer_id, capabilities);
+       n = tipc_node_create(net, addr, peer_id, capabilities, signature,
+                            hash_mixes);
        if (!n)
                return;
 
@@ -1343,6 +1394,10 @@ static void node_lost_contact(struct tipc_node *n,
        /* Notify publications from this node */
        n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
 
+       if (n->peer_net) {
+               n->peer_net = NULL;
+               n->peer_hash_mix = 0;
+       }
        /* Notify sockets connected to node */
        list_for_each_entry_safe(conn, safe, conns, list) {
                skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
@@ -1424,6 +1479,56 @@ msg_full:
        return -EMSGSIZE;
 }
 
+static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
+{
+       struct tipc_msg *hdr = buf_msg(skb_peek(list));
+       struct sk_buff_head inputq;
+
+       switch (msg_user(hdr)) {
+       case TIPC_LOW_IMPORTANCE:
+       case TIPC_MEDIUM_IMPORTANCE:
+       case TIPC_HIGH_IMPORTANCE:
+       case TIPC_CRITICAL_IMPORTANCE:
+               if (msg_connected(hdr) || msg_named(hdr)) {
+                       tipc_loopback_trace(peer_net, list);
+                       spin_lock_init(&list->lock);
+                       tipc_sk_rcv(peer_net, list);
+                       return;
+               }
+               if (msg_mcast(hdr)) {
+                       tipc_loopback_trace(peer_net, list);
+                       skb_queue_head_init(&inputq);
+                       tipc_sk_mcast_rcv(peer_net, list, &inputq);
+                       __skb_queue_purge(list);
+                       skb_queue_purge(&inputq);
+                       return;
+               }
+               return;
+       case MSG_FRAGMENTER:
+               if (tipc_msg_assemble(list)) {
+                       tipc_loopback_trace(peer_net, list);
+                       skb_queue_head_init(&inputq);
+                       tipc_sk_mcast_rcv(peer_net, list, &inputq);
+                       __skb_queue_purge(list);
+                       skb_queue_purge(&inputq);
+               }
+               return;
+       case GROUP_PROTOCOL:
+       case CONN_MANAGER:
+               tipc_loopback_trace(peer_net, list);
+               spin_lock_init(&list->lock);
+               tipc_sk_rcv(peer_net, list);
+               return;
+       case LINK_PROTOCOL:
+       case NAME_DISTRIBUTOR:
+       case TUNNEL_PROTOCOL:
+       case BCAST_PROTOCOL:
+               return;
+       default:
+               return;
+       };
+}
+
 /**
  * tipc_node_xmit() is the general link level function for message sending
  * @net: the applicable net namespace
@@ -1439,6 +1544,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
        struct tipc_link_entry *le = NULL;
        struct tipc_node *n;
        struct sk_buff_head xmitq;
+       bool node_up = false;
        int bearer_id;
        int rc;
 
@@ -1456,6 +1562,17 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
        }
 
        tipc_node_read_lock(n);
+       node_up = node_is_up(n);
+       if (node_up && n->peer_net && check_net(n->peer_net)) {
+               /* xmit inner linux container */
+               tipc_lxc_xmit(n->peer_net, list);
+               if (likely(skb_queue_empty(list))) {
+                       tipc_node_read_unlock(n);
+                       tipc_node_put(n);
+                       return 0;
+               }
+       }
+
        bearer_id = n->active_links[selector & 1];
        if (unlikely(bearer_id == INVALID_BEARER_ID)) {
                tipc_node_read_unlock(n);
@@ -2587,3 +2704,33 @@ int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
 
        return i;
 }
+
+void tipc_node_pre_cleanup_net(struct net *exit_net)
+{
+       struct tipc_node *n;
+       struct tipc_net *tn;
+       struct net *tmp;
+
+       rcu_read_lock();
+       for_each_net_rcu(tmp) {
+               if (tmp == exit_net)
+                       continue;
+               tn = tipc_net(tmp);
+               if (!tn)
+                       continue;
+               spin_lock_bh(&tn->node_list_lock);
+               list_for_each_entry_rcu(n, &tn->node_list, list) {
+                       if (!n->peer_net)
+                               continue;
+                       if (n->peer_net != exit_net)
+                               continue;
+                       tipc_node_write_lock(n);
+                       n->peer_net = NULL;
+                       n->peer_hash_mix = 0;
+                       tipc_node_write_unlock_fast(n);
+                       break;
+               }
+               spin_unlock_bh(&tn->node_list_lock);
+       }
+       rcu_read_unlock();
+}
index 291d0ec..c39cd86 100644 (file)
@@ -54,7 +54,8 @@ enum {
        TIPC_LINK_PROTO_SEQNO = (1 << 6),
        TIPC_MCAST_RBCTL      = (1 << 7),
        TIPC_GAP_ACK_BLOCK    = (1 << 8),
-       TIPC_TUNNEL_ENHANCED  = (1 << 9)
+       TIPC_TUNNEL_ENHANCED  = (1 << 9),
+       TIPC_NAGLE            = (1 << 10)
 };
 
 #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT           |  \
@@ -66,7 +67,9 @@ enum {
                                TIPC_LINK_PROTO_SEQNO  |   \
                                TIPC_MCAST_RBCTL       |   \
                                TIPC_GAP_ACK_BLOCK     |   \
-                               TIPC_TUNNEL_ENHANCED)
+                               TIPC_TUNNEL_ENHANCED   |   \
+                               TIPC_NAGLE)
+
 #define INVALID_BEARER_ID -1
 
 void tipc_node_stop(struct net *net);
@@ -75,7 +78,7 @@ u32 tipc_node_get_addr(struct tipc_node *node);
 u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr);
 void tipc_node_check_dest(struct net *net, u32 onode, u8 *peer_id128,
                          struct tipc_bearer *bearer,
-                         u16 capabilities, u32 signature,
+                         u16 capabilities, u32 signature, u32 hash_mixes,
                          struct tipc_media_addr *maddr,
                          bool *respond, bool *dupl_addr);
 void tipc_node_delete_links(struct net *net, int bearer_id);
@@ -92,7 +95,7 @@ void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr);
 void tipc_node_broadcast(struct net *net, struct sk_buff *skb);
 int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
-int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel);
+int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected);
 bool tipc_node_is_up(struct net *net, u32 addr);
 u16 tipc_node_get_capabilities(struct net *net, u32 addr);
 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
@@ -107,4 +110,5 @@ int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
                                   struct netlink_callback *cb);
+void tipc_node_pre_cleanup_net(struct net *exit_net);
 #endif
index 35e32ff..5d7859a 100644 (file)
@@ -75,6 +75,7 @@ struct sockaddr_pair {
  * @conn_instance: TIPC instance used when connection was established
  * @published: non-zero if port has one or more associated names
  * @max_pkt: maximum packet size "hint" used when building messages sent by port
+ * @maxnagle: maximum size of msg which can be subject to nagle
  * @portid: unique port identity in TIPC socket hash table
  * @phdr: preformatted message header used when sending messages
  * #cong_links: list of congested links
@@ -97,6 +98,7 @@ struct tipc_sock {
        u32 conn_instance;
        int published;
        u32 max_pkt;
+       u32 maxnagle;
        u32 portid;
        struct tipc_msg phdr;
        struct list_head cong_links;
@@ -116,6 +118,10 @@ struct tipc_sock {
        struct tipc_mc_method mc_method;
        struct rcu_head rcu;
        struct tipc_group *group;
+       u32 oneway;
+       u16 snd_backlog;
+       bool expect_ack;
+       bool nodelay;
        bool group_is_open;
 };
 
@@ -137,6 +143,7 @@ static int tipc_sk_insert(struct tipc_sock *tsk);
 static void tipc_sk_remove(struct tipc_sock *tsk);
 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
+static void tipc_sk_push_backlog(struct tipc_sock *tsk);
 
 static const struct proto_ops packet_ops;
 static const struct proto_ops stream_ops;
@@ -227,6 +234,26 @@ static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
        return 1;
 }
 
+/* tsk_set_nagle - enable/disable nagle property by manipulating maxnagle
+ */
+static void tsk_set_nagle(struct tipc_sock *tsk)
+{
+       struct sock *sk = &tsk->sk;
+
+       tsk->maxnagle = 0;
+       if (sk->sk_type != SOCK_STREAM)
+               return;
+       if (tsk->nodelay)
+               return;
+       if (!(tsk->peer_caps & TIPC_NAGLE))
+               return;
+       /* Limit node local buffer size to avoid receive queue overflow */
+       if (tsk->max_pkt == MAX_MSG_SIZE)
+               tsk->maxnagle = 1500;
+       else
+               tsk->maxnagle = tsk->max_pkt;
+}
+
 /**
  * tsk_advance_rx_queue - discard first buffer in socket receive queue
  *
@@ -446,6 +473,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
 
        tsk = tipc_sk(sk);
        tsk->max_pkt = MAX_PKT_DEFAULT;
+       tsk->maxnagle = 0;
        INIT_LIST_HEAD(&tsk->publications);
        INIT_LIST_HEAD(&tsk->cong_links);
        msg = &tsk->phdr;
@@ -512,8 +540,12 @@ static void __tipc_shutdown(struct socket *sock, int error)
        tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
                                            !tsk_conn_cong(tsk)));
 
-       /* Remove any pending SYN message */
-       __skb_queue_purge(&sk->sk_write_queue);
+       /* Push out unsent messages or remove if pending SYN */
+       skb = skb_peek(&sk->sk_write_queue);
+       if (skb && !msg_is_syn(buf_msg(skb)))
+               tipc_sk_push_backlog(tsk);
+       else
+               __skb_queue_purge(&sk->sk_write_queue);
 
        /* Reject all unreceived messages, except on an active connection
         * (which disconnects locally & sends a 'FIN+' to peer).
@@ -740,7 +772,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
                /* fall through */
        case TIPC_LISTEN:
        case TIPC_CONNECTING:
-               if (!skb_queue_empty(&sk->sk_receive_queue))
+               if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                        revents |= EPOLLIN | EPOLLRDNORM;
                break;
        case TIPC_OPEN:
@@ -748,7 +780,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
                        revents |= EPOLLOUT;
                if (!tipc_sk_type_connectionless(sk))
                        break;
-               if (skb_queue_empty(&sk->sk_receive_queue))
+               if (skb_queue_empty_lockless(&sk->sk_receive_queue))
                        break;
                revents |= EPOLLIN | EPOLLRDNORM;
                break;
@@ -854,7 +886,7 @@ static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
 
        /* Build message as chain of buffers */
        __skb_queue_head_init(&pkts);
-       mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
+       mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
        rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
        if (unlikely(rc != dlen))
                return rc;
@@ -1208,6 +1240,27 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
        tipc_sk_rcv(net, inputq);
 }
 
+/* tipc_sk_push_backlog(): send accumulated buffers in socket write queue
+ *                         when socket is in Nagle mode
+ */
+static void tipc_sk_push_backlog(struct tipc_sock *tsk)
+{
+       struct sk_buff_head *txq = &tsk->sk.sk_write_queue;
+       struct net *net = sock_net(&tsk->sk);
+       u32 dnode = tsk_peer_node(tsk);
+       int rc;
+
+       if (skb_queue_empty(txq) || tsk->cong_link_cnt)
+               return;
+
+       tsk->snt_unacked += tsk->snd_backlog;
+       tsk->snd_backlog = 0;
+       tsk->expect_ack = true;
+       rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
+       if (rc == -ELINKCONG)
+               tsk->cong_link_cnt = 1;
+}
+
 /**
  * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
  * @tsk: receiving socket
@@ -1221,7 +1274,7 @@ static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
        u32 onode = tsk_own_node(tsk);
        struct sock *sk = &tsk->sk;
        int mtyp = msg_type(hdr);
-       bool conn_cong;
+       bool was_cong;
 
        /* Ignore if connection cannot be validated: */
        if (!tsk_peer_msg(tsk, hdr)) {
@@ -1254,11 +1307,13 @@ static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
                        __skb_queue_tail(xmitq, skb);
                return;
        } else if (mtyp == CONN_ACK) {
-               conn_cong = tsk_conn_cong(tsk);
+               was_cong = tsk_conn_cong(tsk);
+               tsk->expect_ack = false;
+               tipc_sk_push_backlog(tsk);
                tsk->snt_unacked -= msg_conn_ack(hdr);
                if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
                        tsk->snd_win = msg_adv_win(hdr);
-               if (conn_cong)
+               if (was_cong && !tsk_conn_cong(tsk))
                        sk->sk_write_space(sk);
        } else if (mtyp != CONN_PROBE_REPLY) {
                pr_warn("Received unknown CONN_PROTO msg\n");
@@ -1388,7 +1443,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
                return rc;
 
        __skb_queue_head_init(&pkts);
-       mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
+       mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
        rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
        if (unlikely(rc != dlen))
                return rc;
@@ -1437,15 +1492,15 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
        struct sock *sk = sock->sk;
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
+       struct sk_buff_head *txq = &sk->sk_write_queue;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_msg *hdr = &tsk->phdr;
        struct net *net = sock_net(sk);
-       struct sk_buff_head pkts;
        u32 dnode = tsk_peer_node(tsk);
+       int maxnagle = tsk->maxnagle;
+       int maxpkt = tsk->max_pkt;
        int send, sent = 0;
-       int rc = 0;
-
-       __skb_queue_head_init(&pkts);
+       int blocks, rc = 0;
 
        if (unlikely(dlen > INT_MAX))
                return -EMSGSIZE;
@@ -1467,21 +1522,35 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
                                         tipc_sk_connected(sk)));
                if (unlikely(rc))
                        break;
-
                send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
-               rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
-               if (unlikely(rc != send))
-                       break;
-
-               trace_tipc_sk_sendstream(sk, skb_peek(&pkts),
+               blocks = tsk->snd_backlog;
+               if (tsk->oneway++ >= 4 && send <= maxnagle) {
+                       rc = tipc_msg_append(hdr, m, send, maxnagle, txq);
+                       if (unlikely(rc < 0))
+                               break;
+                       blocks += rc;
+                       if (blocks <= 64 && tsk->expect_ack) {
+                               tsk->snd_backlog = blocks;
+                               sent += send;
+                               break;
+                       }
+                       tsk->expect_ack = true;
+               } else {
+                       rc = tipc_msg_build(hdr, m, sent, send, maxpkt, txq);
+                       if (unlikely(rc != send))
+                               break;
+                       blocks += tsk_inc(tsk, send + MIN_H_SIZE);
+               }
+               trace_tipc_sk_sendstream(sk, skb_peek(txq),
                                         TIPC_DUMP_SK_SNDQ, " ");
-               rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
+               rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
                if (unlikely(rc == -ELINKCONG)) {
                        tsk->cong_link_cnt = 1;
                        rc = 0;
                }
                if (likely(!rc)) {
-                       tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
+                       tsk->snt_unacked += blocks;
+                       tsk->snd_backlog = 0;
                        sent += send;
                }
        } while (sent < dlen && !rc);
@@ -1526,8 +1595,9 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
        sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
        tipc_set_sk_state(sk, TIPC_ESTABLISHED);
        tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
-       tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
+       tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
        tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
+       tsk_set_nagle(tsk);
        __skb_queue_purge(&sk->sk_write_queue);
        if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
                return;
@@ -1848,6 +1918,7 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
        bool peek = flags & MSG_PEEK;
        int offset, required, copy, copied = 0;
        int hlen, dlen, err, rc;
+       bool ack = false;
        long timeout;
 
        /* Catch invalid receive attempts */
@@ -1892,6 +1963,7 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
 
                /* Copy data if msg ok, otherwise return error/partial data */
                if (likely(!err)) {
+                       ack = msg_ack_required(hdr);
                        offset = skb_cb->bytes_read;
                        copy = min_t(int, dlen - offset, buflen - copied);
                        rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
@@ -1919,7 +1991,7 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
 
                /* Send connection flow control advertisement when applicable */
                tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
-               if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
+               if (ack || tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
                        tipc_sk_send_ack(tsk);
 
                /* Exit if all requested data or FIN/error received */
@@ -1990,6 +2062,7 @@ static void tipc_sk_proto_rcv(struct sock *sk,
                smp_wmb();
                tsk->cong_link_cnt--;
                wakeup = true;
+               tipc_sk_push_backlog(tsk);
                break;
        case GROUP_PROTOCOL:
                tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
@@ -2029,6 +2102,7 @@ static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
 
        if (unlikely(msg_mcast(hdr)))
                return false;
+       tsk->oneway = 0;
 
        switch (sk->sk_state) {
        case TIPC_CONNECTING:
@@ -2074,6 +2148,8 @@ static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
                        return true;
                return false;
        case TIPC_ESTABLISHED:
+               if (!skb_queue_empty(&sk->sk_write_queue))
+                       tipc_sk_push_backlog(tsk);
                /* Accept only connection-based messages sent by peer */
                if (likely(con_msg && !err && pport == oport && pnode == onode))
                        return true;
@@ -2959,6 +3035,7 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
        case TIPC_SRC_DROPPABLE:
        case TIPC_DEST_DROPPABLE:
        case TIPC_CONN_TIMEOUT:
+       case TIPC_NODELAY:
                if (ol < sizeof(value))
                        return -EINVAL;
                if (get_user(value, (u32 __user *)ov))
@@ -3007,6 +3084,10 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
        case TIPC_GROUP_LEAVE:
                res = tipc_sk_leave(tsk);
                break;
+       case TIPC_NODELAY:
+               tsk->nodelay = !!value;
+               tsk_set_nagle(tsk);
+               break;
        default:
                res = -EINVAL;
        }
index c853ad0..193cba2 100644 (file)
@@ -2597,7 +2597,7 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
                mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
 
        /* readable? */
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Connection-based need to check for termination and startup */
@@ -2626,7 +2626,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
        mask = 0;
 
        /* exceptional events? */
-       if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+       if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
@@ -2636,7 +2636,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
                mask |= EPOLLHUP;
 
        /* readable? */
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Connection-based need to check for termination and startup */
index 2ab43b2..582a3e4 100644 (file)
@@ -870,7 +870,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
                 * the queue and write as long as the socket isn't shutdown for
                 * sending.
                 */
-               if (!skb_queue_empty(&sk->sk_receive_queue) ||
+               if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
                    (sk->sk_shutdown & RCV_SHUTDOWN)) {
                        mask |= EPOLLIN | EPOLLRDNORM;
                }
index e851caf..fcac5c6 100644 (file)
@@ -204,6 +204,11 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
                return false;
        }
 
+       /* channel 14 is only for IEEE 802.11b */
+       if (chandef->center_freq1 == 2484 &&
+           chandef->width != NL80211_CHAN_WIDTH_20_NOHT)
+               return false;
+
        if (cfg80211_chandef_is_edmg(chandef) &&
            !cfg80211_edmg_chandef_valid(chandef))
                return false;
index d1451e7..7186cb6 100644 (file)
@@ -393,7 +393,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ },
        [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
                                   .len = IEEE80211_MAX_MESH_ID_LEN },
-       [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 },
+       [NL80211_ATTR_MPATH_NEXT_HOP] = NLA_POLICY_ETH_ADDR_COMPAT,
 
        [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
        [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED },
index dc8f689..f9e8303 100644 (file)
@@ -114,7 +114,7 @@ void regulatory_hint_country_ie(struct wiphy *wiphy,
                         u8 country_ie_len);
 
 /**
- * regulatory_hint_disconnect - informs all devices have been disconneted
+ * regulatory_hint_disconnect - informs all devices have been disconnected
  *
  * Regulotory rules can be enhanced further upon scanning and upon
  * connection to an AP. These rules become stale if we disconnect
index 419eb12..5b4ed5b 100644 (file)
@@ -1559,7 +1559,8 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
        }
 
        if (freq == 2484) {
-               if (chandef->width > NL80211_CHAN_WIDTH_40)
+               /* channel 14 is only for IEEE 802.11b */
+               if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT)
                        return false;
 
                *op_class = 82; /* channel 14 */
index 16d5f35..3049af2 100644 (file)
@@ -27,6 +27,9 @@ void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 {
        unsigned long flags;
 
+       if (!xs->tx)
+               return;
+
        spin_lock_irqsave(&umem->xsk_list_lock, flags);
        list_add_rcu(&xs->list, &umem->xsk_list);
        spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
@@ -36,6 +39,9 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 {
        unsigned long flags;
 
+       if (!xs->tx)
+               return;
+
        spin_lock_irqsave(&umem->xsk_list_lock, flags);
        list_del_rcu(&xs->list);
        spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
index 936d3ad..d2a30a7 100644 (file)
@@ -348,26 +348,38 @@ static enum export export_from_sec(struct elf_info *elf, unsigned int sec)
                return export_unknown;
 }
 
-static char *sym_extract_namespace(const char **symname)
+static const char *namespace_from_kstrtabns(struct elf_info *info,
+                                           Elf_Sym *kstrtabns)
 {
-       char *namespace = NULL;
-       char *ns_separator;
+       char *value = info->ksymtab_strings + kstrtabns->st_value;
+       return value[0] ? value : NULL;
+}
+
+static void sym_update_namespace(const char *symname, const char *namespace)
+{
+       struct symbol *s = find_symbol(symname);
 
-       ns_separator = strchr(*symname, '.');
-       if (ns_separator) {
-               namespace = NOFAIL(strndup(*symname, ns_separator - *symname));
-               *symname = ns_separator + 1;
+       /*
+        * That symbol should have been created earlier and thus this is
+        * actually an assertion.
+        */
+       if (!s) {
+               merror("Could not update namespace(%s) for symbol %s\n",
+                      namespace, symname);
+               return;
        }
 
-       return namespace;
+       free(s->namespace);
+       s->namespace =
+               namespace && namespace[0] ? NOFAIL(strdup(namespace)) : NULL;
 }
 
 /**
  * Add an exported symbol - it may have already been added without a
  * CRC, in this case just update the CRC
  **/
-static struct symbol *sym_add_exported(const char *name, const char *namespace,
-                                      struct module *mod, enum export export)
+static struct symbol *sym_add_exported(const char *name, struct module *mod,
+                                      enum export export)
 {
        struct symbol *s = find_symbol(name);
 
@@ -383,8 +395,6 @@ static struct symbol *sym_add_exported(const char *name, const char *namespace,
                        s->module = mod;
                }
        }
-       free(s->namespace);
-       s->namespace = namespace ? strdup(namespace) : NULL;
        s->preloaded = 0;
        s->vmlinux   = is_vmlinux(mod->name);
        s->kernel    = 0;
@@ -583,6 +593,10 @@ static int parse_elf(struct elf_info *info, const char *filename)
                        info->export_unused_gpl_sec = i;
                else if (strcmp(secname, "__ksymtab_gpl_future") == 0)
                        info->export_gpl_future_sec = i;
+               else if (strcmp(secname, "__ksymtab_strings") == 0)
+                       info->ksymtab_strings = (void *)hdr +
+                                               sechdrs[i].sh_offset -
+                                               sechdrs[i].sh_addr;
 
                if (sechdrs[i].sh_type == SHT_SYMTAB) {
                        unsigned int sh_link_idx;
@@ -672,7 +686,6 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
        enum export export;
        bool is_crc = false;
        const char *name;
-       char *namespace;
 
        if ((!is_vmlinux(mod->name) || mod->is_dot_o) &&
            strstarts(symname, "__ksymtab"))
@@ -745,9 +758,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
                /* All exported symbols */
                if (strstarts(symname, "__ksymtab_")) {
                        name = symname + strlen("__ksymtab_");
-                       namespace = sym_extract_namespace(&name);
-                       sym_add_exported(name, namespace, mod, export);
-                       free(namespace);
+                       sym_add_exported(name, mod, export);
                }
                if (strcmp(symname, "init_module") == 0)
                        mod->has_init = 1;
@@ -2043,6 +2054,16 @@ static void read_symbols(const char *modname)
                handle_moddevtable(mod, &info, sym, symname);
        }
 
+       /* Apply symbol namespaces from __kstrtabns_<symbol> entries. */
+       for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
+               symname = remove_dot(info.strtab + sym->st_name);
+
+               if (strstarts(symname, "__kstrtabns_"))
+                       sym_update_namespace(symname + strlen("__kstrtabns_"),
+                                            namespace_from_kstrtabns(&info,
+                                                                     sym));
+       }
+
        // check for static EXPORT_SYMBOL_* functions && global vars
        for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
                unsigned char bind = ELF_ST_BIND(sym->st_info);
@@ -2196,7 +2217,7 @@ static int check_exports(struct module *mod)
                else
                        basename = mod->name;
 
-               if (exp->namespace && exp->namespace[0]) {
+               if (exp->namespace) {
                        add_namespace(&mod->required_namespaces,
                                      exp->namespace);
 
@@ -2454,12 +2475,12 @@ static void read_dump(const char *fname, unsigned int kernel)
                        mod = new_module(modname);
                        mod->skip = 1;
                }
-               s = sym_add_exported(symname, namespace, mod,
-                                    export_no(export));
+               s = sym_add_exported(symname, mod, export_no(export));
                s->kernel    = kernel;
                s->preloaded = 1;
                s->is_static = 0;
                sym_update_crc(symname, mod, crc, export_no(export));
+               sym_update_namespace(symname, namespace);
        }
        release_file(file, size);
        return;
index 92a926d..ad271bc 100644 (file)
@@ -143,6 +143,7 @@ struct elf_info {
        Elf_Section  export_gpl_sec;
        Elf_Section  export_unused_gpl_sec;
        Elf_Section  export_gpl_future_sec;
+       char         *ksymtab_strings;
        char         *strtab;
        char         *modinfo;
        unsigned int modinfo_len;
index 3754dac..dda6fba 100644 (file)
@@ -33,7 +33,7 @@ generate_deps() {
        if [ ! -f "$ns_deps_file" ]; then return; fi
        local mod_source_files=`cat $mod_file | sed -n 1p                      \
                                              | sed -e 's/\.o/\.c/g'           \
-                                             | sed "s/[^ ]* */${srctree}\/&/g"`
+                                             | sed "s|[^ ]* *|${srctree}/&|g"`
        for ns in `cat $ns_deps_file`; do
                echo "Adding namespace $ns to module $mod_name (if needed)."
                generate_deps_for_ns $ns $mod_source_files
index 220dae0..a2998b1 100755 (executable)
@@ -93,7 +93,7 @@ scm_version()
        # Check for mercurial and a mercurial repo.
        if test -d .hg && hgid=`hg id 2>/dev/null`; then
                # Do we have an tagged version?  If so, latesttagdistance == 1
-               if [ "`hg log -r . --template '{latesttagdistance}'`" == "1" ]; then
+               if [ "`hg log -r . --template '{latesttagdistance}'`" = "1" ]; then
                        id=`hg log -r . --template '{latesttag}'`
                        printf '%s%s' -hg "$id"
                else
index 8a10b43..40b7905 100644 (file)
@@ -20,6 +20,7 @@ static const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
        [LOCKDOWN_NONE] = "none",
        [LOCKDOWN_MODULE_SIGNATURE] = "unsigned module loading",
        [LOCKDOWN_DEV_MEM] = "/dev/mem,kmem,port",
+       [LOCKDOWN_EFI_TEST] = "/dev/efi_test access",
        [LOCKDOWN_KEXEC] = "kexec of unsigned images",
        [LOCKDOWN_HIBERNATION] = "hibernation",
        [LOCKDOWN_PCI_ACCESS] = "direct PCI access",
index 5c9fbf3..6b724d2 100644 (file)
@@ -226,7 +226,8 @@ static int snd_timer_check_master(struct snd_timer_instance *master)
        return 0;
 }
 
-static int snd_timer_close_locked(struct snd_timer_instance *timeri);
+static int snd_timer_close_locked(struct snd_timer_instance *timeri,
+                                 struct device **card_devp_to_put);
 
 /*
  * open a timer instance
@@ -238,6 +239,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
 {
        struct snd_timer *timer;
        struct snd_timer_instance *timeri = NULL;
+       struct device *card_dev_to_put = NULL;
        int err;
 
        mutex_lock(&register_mutex);
@@ -261,7 +263,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
                list_add_tail(&timeri->open_list, &snd_timer_slave_list);
                err = snd_timer_check_slave(timeri);
                if (err < 0) {
-                       snd_timer_close_locked(timeri);
+                       snd_timer_close_locked(timeri, &card_dev_to_put);
                        timeri = NULL;
                }
                goto unlock;
@@ -313,7 +315,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
                        timeri = NULL;
 
                        if (timer->card)
-                               put_device(&timer->card->card_dev);
+                               card_dev_to_put = &timer->card->card_dev;
                        module_put(timer->module);
                        goto unlock;
                }
@@ -323,12 +325,15 @@ int snd_timer_open(struct snd_timer_instance **ti,
        timer->num_instances++;
        err = snd_timer_check_master(timeri);
        if (err < 0) {
-               snd_timer_close_locked(timeri);
+               snd_timer_close_locked(timeri, &card_dev_to_put);
                timeri = NULL;
        }
 
  unlock:
        mutex_unlock(&register_mutex);
+       /* put_device() is called after unlock for avoiding deadlock */
+       if (card_dev_to_put)
+               put_device(card_dev_to_put);
        *ti = timeri;
        return err;
 }
@@ -338,7 +343,8 @@ EXPORT_SYMBOL(snd_timer_open);
  * close a timer instance
  * call this with register_mutex down.
  */
-static int snd_timer_close_locked(struct snd_timer_instance *timeri)
+static int snd_timer_close_locked(struct snd_timer_instance *timeri,
+                                 struct device **card_devp_to_put)
 {
        struct snd_timer *timer = timeri->timer;
        struct snd_timer_instance *slave, *tmp;
@@ -395,7 +401,7 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri)
                        timer->hw.close(timer);
                /* release a card refcount for safe disconnection */
                if (timer->card)
-                       put_device(&timer->card->card_dev);
+                       *card_devp_to_put = &timer->card->card_dev;
                module_put(timer->module);
        }
 
@@ -407,14 +413,18 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri)
  */
 int snd_timer_close(struct snd_timer_instance *timeri)
 {
+       struct device *card_dev_to_put = NULL;
        int err;
 
        if (snd_BUG_ON(!timeri))
                return -ENXIO;
 
        mutex_lock(&register_mutex);
-       err = snd_timer_close_locked(timeri);
+       err = snd_timer_close_locked(timeri, &card_dev_to_put);
        mutex_unlock(&register_mutex);
+       /* put_device() is called after unlock for avoiding deadlock */
+       if (card_dev_to_put)
+               put_device(card_dev_to_put);
        return err;
 }
 EXPORT_SYMBOL(snd_timer_close);
index 73fee99..6c1497d 100644 (file)
@@ -252,8 +252,7 @@ end:
        return err;
 }
 
-static unsigned int
-map_data_channels(struct snd_bebob *bebob, struct amdtp_stream *s)
+static int map_data_channels(struct snd_bebob *bebob, struct amdtp_stream *s)
 {
        unsigned int sec, sections, ch, channels;
        unsigned int pcm, midi, location;
index d3999e7..7e7be8e 100644 (file)
@@ -447,8 +447,6 @@ static void azx_int_disable(struct hdac_bus *bus)
        list_for_each_entry(azx_dev, &bus->stream_list, list)
                snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0);
 
-       synchronize_irq(bus->irq);
-
        /* disable SIE for all streams */
        snd_hdac_chip_writeb(bus, INTCTL, 0);
 
index 240f4ca..cf53fbd 100644 (file)
@@ -1348,9 +1348,9 @@ static int azx_free(struct azx *chip)
        }
 
        if (bus->chip_init) {
-               azx_stop_chip(chip);
                azx_clear_irq_pending(chip);
                azx_stop_all_streams(chip);
+               azx_stop_chip(chip);
        }
 
        if (bus->irq >= 0)
@@ -2399,6 +2399,12 @@ static const struct pci_device_id azx_ids[] = {
        /* Icelake */
        { PCI_DEVICE(0x8086, 0x34c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Jasperlake */
+       { PCI_DEVICE(0x8086, 0x38c8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Tigerlake */
+       { PCI_DEVICE(0x8086, 0xa0c8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Elkhart Lake */
        { PCI_DEVICE(0x8086, 0x4b55),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
index 795cbda..b725537 100644 (file)
@@ -145,6 +145,7 @@ struct hdmi_spec {
        struct snd_array pins; /* struct hdmi_spec_per_pin */
        struct hdmi_pcm pcm_rec[16];
        struct mutex pcm_lock;
+       struct mutex bind_lock; /* for audio component binding */
        /* pcm_bitmap means which pcms have been assigned to pins*/
        unsigned long pcm_bitmap;
        int pcm_used;   /* counter of pcm_rec[] */
@@ -2258,7 +2259,7 @@ static int generic_hdmi_init(struct hda_codec *codec)
        struct hdmi_spec *spec = codec->spec;
        int pin_idx;
 
-       mutex_lock(&spec->pcm_lock);
+       mutex_lock(&spec->bind_lock);
        spec->use_jack_detect = !codec->jackpoll_interval;
        for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
                struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
@@ -2275,7 +2276,7 @@ static int generic_hdmi_init(struct hda_codec *codec)
                        snd_hda_jack_detect_enable_callback(codec, pin_nid,
                                                            jack_callback);
        }
-       mutex_unlock(&spec->pcm_lock);
+       mutex_unlock(&spec->bind_lock);
        return 0;
 }
 
@@ -2382,6 +2383,7 @@ static int alloc_generic_hdmi(struct hda_codec *codec)
        spec->ops = generic_standard_hdmi_ops;
        spec->dev_num = 1;      /* initialize to 1 */
        mutex_init(&spec->pcm_lock);
+       mutex_init(&spec->bind_lock);
        snd_hdac_register_chmap_ops(&codec->core, &spec->chmap);
 
        spec->chmap.ops.get_chmap = hdmi_get_chmap;
@@ -2451,7 +2453,7 @@ static void generic_acomp_notifier_set(struct drm_audio_component *acomp,
        int i;
 
        spec = container_of(acomp->audio_ops, struct hdmi_spec, drm_audio_ops);
-       mutex_lock(&spec->pcm_lock);
+       mutex_lock(&spec->bind_lock);
        spec->use_acomp_notifier = use_acomp;
        spec->codec->relaxed_resume = use_acomp;
        /* reprogram each jack detection logic depending on the notifier */
@@ -2461,7 +2463,7 @@ static void generic_acomp_notifier_set(struct drm_audio_component *acomp,
                                              get_pin(spec, i)->pin_nid,
                                              use_acomp);
        }
-       mutex_unlock(&spec->pcm_lock);
+       mutex_unlock(&spec->bind_lock);
 }
 
 /* enable / disable the notifier via master bind / unbind */
index ce4f116..80f66ba 100644 (file)
@@ -393,6 +393,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0700:
        case 0x10ec0701:
        case 0x10ec0703:
+       case 0x10ec0711:
                alc_update_coef_idx(codec, 0x10, 1<<15, 0);
                break;
        case 0x10ec0662:
@@ -408,6 +409,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0672:
                alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
                break;
+       case 0x10ec0623:
+               alc_update_coef_idx(codec, 0x19, 1<<13, 0);
+               break;
        case 0x10ec0668:
                alc_update_coef_idx(codec, 0x7, 3<<13, 0);
                break;
@@ -2919,6 +2923,7 @@ enum {
        ALC269_TYPE_ALC225,
        ALC269_TYPE_ALC294,
        ALC269_TYPE_ALC300,
+       ALC269_TYPE_ALC623,
        ALC269_TYPE_ALC700,
 };
 
@@ -2954,6 +2959,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
        case ALC269_TYPE_ALC225:
        case ALC269_TYPE_ALC294:
        case ALC269_TYPE_ALC300:
+       case ALC269_TYPE_ALC623:
        case ALC269_TYPE_ALC700:
                ssids = alc269_ssids;
                break;
@@ -7215,6 +7221,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -8016,9 +8024,13 @@ static int patch_alc269(struct hda_codec *codec)
                spec->codec_variant = ALC269_TYPE_ALC300;
                spec->gen.mixer_nid = 0; /* no loopback on ALC300 */
                break;
+       case 0x10ec0623:
+               spec->codec_variant = ALC269_TYPE_ALC623;
+               break;
        case 0x10ec0700:
        case 0x10ec0701:
        case 0x10ec0703:
+       case 0x10ec0711:
                spec->codec_variant = ALC269_TYPE_ALC700;
                spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
                alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
@@ -9216,6 +9228,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0299, "ALC299", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0300, "ALC300", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0623, "ALC623", patch_alc269),
        HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
        HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
        HDA_CODEC_ENTRY(0x10ec0861, "ALC861", patch_alc861),
@@ -9233,6 +9246,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0711, "ALC711", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc662),
        HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
        HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
index e609abc..eb709d5 100644 (file)
@@ -901,16 +901,20 @@ static void max98373_slot_config(struct i2c_client *i2c,
                max98373->i_slot = value & 0xF;
        else
                max98373->i_slot = 1;
-
-       max98373->reset_gpio = of_get_named_gpio(dev->of_node,
+       if (dev->of_node) {
+               max98373->reset_gpio = of_get_named_gpio(dev->of_node,
                                                "maxim,reset-gpio", 0);
-       if (!gpio_is_valid(max98373->reset_gpio)) {
-               dev_err(dev, "Looking up %s property in node %s failed %d\n",
-                       "maxim,reset-gpio", dev->of_node->full_name,
-                       max98373->reset_gpio);
+               if (!gpio_is_valid(max98373->reset_gpio)) {
+                       dev_err(dev, "Looking up %s property in node %s failed %d\n",
+                               "maxim,reset-gpio", dev->of_node->full_name,
+                               max98373->reset_gpio);
+               } else {
+                       dev_dbg(dev, "maxim,reset-gpio=%d",
+                               max98373->reset_gpio);
+               }
        } else {
-               dev_dbg(dev, "maxim,reset-gpio=%d",
-                       max98373->reset_gpio);
+               /* this makes reset_gpio as invalid */
+               max98373->reset_gpio = -1;
        }
 
        if (!device_property_read_u32(dev, "maxim,spkfb-slot-no", &value))
index 9fa5d44..58b2468 100644 (file)
@@ -243,6 +243,10 @@ static const char *const rx_mix1_text[] = {
        "ZERO", "IIR1", "IIR2", "RX1", "RX2", "RX3"
 };
 
+static const char * const rx_mix2_text[] = {
+       "ZERO", "IIR1", "IIR2"
+};
+
 static const char *const dec_mux_text[] = {
        "ZERO", "ADC1", "ADC2", "ADC3", "DMIC1", "DMIC2"
 };
@@ -270,6 +274,16 @@ static const struct soc_enum rx3_mix1_inp_enum[] = {
        SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B2_CTL, 0, 6, rx_mix1_text),
 };
 
+/* RX1 MIX2 */
+static const struct soc_enum rx_mix2_inp1_chain_enum =
+       SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX1_B3_CTL,
+               0, 3, rx_mix2_text);
+
+/* RX2 MIX2 */
+static const struct soc_enum rx2_mix2_inp1_chain_enum =
+       SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B3_CTL,
+               0, 3, rx_mix2_text);
+
 /* DEC */
 static const struct soc_enum dec1_mux_enum = SOC_ENUM_SINGLE(
                                LPASS_CDC_CONN_TX_B1_CTL, 0, 6, dec_mux_text);
@@ -309,6 +323,10 @@ static const struct snd_kcontrol_new rx3_mix1_inp2_mux = SOC_DAPM_ENUM(
                                "RX3 MIX1 INP2 Mux", rx3_mix1_inp_enum[1]);
 static const struct snd_kcontrol_new rx3_mix1_inp3_mux = SOC_DAPM_ENUM(
                                "RX3 MIX1 INP3 Mux", rx3_mix1_inp_enum[2]);
+static const struct snd_kcontrol_new rx1_mix2_inp1_mux = SOC_DAPM_ENUM(
+                               "RX1 MIX2 INP1 Mux", rx_mix2_inp1_chain_enum);
+static const struct snd_kcontrol_new rx2_mix2_inp1_mux = SOC_DAPM_ENUM(
+                               "RX2 MIX2 INP1 Mux", rx2_mix2_inp1_chain_enum);
 
 /* Digital Gain control -38.4 dB to +38.4 dB in 0.3 dB steps */
 static const DECLARE_TLV_DB_SCALE(digital_gain, -3840, 30, 0);
@@ -740,6 +758,10 @@ static const struct snd_soc_dapm_widget msm8916_wcd_digital_dapm_widgets[] = {
                         &rx3_mix1_inp2_mux),
        SND_SOC_DAPM_MUX("RX3 MIX1 INP3", SND_SOC_NOPM, 0, 0,
                         &rx3_mix1_inp3_mux),
+       SND_SOC_DAPM_MUX("RX1 MIX2 INP1", SND_SOC_NOPM, 0, 0,
+                        &rx1_mix2_inp1_mux),
+       SND_SOC_DAPM_MUX("RX2 MIX2 INP1", SND_SOC_NOPM, 0, 0,
+                        &rx2_mix2_inp1_mux),
 
        SND_SOC_DAPM_MUX("CIC1 MUX", SND_SOC_NOPM, 0, 0, &cic1_mux),
        SND_SOC_DAPM_MUX("CIC2 MUX", SND_SOC_NOPM, 0, 0, &cic2_mux),
index 762595d..c506c93 100644 (file)
@@ -1770,6 +1770,9 @@ static int rt5651_detect_headset(struct snd_soc_component *component)
 
 static bool rt5651_support_button_press(struct rt5651_priv *rt5651)
 {
+       if (!rt5651->hp_jack)
+               return false;
+
        /* Button press support only works with internal jack-detection */
        return (rt5651->hp_jack->status & SND_JACK_MICROPHONE) &&
                rt5651->gpiod_hp_det == NULL;
index 1ef4707..c50b75c 100644 (file)
@@ -995,6 +995,16 @@ static int rt5682_set_jack_detect(struct snd_soc_component *component,
 {
        struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
 
+       rt5682->hs_jack = hs_jack;
+
+       if (!hs_jack) {
+               regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
+                                  RT5682_JD1_EN_MASK, RT5682_JD1_DIS);
+               regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
+                                  RT5682_POW_JDH | RT5682_POW_JDL, 0);
+               return 0;
+       }
+
        switch (rt5682->pdata.jd_src) {
        case RT5682_JD1:
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_2,
@@ -1032,8 +1042,6 @@ static int rt5682_set_jack_detect(struct snd_soc_component *component,
                break;
        }
 
-       rt5682->hs_jack = hs_jack;
-
        return 0;
 }
 
index c3d06e8..d5fb7f5 100644 (file)
@@ -533,13 +533,10 @@ static SOC_ENUM_SINGLE_DECL(dac_osr,
 static SOC_ENUM_SINGLE_DECL(adc_osr,
                            WM8994_OVERSAMPLING, 1, osr_text);
 
-static const struct snd_kcontrol_new wm8994_snd_controls[] = {
+static const struct snd_kcontrol_new wm8994_common_snd_controls[] = {
 SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8994_AIF1_ADC1_LEFT_VOLUME,
                 WM8994_AIF1_ADC1_RIGHT_VOLUME,
                 1, 119, 0, digital_tlv),
-SOC_DOUBLE_R_TLV("AIF1ADC2 Volume", WM8994_AIF1_ADC2_LEFT_VOLUME,
-                WM8994_AIF1_ADC2_RIGHT_VOLUME,
-                1, 119, 0, digital_tlv),
 SOC_DOUBLE_R_TLV("AIF2ADC Volume", WM8994_AIF2_ADC_LEFT_VOLUME,
                 WM8994_AIF2_ADC_RIGHT_VOLUME,
                 1, 119, 0, digital_tlv),
@@ -556,8 +553,6 @@ SOC_ENUM("AIF2DACR Source", aif2dacr_src),
 
 SOC_DOUBLE_R_TLV("AIF1DAC1 Volume", WM8994_AIF1_DAC1_LEFT_VOLUME,
                 WM8994_AIF1_DAC1_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
-SOC_DOUBLE_R_TLV("AIF1DAC2 Volume", WM8994_AIF1_DAC2_LEFT_VOLUME,
-                WM8994_AIF1_DAC2_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
 SOC_DOUBLE_R_TLV("AIF2DAC Volume", WM8994_AIF2_DAC_LEFT_VOLUME,
                 WM8994_AIF2_DAC_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
 
@@ -565,17 +560,12 @@ SOC_SINGLE_TLV("AIF1 Boost Volume", WM8994_AIF1_CONTROL_2, 10, 3, 0, aif_tlv),
 SOC_SINGLE_TLV("AIF2 Boost Volume", WM8994_AIF2_CONTROL_2, 10, 3, 0, aif_tlv),
 
 SOC_SINGLE("AIF1DAC1 EQ Switch", WM8994_AIF1_DAC1_EQ_GAINS_1, 0, 1, 0),
-SOC_SINGLE("AIF1DAC2 EQ Switch", WM8994_AIF1_DAC2_EQ_GAINS_1, 0, 1, 0),
 SOC_SINGLE("AIF2 EQ Switch", WM8994_AIF2_EQ_GAINS_1, 0, 1, 0),
 
 WM8994_DRC_SWITCH("AIF1DAC1 DRC Switch", WM8994_AIF1_DRC1_1, 2),
 WM8994_DRC_SWITCH("AIF1ADC1L DRC Switch", WM8994_AIF1_DRC1_1, 1),
 WM8994_DRC_SWITCH("AIF1ADC1R DRC Switch", WM8994_AIF1_DRC1_1, 0),
 
-WM8994_DRC_SWITCH("AIF1DAC2 DRC Switch", WM8994_AIF1_DRC2_1, 2),
-WM8994_DRC_SWITCH("AIF1ADC2L DRC Switch", WM8994_AIF1_DRC2_1, 1),
-WM8994_DRC_SWITCH("AIF1ADC2R DRC Switch", WM8994_AIF1_DRC2_1, 0),
-
 WM8994_DRC_SWITCH("AIF2DAC DRC Switch", WM8994_AIF2_DRC_1, 2),
 WM8994_DRC_SWITCH("AIF2ADCL DRC Switch", WM8994_AIF2_DRC_1, 1),
 WM8994_DRC_SWITCH("AIF2ADCR DRC Switch", WM8994_AIF2_DRC_1, 0),
@@ -594,9 +584,6 @@ SOC_SINGLE("Sidetone HPF Switch", WM8994_SIDETONE, 6, 1, 0),
 SOC_ENUM("AIF1ADC1 HPF Mode", aif1adc1_hpf),
 SOC_DOUBLE("AIF1ADC1 HPF Switch", WM8994_AIF1_ADC1_FILTERS, 12, 11, 1, 0),
 
-SOC_ENUM("AIF1ADC2 HPF Mode", aif1adc2_hpf),
-SOC_DOUBLE("AIF1ADC2 HPF Switch", WM8994_AIF1_ADC2_FILTERS, 12, 11, 1, 0),
-
 SOC_ENUM("AIF2ADC HPF Mode", aif2adc_hpf),
 SOC_DOUBLE("AIF2ADC HPF Switch", WM8994_AIF2_ADC_FILTERS, 12, 11, 1, 0),
 
@@ -637,6 +624,24 @@ SOC_SINGLE("AIF2DAC 3D Stereo Switch", WM8994_AIF2_DAC_FILTERS_2,
           8, 1, 0),
 };
 
+/* Controls not available on WM1811 */
+static const struct snd_kcontrol_new wm8994_snd_controls[] = {
+SOC_DOUBLE_R_TLV("AIF1ADC2 Volume", WM8994_AIF1_ADC2_LEFT_VOLUME,
+                WM8994_AIF1_ADC2_RIGHT_VOLUME,
+                1, 119, 0, digital_tlv),
+SOC_DOUBLE_R_TLV("AIF1DAC2 Volume", WM8994_AIF1_DAC2_LEFT_VOLUME,
+                WM8994_AIF1_DAC2_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
+
+SOC_SINGLE("AIF1DAC2 EQ Switch", WM8994_AIF1_DAC2_EQ_GAINS_1, 0, 1, 0),
+
+WM8994_DRC_SWITCH("AIF1DAC2 DRC Switch", WM8994_AIF1_DRC2_1, 2),
+WM8994_DRC_SWITCH("AIF1ADC2L DRC Switch", WM8994_AIF1_DRC2_1, 1),
+WM8994_DRC_SWITCH("AIF1ADC2R DRC Switch", WM8994_AIF1_DRC2_1, 0),
+
+SOC_ENUM("AIF1ADC2 HPF Mode", aif1adc2_hpf),
+SOC_DOUBLE("AIF1ADC2 HPF Switch", WM8994_AIF1_ADC2_FILTERS, 12, 11, 1, 0),
+};
+
 static const struct snd_kcontrol_new wm8994_eq_controls[] = {
 SOC_SINGLE_TLV("AIF1DAC1 EQ1 Volume", WM8994_AIF1_DAC1_EQ_GAINS_1, 11, 31, 0,
               eq_tlv),
@@ -4258,13 +4263,15 @@ static int wm8994_component_probe(struct snd_soc_component *component)
        wm8994_handle_pdata(wm8994);
 
        wm_hubs_add_analogue_controls(component);
-       snd_soc_add_component_controls(component, wm8994_snd_controls,
-                            ARRAY_SIZE(wm8994_snd_controls));
+       snd_soc_add_component_controls(component, wm8994_common_snd_controls,
+                                      ARRAY_SIZE(wm8994_common_snd_controls));
        snd_soc_dapm_new_controls(dapm, wm8994_dapm_widgets,
                                  ARRAY_SIZE(wm8994_dapm_widgets));
 
        switch (control->type) {
        case WM8994:
+               snd_soc_add_component_controls(component, wm8994_snd_controls,
+                                              ARRAY_SIZE(wm8994_snd_controls));
                snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets,
                                          ARRAY_SIZE(wm8994_specific_dapm_widgets));
                if (control->revision < 4) {
@@ -4284,8 +4291,10 @@ static int wm8994_component_probe(struct snd_soc_component *component)
                }
                break;
        case WM8958:
+               snd_soc_add_component_controls(component, wm8994_snd_controls,
+                                              ARRAY_SIZE(wm8994_snd_controls));
                snd_soc_add_component_controls(component, wm8958_snd_controls,
-                                    ARRAY_SIZE(wm8958_snd_controls));
+                                              ARRAY_SIZE(wm8958_snd_controls));
                snd_soc_dapm_new_controls(dapm, wm8958_dapm_widgets,
                                          ARRAY_SIZE(wm8958_dapm_widgets));
                if (control->revision < 1) {
index ae28d99..9b8bb7b 100644 (file)
@@ -1259,8 +1259,7 @@ static unsigned int wmfw_convert_flags(unsigned int in, unsigned int len)
        }
 
        if (in) {
-               if (in & WMFW_CTL_FLAG_READABLE)
-                       out |= rd;
+               out |= rd;
                if (in & WMFW_CTL_FLAG_WRITEABLE)
                        out |= wr;
                if (in & WMFW_CTL_FLAG_VOLATILE)
@@ -3697,11 +3696,16 @@ static int wm_adsp_buffer_parse_legacy(struct wm_adsp *dsp)
        u32 xmalg, addr, magic;
        int i, ret;
 
+       alg_region = wm_adsp_find_alg_region(dsp, WMFW_ADSP2_XM, dsp->fw_id);
+       if (!alg_region) {
+               adsp_err(dsp, "No algorithm region found\n");
+               return -EINVAL;
+       }
+
        buf = wm_adsp_buffer_alloc(dsp);
        if (!buf)
                return -ENOMEM;
 
-       alg_region = wm_adsp_find_alg_region(dsp, WMFW_ADSP2_XM, dsp->fw_id);
        xmalg = dsp->ops->sys_config_size / sizeof(__be32);
 
        addr = alg_region->base + xmalg + ALG_XM_FIELD(magic);
index a437567..4f6e58c 100644 (file)
@@ -308,6 +308,9 @@ static const struct snd_soc_dapm_widget sof_widgets[] = {
        SND_SOC_DAPM_HP("Headphone Jack", NULL),
        SND_SOC_DAPM_MIC("Headset Mic", NULL),
        SND_SOC_DAPM_SPK("Spk", NULL),
+};
+
+static const struct snd_soc_dapm_widget dmic_widgets[] = {
        SND_SOC_DAPM_MIC("SoC DMIC", NULL),
 };
 
@@ -318,10 +321,6 @@ static const struct snd_soc_dapm_route sof_map[] = {
 
        /* other jacks */
        { "IN1P", NULL, "Headset Mic" },
-
-       /* digital mics */
-       {"DMic", NULL, "SoC DMIC"},
-
 };
 
 static const struct snd_soc_dapm_route speaker_map[] = {
@@ -329,6 +328,11 @@ static const struct snd_soc_dapm_route speaker_map[] = {
        { "Spk", NULL, "Speaker" },
 };
 
+static const struct snd_soc_dapm_route dmic_map[] = {
+       /* digital mics */
+       {"DMic", NULL, "SoC DMIC"},
+};
+
 static int speaker_codec_init(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_soc_card *card = rtd->card;
@@ -342,6 +346,28 @@ static int speaker_codec_init(struct snd_soc_pcm_runtime *rtd)
        return ret;
 }
 
+static int dmic_init(struct snd_soc_pcm_runtime *rtd)
+{
+       struct snd_soc_card *card = rtd->card;
+       int ret;
+
+       ret = snd_soc_dapm_new_controls(&card->dapm, dmic_widgets,
+                                       ARRAY_SIZE(dmic_widgets));
+       if (ret) {
+               dev_err(card->dev, "DMic widget addition failed: %d\n", ret);
+               /* Don't need to add routes if widget addition failed */
+               return ret;
+       }
+
+       ret = snd_soc_dapm_add_routes(&card->dapm, dmic_map,
+                                     ARRAY_SIZE(dmic_map));
+
+       if (ret)
+               dev_err(card->dev, "DMic map addition failed: %d\n", ret);
+
+       return ret;
+}
+
 /* sof audio machine driver for rt5682 codec */
 static struct snd_soc_card sof_audio_card_rt5682 = {
        .name = "sof_rt5682",
@@ -445,6 +471,7 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
                links[id].name = "dmic01";
                links[id].cpus = &cpus[id];
                links[id].cpus->dai_name = "DMIC01 Pin";
+               links[id].init = dmic_init;
                if (dmic_be_num > 1) {
                        /* set up 2 BE links at most */
                        links[id + 1].name = "dmic16k";
@@ -576,6 +603,15 @@ static int sof_audio_probe(struct platform_device *pdev)
        /* need to get main clock from pmc */
        if (sof_rt5682_quirk & SOF_RT5682_MCLK_BYTCHT_EN) {
                ctx->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
+               if (IS_ERR(ctx->mclk)) {
+                       ret = PTR_ERR(ctx->mclk);
+
+                       dev_err(&pdev->dev,
+                               "Failed to get MCLK from pmc_plt_clk_3: %d\n",
+                               ret);
+                       return ret;
+               }
+
                ret = clk_prepare_enable(ctx->mclk);
                if (ret < 0) {
                        dev_err(&pdev->dev,
@@ -621,8 +657,24 @@ static int sof_audio_probe(struct platform_device *pdev)
                                          &sof_audio_card_rt5682);
 }
 
+static int sof_rt5682_remove(struct platform_device *pdev)
+{
+       struct snd_soc_card *card = platform_get_drvdata(pdev);
+       struct snd_soc_component *component = NULL;
+
+       for_each_card_components(card, component) {
+               if (!strcmp(component->name, rt5682_component[0].name)) {
+                       snd_soc_component_set_jack(component, NULL, NULL);
+                       break;
+               }
+       }
+
+       return 0;
+}
+
 static struct platform_driver sof_audio = {
        .probe = sof_audio_probe,
+       .remove = sof_rt5682_remove,
        .driver = {
                .name = "sof_rt5682",
                .pm = &snd_soc_pm_ops,
index af2d5a6..61c984f 100644 (file)
@@ -677,7 +677,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
        ret = rockchip_pcm_platform_register(&pdev->dev);
        if (ret) {
                dev_err(&pdev->dev, "Could not register PCM\n");
-               return ret;
+               goto err_suspend;
        }
 
        return 0;
index c213913..fd8c664 100644 (file)
@@ -5,6 +5,7 @@
 //  Author: Claude <claude@insginal.co.kr>
 
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 
@@ -74,6 +75,17 @@ static struct snd_soc_card arndale_rt5631 = {
        .num_links = ARRAY_SIZE(arndale_rt5631_dai),
 };
 
+static void arndale_put_of_nodes(struct snd_soc_card *card)
+{
+       struct snd_soc_dai_link *dai_link;
+       int i;
+
+       for_each_card_prelinks(card, i, dai_link) {
+               of_node_put(dai_link->cpus->of_node);
+               of_node_put(dai_link->codecs->of_node);
+       }
+}
+
 static int arndale_audio_probe(struct platform_device *pdev)
 {
        int n, ret;
@@ -103,18 +115,31 @@ static int arndale_audio_probe(struct platform_device *pdev)
                if (!arndale_rt5631_dai[0].codecs->of_node) {
                        dev_err(&pdev->dev,
                        "Property 'samsung,audio-codec' missing or invalid\n");
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto err_put_of_nodes;
                }
        }
 
        ret = devm_snd_soc_register_card(card->dev, card);
+       if (ret) {
+               dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret);
+               goto err_put_of_nodes;
+       }
+       return 0;
 
-       if (ret)
-               dev_err(&pdev->dev, "snd_soc_register_card() failed:%d\n", ret);
-
+err_put_of_nodes:
+       arndale_put_of_nodes(card);
        return ret;
 }
 
+static int arndale_audio_remove(struct platform_device *pdev)
+{
+       struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+       arndale_put_of_nodes(card);
+       return 0;
+}
+
 static const struct of_device_id samsung_arndale_rt5631_of_match[] __maybe_unused = {
        { .compatible = "samsung,arndale-rt5631", },
        { .compatible = "samsung,arndale-alc5631", },
@@ -129,6 +154,7 @@ static struct platform_driver arndale_audio_driver = {
                .of_match_table = of_match_ptr(samsung_arndale_rt5631_of_match),
        },
        .probe = arndale_audio_probe,
+       .remove = arndale_audio_remove,
 };
 
 module_platform_driver(arndale_audio_driver);
index bda5b95..e9596c2 100644 (file)
@@ -761,6 +761,7 @@ static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        }
 
        /* set format */
+       rdai->bit_clk_inv = 0;
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
        case SND_SOC_DAIFMT_I2S:
                rdai->sys_delay = 0;
index e163dde..b600d3e 100644 (file)
@@ -1070,7 +1070,7 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
                        return ret;
        }
 
-       snd_soc_dai_trigger(cpu_dai, substream, cmd);
+       ret = snd_soc_dai_trigger(cpu_dai, substream, cmd);
        if (ret < 0)
                return ret;
 
@@ -1097,7 +1097,7 @@ static int soc_pcm_bespoke_trigger(struct snd_pcm_substream *substream,
                        return ret;
        }
 
-       snd_soc_dai_bespoke_trigger(cpu_dai, substream, cmd);
+       ret = snd_soc_dai_bespoke_trigger(cpu_dai, substream, cmd);
        if (ret < 0)
                return ret;
 
@@ -1146,6 +1146,7 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
 {
        struct snd_soc_dpcm *dpcm;
        unsigned long flags;
+       char *name;
 
        /* only add new dpcms */
        for_each_dpcm_be(fe, stream, dpcm) {
@@ -1171,9 +1172,15 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
                        stream ? "<-" : "->", be->dai_link->name);
 
 #ifdef CONFIG_DEBUG_FS
-       dpcm->debugfs_state = debugfs_create_dir(be->dai_link->name,
-                                                fe->debugfs_dpcm_root);
-       debugfs_create_u32("state", 0644, dpcm->debugfs_state, &dpcm->state);
+       name = kasprintf(GFP_KERNEL, "%s:%s", be->dai_link->name,
+                        stream ? "capture" : "playback");
+       if (name) {
+               dpcm->debugfs_state = debugfs_create_dir(name,
+                                                        fe->debugfs_dpcm_root);
+               debugfs_create_u32("state", 0644, dpcm->debugfs_state,
+                                  &dpcm->state);
+               kfree(name);
+       }
 #endif
        return 1;
 }
index aa9a1fc..0fd0329 100644 (file)
@@ -1582,7 +1582,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
 
        /* map user to kernel widget ID */
        template.id = get_widget_id(le32_to_cpu(w->id));
-       if (template.id < 0)
+       if ((int)template.id < 0)
                return template.id;
 
        /* strings are allocated here, but used and freed by the widget */
index a4983f9..2b8711e 100644 (file)
@@ -60,13 +60,16 @@ int snd_sof_volume_put(struct snd_kcontrol *kcontrol,
        struct snd_sof_dev *sdev = scontrol->sdev;
        struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
        unsigned int i, channels = scontrol->num_channels;
+       bool change = false;
+       u32 value;
 
        /* update each channel */
        for (i = 0; i < channels; i++) {
-               cdata->chanv[i].value =
-                       mixer_to_ipc(ucontrol->value.integer.value[i],
+               value = mixer_to_ipc(ucontrol->value.integer.value[i],
                                     scontrol->volume_table, sm->max + 1);
+               change = change || (value != cdata->chanv[i].value);
                cdata->chanv[i].channel = i;
+               cdata->chanv[i].value = value;
        }
 
        /* notify DSP of mixer updates */
@@ -76,8 +79,7 @@ int snd_sof_volume_put(struct snd_kcontrol *kcontrol,
                                              SOF_CTRL_TYPE_VALUE_CHAN_GET,
                                              SOF_CTRL_CMD_VOLUME,
                                              true);
-
-       return 0;
+       return change;
 }
 
 int snd_sof_switch_get(struct snd_kcontrol *kcontrol,
@@ -105,11 +107,15 @@ int snd_sof_switch_put(struct snd_kcontrol *kcontrol,
        struct snd_sof_dev *sdev = scontrol->sdev;
        struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
        unsigned int i, channels = scontrol->num_channels;
+       bool change = false;
+       u32 value;
 
        /* update each channel */
        for (i = 0; i < channels; i++) {
-               cdata->chanv[i].value = ucontrol->value.integer.value[i];
+               value = ucontrol->value.integer.value[i];
+               change = change || (value != cdata->chanv[i].value);
                cdata->chanv[i].channel = i;
+               cdata->chanv[i].value = value;
        }
 
        /* notify DSP of mixer updates */
@@ -120,7 +126,7 @@ int snd_sof_switch_put(struct snd_kcontrol *kcontrol,
                                              SOF_CTRL_CMD_SWITCH,
                                              true);
 
-       return 0;
+       return change;
 }
 
 int snd_sof_enum_get(struct snd_kcontrol *kcontrol,
@@ -148,11 +154,15 @@ int snd_sof_enum_put(struct snd_kcontrol *kcontrol,
        struct snd_sof_dev *sdev = scontrol->sdev;
        struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
        unsigned int i, channels = scontrol->num_channels;
+       bool change = false;
+       u32 value;
 
        /* update each channel */
        for (i = 0; i < channels; i++) {
-               cdata->chanv[i].value = ucontrol->value.enumerated.item[i];
+               value = ucontrol->value.enumerated.item[i];
+               change = change || (value != cdata->chanv[i].value);
                cdata->chanv[i].channel = i;
+               cdata->chanv[i].value = value;
        }
 
        /* notify DSP of enum updates */
@@ -163,7 +173,7 @@ int snd_sof_enum_put(struct snd_kcontrol *kcontrol,
                                              SOF_CTRL_CMD_ENUM,
                                              true);
 
-       return 0;
+       return change;
 }
 
 int snd_sof_bytes_get(struct snd_kcontrol *kcontrol,
index 479ba24..d62f51d 100644 (file)
@@ -273,6 +273,16 @@ config SND_SOC_SOF_HDA_AUDIO_CODEC
          Say Y if you want to enable HDAudio codecs with SOF.
          If unsure select "N".
 
+config SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1
+       bool "SOF enable DMI Link L1"
+       help
+         This option enables DMI L1 for both playback and capture
+         and disables known workarounds for specific HDaudio platforms.
+         Only use to look into power optimizations on platforms not
+         affected by DMI L1 issues. This option is not recommended.
+         Say Y if you want to enable DMI Link L1
+         If unsure, select "N".
+
 endif ## SND_SOC_SOF_HDA_COMMON
 
 config SND_SOC_SOF_HDA_LINK_BASELINE
index e282179..80e2826 100644 (file)
@@ -37,6 +37,7 @@
 #define MBOX_SIZE       0x1000
 #define MBOX_DUMP_SIZE 0x30
 #define EXCEPT_OFFSET  0x800
+#define EXCEPT_MAX_HDR_SIZE    0x400
 
 /* DSP peripherals */
 #define DMAC0_OFFSET    0xFE000
@@ -228,6 +229,11 @@ static void bdw_get_registers(struct snd_sof_dev *sdev,
        /* note: variable AR register array is not read */
 
        /* then get panic info */
+       if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+               dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+                       xoops->arch_hdr.totalsize);
+               return;
+       }
        offset += xoops->arch_hdr.totalsize;
        sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
 
@@ -451,6 +457,7 @@ static int bdw_probe(struct snd_sof_dev *sdev)
        /* TODO: add offsets */
        sdev->mmio_bar = BDW_DSP_BAR;
        sdev->mailbox_bar = BDW_DSP_BAR;
+       sdev->dsp_oops_offset = MBOX_OFFSET;
 
        /* PCI base */
        mmio = platform_get_resource(pdev, IORESOURCE_MEM,
index 5e7a6aa..a1e514f 100644 (file)
@@ -28,6 +28,7 @@
 #define MBOX_OFFSET            0x144000
 #define MBOX_SIZE              0x1000
 #define EXCEPT_OFFSET          0x800
+#define EXCEPT_MAX_HDR_SIZE    0x400
 
 /* DSP peripherals */
 #define DMAC0_OFFSET           0x098000
@@ -126,6 +127,11 @@ static void byt_get_registers(struct snd_sof_dev *sdev,
        /* note: variable AR register array is not read */
 
        /* then get panic info */
+       if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+               dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+                       xoops->arch_hdr.totalsize);
+               return;
+       }
        offset += xoops->arch_hdr.totalsize;
        sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
 
index bc41028..df1909e 100644 (file)
@@ -139,20 +139,16 @@ void hda_dsp_ctrl_misc_clock_gating(struct snd_sof_dev *sdev, bool enable)
  */
 int hda_dsp_ctrl_clock_power_gating(struct snd_sof_dev *sdev, bool enable)
 {
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
-       struct hdac_bus *bus = sof_to_bus(sdev);
-#endif
        u32 val;
 
        /* enable/disable audio dsp clock gating */
        val = enable ? PCI_CGCTL_ADSPDCGE : 0;
        snd_sof_pci_update_bits(sdev, PCI_CGCTL, PCI_CGCTL_ADSPDCGE, val);
 
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
-       /* enable/disable L1 support */
-       val = enable ? SOF_HDA_VS_EM2_L1SEN : 0;
-       snd_hdac_chip_updatel(bus, VS_EM2, SOF_HDA_VS_EM2_L1SEN, val);
-#endif
+       /* enable/disable DMI Link L1 support */
+       val = enable ? HDA_VS_INTEL_EM2_L1SEN : 0;
+       snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
+                               HDA_VS_INTEL_EM2_L1SEN, val);
 
        /* enable/disable audio dsp power gating */
        val = enable ? 0 : PCI_PGCTL_ADSPPGD;
index 6427f0b..65c2af3 100644 (file)
@@ -44,6 +44,7 @@ static int cl_stream_prepare(struct snd_sof_dev *sdev, unsigned int format,
                return -ENODEV;
        }
        hstream = &dsp_stream->hstream;
+       hstream->substream = NULL;
 
        /* allocate DMA buffer */
        ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, &pci->dev, size, dmab);
index ad8d41f..2c74471 100644 (file)
@@ -185,6 +185,17 @@ hda_dsp_stream_get(struct snd_sof_dev *sdev, int direction)
                        direction == SNDRV_PCM_STREAM_PLAYBACK ?
                        "playback" : "capture");
 
+       /*
+        * Disable DMI Link L1 entry when capture stream is opened.
+        * Workaround to address a known issue with host DMA that results
+        * in xruns during pause/release in capture scenarios.
+        */
+       if (!IS_ENABLED(SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1))
+               if (stream && direction == SNDRV_PCM_STREAM_CAPTURE)
+                       snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+                                               HDA_VS_INTEL_EM2,
+                                               HDA_VS_INTEL_EM2_L1SEN, 0);
+
        return stream;
 }
 
@@ -193,23 +204,43 @@ int hda_dsp_stream_put(struct snd_sof_dev *sdev, int direction, int stream_tag)
 {
        struct hdac_bus *bus = sof_to_bus(sdev);
        struct hdac_stream *s;
+       bool active_capture_stream = false;
+       bool found = false;
 
        spin_lock_irq(&bus->reg_lock);
 
-       /* find used stream */
+       /*
+        * close stream matching the stream tag
+        * and check if there are any open capture streams.
+        */
        list_for_each_entry(s, &bus->stream_list, list) {
-               if (s->direction == direction &&
-                   s->opened && s->stream_tag == stream_tag) {
+               if (!s->opened)
+                       continue;
+
+               if (s->direction == direction && s->stream_tag == stream_tag) {
                        s->opened = false;
-                       spin_unlock_irq(&bus->reg_lock);
-                       return 0;
+                       found = true;
+               } else if (s->direction == SNDRV_PCM_STREAM_CAPTURE) {
+                       active_capture_stream = true;
                }
        }
 
        spin_unlock_irq(&bus->reg_lock);
 
-       dev_dbg(sdev->dev, "stream_tag %d not opened!\n", stream_tag);
-       return -ENODEV;
+       /* Enable DMI L1 entry if there are no capture streams open */
+       if (!IS_ENABLED(SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1))
+               if (!active_capture_stream)
+                       snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+                                               HDA_VS_INTEL_EM2,
+                                               HDA_VS_INTEL_EM2_L1SEN,
+                                               HDA_VS_INTEL_EM2_L1SEN);
+
+       if (!found) {
+               dev_dbg(sdev->dev, "stream_tag %d not opened!\n", stream_tag);
+               return -ENODEV;
+       }
+
+       return 0;
 }
 
 int hda_dsp_stream_trigger(struct snd_sof_dev *sdev,
index c72e9a0..06e8467 100644 (file)
@@ -35,6 +35,8 @@
 #define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
 #define IS_CNL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9dc8)
 
+#define EXCEPT_MAX_HDR_SIZE    0x400
+
 /*
  * Debug
  */
@@ -131,6 +133,11 @@ static void hda_dsp_get_registers(struct snd_sof_dev *sdev,
        /* note: variable AR register array is not read */
 
        /* then get panic info */
+       if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+               dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+                       xoops->arch_hdr.totalsize);
+               return;
+       }
        offset += xoops->arch_hdr.totalsize;
        sof_block_read(sdev, sdev->mmio_bar, offset,
                       panic_info, sizeof(*panic_info));
index 5591841..23e430d 100644 (file)
@@ -39,7 +39,6 @@
 #define SOF_HDA_WAKESTS                        0x0E
 #define SOF_HDA_WAKESTS_INT_MASK       ((1 << 8) - 1)
 #define SOF_HDA_RIRBSTS                        0x5d
-#define SOF_HDA_VS_EM2_L1SEN            BIT(13)
 
 /* SOF_HDA_GCTL register bist */
 #define SOF_HDA_GCTL_RESET             BIT(0)
 #define HDA_DSP_REG_HIPCIE             (HDA_DSP_IPC_BASE + 0x0C)
 #define HDA_DSP_REG_HIPCCTL            (HDA_DSP_IPC_BASE + 0x10)
 
+/* Intel Vendor Specific Registers */
+#define HDA_VS_INTEL_EM2               0x1030
+#define HDA_VS_INTEL_EM2_L1SEN         BIT(13)
+
 /*  HIPCI */
 #define HDA_DSP_REG_HIPCI_BUSY         BIT(31)
 #define HDA_DSP_REG_HIPCI_MSG_MASK     0x7FFFFFFF
index d7f3274..9a9a381 100644 (file)
@@ -546,10 +546,10 @@ int snd_sof_run_firmware(struct snd_sof_dev *sdev)
                                 msecs_to_jiffies(sdev->boot_timeout));
        if (ret == 0) {
                dev_err(sdev->dev, "error: firmware boot failure\n");
-               /* after this point FW_READY msg should be ignored */
-               sdev->boot_complete = true;
                snd_sof_dsp_dbg_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX |
                        SOF_DBG_TEXT | SOF_DBG_PCI);
+               /* after this point FW_READY msg should be ignored */
+               sdev->boot_complete = true;
                return -EIO;
        }
 
index e3f6a6d..2b876d4 100644 (file)
@@ -244,7 +244,7 @@ static int sof_pcm_hw_free(struct snd_pcm_substream *substream)
                snd_soc_rtdcom_lookup(rtd, DRV_NAME);
        struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
        struct snd_sof_pcm *spcm;
-       int ret;
+       int ret, err = 0;
 
        /* nothing to do for BE */
        if (rtd->dai_link->no_pcm)
@@ -254,26 +254,26 @@ static int sof_pcm_hw_free(struct snd_pcm_substream *substream)
        if (!spcm)
                return -EINVAL;
 
-       if (!spcm->prepared[substream->stream])
-               return 0;
-
        dev_dbg(sdev->dev, "pcm: free stream %d dir %d\n", spcm->pcm.pcm_id,
                substream->stream);
 
-       ret = sof_pcm_dsp_pcm_free(substream, sdev, spcm);
+       if (spcm->prepared[substream->stream]) {
+               ret = sof_pcm_dsp_pcm_free(substream, sdev, spcm);
+               if (ret < 0)
+                       err = ret;
+       }
 
        snd_pcm_lib_free_pages(substream);
 
        cancel_work_sync(&spcm->stream[substream->stream].period_elapsed_work);
 
-       if (ret < 0)
-               return ret;
-
        ret = snd_sof_pcm_platform_hw_free(sdev, substream);
-       if (ret < 0)
+       if (ret < 0) {
                dev_err(sdev->dev, "error: platform hw free failed\n");
+               err = ret;
+       }
 
-       return ret;
+       return err;
 }
 
 static int sof_pcm_prepare(struct snd_pcm_substream *substream)
@@ -323,6 +323,7 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
        struct sof_ipc_stream stream;
        struct sof_ipc_reply reply;
        bool reset_hw_params = false;
+       bool ipc_first = false;
        int ret;
 
        /* nothing to do for BE */
@@ -343,6 +344,7 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_PAUSE;
+               ipc_first = true;
                break;
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
                stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_RELEASE;
@@ -363,6 +365,7 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_STOP:
                stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_STOP;
+               ipc_first = true;
                reset_hw_params = true;
                break;
        default:
@@ -370,12 +373,22 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
                return -EINVAL;
        }
 
-       snd_sof_pcm_platform_trigger(sdev, substream, cmd);
+       /*
+        * DMA and IPC sequence is different for start and stop. Need to send
+        * STOP IPC before stop DMA
+        */
+       if (!ipc_first)
+               snd_sof_pcm_platform_trigger(sdev, substream, cmd);
 
        /* send IPC to the DSP */
        ret = sof_ipc_tx_message(sdev->ipc, stream.hdr.cmd, &stream,
                                 sizeof(stream), &reply, sizeof(reply));
 
+       /* need to STOP DMA even if STOP IPC failed */
+       if (ipc_first)
+               snd_sof_pcm_platform_trigger(sdev, substream, cmd);
+
+       /* free PCM if reset_hw_params is set and the STOP IPC is successful */
        if (!ret && reset_hw_params)
                ret = sof_pcm_dsp_pcm_free(substream, sdev, spcm);
 
index fc85efb..0aabb31 100644 (file)
@@ -920,7 +920,9 @@ static void sof_parse_word_tokens(struct snd_soc_component *scomp,
                for (j = 0; j < count; j++) {
                        /* match token type */
                        if (!(tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_WORD ||
-                             tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_SHORT))
+                             tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_SHORT ||
+                             tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_BYTE ||
+                             tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_BOOL))
                                continue;
 
                        /* match token id */
index d7501f8..a406081 100644 (file)
@@ -505,10 +505,20 @@ static int stm32_sai_set_sysclk(struct snd_soc_dai *cpu_dai,
        if (dir == SND_SOC_CLOCK_OUT && sai->sai_mclk) {
                ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
                                         SAI_XCR1_NODIV,
-                                        (unsigned int)~SAI_XCR1_NODIV);
+                                        freq ? 0 : SAI_XCR1_NODIV);
                if (ret < 0)
                        return ret;
 
+               /* Assume shutdown if requested frequency is 0Hz */
+               if (!freq) {
+                       /* Release mclk rate only if rate was actually set */
+                       if (sai->mclk_rate) {
+                               clk_rate_exclusive_put(sai->sai_mclk);
+                               sai->mclk_rate = 0;
+                       }
+                       return 0;
+               }
+
                /* If master clock is used, set parent clock now */
                ret = stm32_sai_set_parent_clock(sai, freq);
                if (ret)
@@ -1093,15 +1103,6 @@ static void stm32_sai_shutdown(struct snd_pcm_substream *substream,
 
        regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0);
 
-       regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, SAI_XCR1_NODIV,
-                          SAI_XCR1_NODIV);
-
-       /* Release mclk rate only if rate was actually set */
-       if (sai->mclk_rate) {
-               clk_rate_exclusive_put(sai->sai_mclk);
-               sai->mclk_rate = 0;
-       }
-
        clk_disable_unprepare(sai->sai_ck);
 
        spin_lock_irqsave(&sai->irq_lock, flags);
index fbfde99..0bbe120 100644 (file)
@@ -1657,6 +1657,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        case 0x23ba:  /* Playback Designs */
        case 0x25ce:  /* Mytek devices */
        case 0x278b:  /* Rotel? */
+       case 0x292b:  /* Gustard/Ess based devices */
        case 0x2ab6:  /* T+A devices */
        case 0x3842:  /* EVGA */
        case 0xc502:  /* HiBy devices */
index 3c8f73a..a5e584b 100644 (file)
@@ -75,7 +75,7 @@ static bool validate_processing_unit(const void *p,
 
        if (d->bLength < sizeof(*d))
                return false;
-       len = d->bLength < sizeof(*d) + d->bNrInPins;
+       len = sizeof(*d) + d->bNrInPins;
        if (d->bLength < len)
                return false;
        switch (v->protocol) {
index a9731f8..2e8a30f 100644 (file)
@@ -75,6 +75,7 @@
 #define SVM_EXIT_MWAIT         0x08b
 #define SVM_EXIT_MWAIT_COND    0x08c
 #define SVM_EXIT_XSETBV        0x08d
+#define SVM_EXIT_RDPRU         0x08e
 #define SVM_EXIT_NPF           0x400
 #define SVM_EXIT_AVIC_INCOMPLETE_IPI           0x401
 #define SVM_EXIT_AVIC_UNACCELERATED_ACCESS     0x402
index f01950a..3eb8411 100644 (file)
@@ -86,6 +86,8 @@
 #define EXIT_REASON_PML_FULL            62
 #define EXIT_REASON_XSAVES              63
 #define EXIT_REASON_XRSTORS             64
+#define EXIT_REASON_UMWAIT              67
+#define EXIT_REASON_TPAUSE              68
 
 #define VMX_EXIT_REASONS \
        { EXIT_REASON_EXCEPTION_NMI,         "EXCEPTION_NMI" }, \
        { EXIT_REASON_RDSEED,                "RDSEED" }, \
        { EXIT_REASON_PML_FULL,              "PML_FULL" }, \
        { EXIT_REASON_XSAVES,                "XSAVES" }, \
-       { EXIT_REASON_XRSTORS,               "XRSTORS" }
+       { EXIT_REASON_XRSTORS,               "XRSTORS" }, \
+       { EXIT_REASON_UMWAIT,                "UMWAIT" }, \
+       { EXIT_REASON_TPAUSE,                "TPAUSE" }
 
 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
 #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL       2
index 233efbb..52641d8 100644 (file)
@@ -999,6 +999,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_ARM_PTRAUTH_GENERIC 172
 #define KVM_CAP_PMU_EVENT_FILTER 173
 #define KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 174
+#define KVM_CAP_HYPERV_DIRECT_TLBFLUSH 175
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1145,6 +1146,7 @@ struct kvm_dirty_tlb {
 #define KVM_REG_S390           0x5000000000000000ULL
 #define KVM_REG_ARM64          0x6000000000000000ULL
 #define KVM_REG_MIPS           0x7000000000000000ULL
+#define KVM_REG_RISCV          0x8000000000000000ULL
 
 #define KVM_REG_SIZE_SHIFT     52
 #define KVM_REG_SIZE_MASK      0x00f0000000000000ULL
index b3105ac..99335e1 100644 (file)
 #define CLONE_NEWNET           0x40000000      /* New network namespace */
 #define CLONE_IO               0x80000000      /* Clone io context */
 
-/*
- * Arguments for the clone3 syscall
+#ifndef __ASSEMBLY__
+/**
+ * struct clone_args - arguments for the clone3 syscall
+ * @flags:       Flags for the new process as listed above.
+ *               All flags are valid except for CSIGNAL and
+ *               CLONE_DETACHED.
+ * @pidfd:       If CLONE_PIDFD is set, a pidfd will be
+ *               returned in this argument.
+ * @child_tid:   If CLONE_CHILD_SETTID is set, the TID of the
+ *               child process will be returned in the child's
+ *               memory.
+ * @parent_tid:  If CLONE_PARENT_SETTID is set, the TID of
+ *               the child process will be returned in the
+ *               parent's memory.
+ * @exit_signal: The exit_signal the parent process will be
+ *               sent when the child exits.
+ * @stack:       Specify the location of the stack for the
+ *               child process.
+ * @stack_size:  The size of the stack for the child process.
+ * @tls:         If CLONE_SETTLS is set, the tls descriptor
+ *               is set to tls.
+ *
+ * The structure is versioned by size and thus extensible.
+ * New struct members must go at the end of the struct and
+ * must be properly 64bit aligned.
  */
 struct clone_args {
        __aligned_u64 flags;
@@ -46,6 +69,9 @@ struct clone_args {
        __aligned_u64 stack_size;
        __aligned_u64 tls;
 };
+#endif
+
+#define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */
 
 /*
  * Scheduling policies
index 3542b6a..e69f449 100644 (file)
@@ -2635,6 +2635,7 @@ static int build_cl_output(char *cl_sort, bool no_source)
        bool add_sym   = false;
        bool add_dso   = false;
        bool add_src   = false;
+       int ret = 0;
 
        if (!buf)
                return -ENOMEM;
@@ -2653,7 +2654,8 @@ static int build_cl_output(char *cl_sort, bool no_source)
                        add_dso = true;
                } else if (strcmp(tok, "offset")) {
                        pr_err("unrecognized sort token: %s\n", tok);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto err;
                }
        }
 
@@ -2676,13 +2678,15 @@ static int build_cl_output(char *cl_sort, bool no_source)
                add_sym ? "symbol," : "",
                add_dso ? "dso," : "",
                add_src ? "cl_srcline," : "",
-               "node") < 0)
-               return -ENOMEM;
+               "node") < 0) {
+               ret = -ENOMEM;
+               goto err;
+       }
 
        c2c.show_src = add_src;
-
+err:
        free(buf);
-       return 0;
+       return ret;
 }
 
 static int setup_coalesce(const char *coalesce, bool no_source)
index 1e61e35..9661671 100644 (file)
@@ -691,6 +691,7 @@ static char *compact_gfp_flags(char *gfp_flags)
                        new = realloc(new_flags, len + strlen(cpt) + 2);
                        if (new == NULL) {
                                free(new_flags);
+                               free(orig_flags);
                                return NULL;
                        }
 
index 1e148bb..202cada 100644 (file)
@@ -2,7 +2,7 @@ jvmti-y += libjvmti.o
 jvmti-y += jvmti_agent.o
 
 # For strlcpy
-jvmti-y += libstring.o
+jvmti-y += libstring.o libctype.o
 
 CFLAGS_jvmti         = -fPIC -DPIC -I$(JDIR)/include -I$(JDIR)/include/linux
 CFLAGS_REMOVE_jvmti  = -Wmissing-declarations
@@ -15,3 +15,7 @@ CFLAGS_libstring.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PE
 $(OUTPUT)jvmti/libstring.o: ../lib/string.c FORCE
        $(call rule_mkdir)
        $(call if_changed_dep,cc_o_c)
+
+$(OUTPUT)jvmti/libctype.o: ../lib/ctype.c FORCE
+       $(call rule_mkdir)
+       $(call if_changed_dep,cc_o_c)
index 4036c7f..e42bf57 100644 (file)
@@ -1758,7 +1758,7 @@ static int symbol__disassemble_bpf(struct symbol *sym,
        info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
                                                 dso->bpf_prog.id);
        if (!info_node) {
-               return SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
+               ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
                goto out;
        }
        info_linear = info_node->info_linear;
index 3fa0db1..47e03de 100644 (file)
@@ -101,14 +101,16 @@ static int copyfile_mode_ns(const char *from, const char *to, mode_t mode,
        if (tofd < 0)
                goto out;
 
-       if (fchmod(tofd, mode))
-               goto out_close_to;
-
        if (st.st_size == 0) { /* /proc? do it slowly... */
                err = slow_copyfile(from, tmp, nsi);
+               if (!err && fchmod(tofd, mode))
+                       err = -1;
                goto out_close_to;
        }
 
+       if (fchmod(tofd, mode))
+               goto out_close_to;
+
        nsinfo__mountns_enter(nsi, &nsc);
        fromfd = open(from, O_RDONLY);
        nsinfo__mountns_exit(&nsc);
index d277a98..de79c73 100644 (file)
@@ -1659,7 +1659,7 @@ struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
                        is_open = false;
                if (c2->leader == leader) {
                        if (is_open)
-                               perf_evsel__close(&evsel->core);
+                               perf_evsel__close(&c2->core);
                        c2->leader = c2;
                        c2->core.nr_members = 0;
                }
index 86d9396..becc2d1 100644 (file)
@@ -1296,8 +1296,10 @@ static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
                        continue;
 
                if (WARN_ONCE(cnt >= size,
-                             "failed to write MEM_TOPOLOGY, way too many nodes\n"))
+                       "failed to write MEM_TOPOLOGY, way too many nodes\n")) {
+                       closedir(dir);
                        return -1;
+               }
 
                ret = memory_node__read(&nodes[cnt++], idx);
        }
index 5eda6e1..ae56c76 100644 (file)
@@ -154,8 +154,10 @@ static int rm_rf_depth_pat(const char *path, int depth, const char **pat)
                if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
                        continue;
 
-               if (!match_pat(d->d_name, pat))
-                       return -2;
+               if (!match_pat(d->d_name, pat)) {
+                       ret =  -2;
+                       break;
+               }
 
                scnprintf(namebuf, sizeof(namebuf), "%s/%s",
                          path, d->d_name);
index 15a6663..fc8a431 100755 (executable)
@@ -22,6 +22,7 @@ import os
 import pprint
 import random
 import re
+import stat
 import string
 import struct
 import subprocess
@@ -311,8 +312,9 @@ class DebugfsDir:
         for f in out.split():
             if f == "ports":
                 continue
+
             p = os.path.join(path, f)
-            if os.path.isfile(p):
+            if os.path.isfile(p) and os.access(p, os.R_OK):
                 _, out = cmd('cat %s/%s' % (path, f))
                 dfs[f] = out.strip()
             elif os.path.isdir(p):
index f38567e..daa7d1b 100755 (executable)
@@ -59,7 +59,7 @@ ip netns exec ${NS_SRC} tc filter add dev veth_src egress \
 
 # start the listener
 ip netns exec ${NS_DST} bash -c \
-       "nc -4 -l -s ${IP_DST} -p 9000 >/dev/null &"
+       "nc -4 -l -p 9000 >/dev/null &"
 declare -i NC_PID=$!
 sleep 1
 
index b35da37..409c1fa 100644 (file)
@@ -1,4 +1,5 @@
 /s390x/sync_regs_test
+/s390x/memop
 /x86_64/cr4_cpuid_sync_test
 /x86_64/evmcs_test
 /x86_64/hyperv_cpuid
@@ -9,6 +10,7 @@
 /x86_64/state_test
 /x86_64/sync_regs_test
 /x86_64/vmx_close_while_nested_test
+/x86_64/vmx_dirty_log_test
 /x86_64/vmx_set_nested_state_test
 /x86_64/vmx_tsc_adjust_test
 /clear_dirty_log_test
index 6ae5a47..f52e0ba 100644 (file)
@@ -580,6 +580,8 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx);
 void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
 bool load_vmcs(struct vmx_pages *vmx);
 
+void nested_vmx_check_supported(void);
+
 void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                   uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot);
 void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
index fab8f6b..f6ec97b 100644 (file)
@@ -376,6 +376,16 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
        init_vmcs_guest_state(guest_rip, guest_rsp);
 }
 
+void nested_vmx_check_supported(void)
+{
+       struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
+
+       if (!(entry->ecx & CPUID_VMX)) {
+               fprintf(stderr, "nested VMX not enabled, skipping test\n");
+               exit(KSFT_SKIP);
+       }
+}
+
 void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                   uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot)
 {
index 11c2a70..5c82242 100644 (file)
 
 #define VCPU_ID 5
 
+#define UCALL_PIO_PORT ((uint16_t)0x1000)
+
+/*
+ * ucall is embedded here to protect against compiler reshuffling registers
+ * before calling a function. In this test we only need to get KVM_EXIT_IO
+ * vmexit and preserve RBX, no additional information is needed.
+ */
 void guest_code(void)
 {
-       /*
-        * use a callee-save register, otherwise the compiler
-        * saves it around the call to GUEST_SYNC.
-        */
-       register u32 stage asm("rbx");
-       for (;;) {
-               GUEST_SYNC(0);
-               stage++;
-               asm volatile ("" : : "r" (stage));
-       }
+       asm volatile("1: in %[port], %%al\n"
+                    "add $0x1, %%rbx\n"
+                    "jmp 1b"
+                    : : [port] "d" (UCALL_PIO_PORT) : "rax", "rbx");
 }
 
 static void compare_regs(struct kvm_regs *left, struct kvm_regs *right)
index 3b0ffe0..5dfb535 100644 (file)
@@ -53,12 +53,8 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
 int main(int argc, char *argv[])
 {
        vm_vaddr_t vmx_pages_gva;
-       struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
-       if (!(entry->ecx & CPUID_VMX)) {
-               fprintf(stderr, "nested VMX not enabled, skipping test\n");
-               exit(KSFT_SKIP);
-       }
+       nested_vmx_check_supported();
 
        vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
index 0bca1cf..a223a64 100644 (file)
@@ -78,6 +78,8 @@ int main(int argc, char *argv[])
        struct ucall uc;
        bool done = false;
 
+       nested_vmx_check_supported();
+
        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, l1_guest_code);
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
index 853e370..9ef7fab 100644 (file)
@@ -224,7 +224,6 @@ int main(int argc, char *argv[])
 {
        struct kvm_vm *vm;
        struct kvm_nested_state state;
-       struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
        have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
 
@@ -237,10 +236,7 @@ int main(int argc, char *argv[])
         * AMD currently does not implement set_nested_state, so for now we
         * just early out.
         */
-       if (!(entry->ecx & CPUID_VMX)) {
-               fprintf(stderr, "nested VMX not enabled, skipping test\n");
-               exit(KSFT_SKIP);
-       }
+       nested_vmx_check_supported();
 
        vm = vm_create_default(VCPU_ID, 0, 0);
 
@@ -271,12 +267,7 @@ int main(int argc, char *argv[])
        state.flags = KVM_STATE_NESTED_RUN_PENDING;
        test_nested_state_expect_einval(vm, &state);
 
-       /*
-        * TODO: When SVM support is added for KVM_SET_NESTED_STATE
-        *       add tests here to support it like VMX.
-        */
-       if (entry->ecx & CPUID_VMX)
-               test_vmx_nested_state(vm);
+       test_vmx_nested_state(vm);
 
        kvm_vm_free(vm);
        return 0;
index f36c10e..5590fd2 100644 (file)
@@ -128,12 +128,8 @@ static void report(int64_t val)
 int main(int argc, char *argv[])
 {
        vm_vaddr_t vmx_pages_gva;
-       struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
-       if (!(entry->ecx & CPUID_VMX)) {
-               fprintf(stderr, "nested VMX not enabled, skipping test\n");
-               exit(KSFT_SKIP);
-       }
+       nested_vmx_check_supported();
 
        vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
index c4ba0ff..76c1897 100755 (executable)
@@ -1438,6 +1438,27 @@ ipv4_addr_metric_test()
        fi
        log_test $rc 0 "Prefix route with metric on link up"
 
+       # explicitly check for metric changes on edge scenarios
+       run_cmd "$IP addr flush dev dummy2"
+       run_cmd "$IP addr add dev dummy2 172.16.104.0/24 metric 259"
+       run_cmd "$IP addr change dev dummy2 172.16.104.0/24 metric 260"
+       rc=$?
+       if [ $rc -eq 0 ]; then
+               check_route "172.16.104.0/24 dev dummy2 proto kernel scope link src 172.16.104.0 metric 260"
+               rc=$?
+       fi
+       log_test $rc 0 "Modify metric of .0/24 address"
+
+       run_cmd "$IP addr flush dev dummy2"
+       run_cmd "$IP addr add dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 260"
+       run_cmd "$IP addr change dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 261"
+       rc=$?
+       if [ $rc -eq 0 ]; then
+               check_route "172.16.104.2 dev dummy2 proto kernel scope link src 172.16.104.1 metric 261"
+               rc=$?
+       fi
+       log_test $rc 0 "Modify metric of address with peer route"
+
        $IP li del dummy1
        $IP li del dummy2
        cleanup
old mode 100644 (file)
new mode 100755 (executable)
index fe3230c..fb7a59e 100644 (file)
@@ -129,7 +129,7 @@ static void test(int *rcv_fds, int count, int proto)
 {
        struct epoll_event ev;
        int epfd, i, test_fd;
-       uint16_t test_family;
+       int test_family;
        socklen_t len;
 
        epfd = epoll_create(1);
@@ -146,6 +146,7 @@ static void test(int *rcv_fds, int count, int proto)
        send_from_v4(proto);
 
        test_fd = receive_once(epfd, proto);
+       len = sizeof(test_family);
        if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
                error(1, errno, "failed to read socket domain");
        if (test_family != AF_INET)
index ddabb2f..88ec134 100644 (file)
         "teardown": [
             "$TC actions flush action csum"
         ]
+    },
+    {
+        "id": "eaf0",
+        "name": "Add csum iph action with no_percpu flag",
+        "category": [
+            "actions",
+            "csum"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action csum",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action csum iph no_percpu",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions list action csum",
+        "matchPattern": "action order [0-9]*: csum \\(iph\\) action pass.*no_percpu",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action csum"
+        ]
     }
 ]
index 62b82fe..4202e95 100644 (file)
             "$TC actions flush action ct"
         ]
     },
+    {
+        "id": "e38c",
+        "name": "Add simple ct action with cookie",
+        "category": [
+            "actions",
+            "ct"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action ct",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action ct index 42 cookie deadbeef",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions list action ct",
+        "matchPattern": "action order [0-9]*: ct zone 0 pipe.*index 42 ref.*cookie deadbeef",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action ct"
+        ]
+    },
     {
         "id": "9f20",
         "name": "Add ct clear action",
             "$TC actions flush action ct"
         ]
     },
+    {
+        "id": "0bc1",
+        "name": "Add ct clear action with cookie of max length",
+        "category": [
+            "actions",
+            "ct"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action ct",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action ct clear index 42 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions list action ct",
+        "matchPattern": "action order [0-9]*: ct clear pipe.*index 42 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action ct"
+        ]
+    },
     {
         "id": "5bea",
         "name": "Try ct with zone",
         "teardown": [
             "$TC actions flush action ct"
         ]
+    },
+    {
+        "id": "2faa",
+        "name": "Try ct with mark + mask and cookie",
+        "category": [
+            "actions",
+            "ct"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action ct",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action ct mark 0x42/0xf0 index 42 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions list action ct",
+        "matchPattern": "action order [0-9]*: ct mark 66/0xf0 zone 0 pipe.*index 42 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action ct"
+        ]
+    },
+    {
+        "id": "3991",
+        "name": "Add simple ct action with no_percpu flag",
+        "category": [
+            "actions",
+            "ct"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action ct",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action ct no_percpu",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions list action ct",
+        "matchPattern": "action order [0-9]*: ct zone 0 pipe.*no_percpu",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action ct"
+        ]
     }
 ]
index 814b7a8..b24494c 100644 (file)
         "teardown": [
             "$TC actions flush action gact"
         ]
+    },
+    {
+        "id": "95ad",
+        "name": "Add gact pass action with no_percpu flag",
+        "category": [
+            "actions",
+            "gact"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action gact",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action pass no_percpu",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions list action gact",
+        "matchPattern": "action order [0-9]*: gact action pass.*no_percpu",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action gact"
+        ]
     }
 ]
index 2232b21..12a2fe0 100644 (file)
         "matchPattern": "^[ \t]+index [0-9]+ ref",
         "matchCount": "0",
         "teardown": []
+    },
+    {
+        "id": "31e3",
+        "name": "Add mirred mirror to egress action with no_percpu flag",
+        "category": [
+            "actions",
+            "mirred"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action mirred",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action mirred egress mirror dev lo no_percpu",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions list action mirred",
+        "matchPattern": "action order [0-9]*: mirred \\(Egress Mirror to device lo\\).*no_percpu",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action mirred"
+        ]
     }
 ]
index 7871073..6035956 100644 (file)
         "cmdUnderTest": "$TC actions add action pedit ex munge ip tos add 0x1 pass",
         "expExitCode": "0",
         "verifyCmd": "$TC actions list action pedit",
-        "matchPattern": "action order [0-9]+:  pedit action pass keys 1.*key #0  at ipv4\\+0: val 00010000 mask ff00ffff",
+        "matchPattern": "action order [0-9]+:  pedit action pass keys 1.*key #0  at ipv4\\+0: add 00010000 mask ff00ffff",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action pedit"
         "cmdUnderTest": "$TC actions add action pedit ex munge ip precedence add 0x1 pipe",
         "expExitCode": "0",
         "verifyCmd": "$TC actions list action pedit",
-        "matchPattern": "action order [0-9]+:  pedit action pipe keys 1.*key #0  at ipv4\\+0: val 00010000 mask ff00ffff",
+        "matchPattern": "action order [0-9]+:  pedit action pipe keys 1.*key #0  at ipv4\\+0: add 00010000 mask ff00ffff",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action pedit"
index 28453a4..fbeb919 100644 (file)
         "teardown": [
             "$TC actions flush action tunnel_key"
         ]
+    },
+    {
+        "id": "0cd2",
+        "name": "Add tunnel_key set action with no_percpu flag",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 id 1 no_percpu",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions list action tunnel_key",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1.*no_percpu",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
     }
 ]
index 6503b1c..41d7832 100644 (file)
         "matchPattern": "^[ \t]+index [0-9]+ ref",
         "matchCount": "0",
         "teardown": []
+    },
+    {
+        "id": "1a3d",
+        "name": "Add vlan pop action with no_percpu flag",
+        "category": [
+            "actions",
+            "vlan"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action vlan",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action vlan pop no_percpu",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions list action vlan",
+        "matchPattern": "action order [0-9]+: vlan.*pop.*no_percpu",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action vlan"
+        ]
     }
 ]
index 362a018..8731dfe 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/perf_event.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/uaccess.h>
 #include <asm/kvm_emulate.h>
 #include <kvm/arm_pmu.h>
@@ -146,8 +147,7 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
        if (kvm_pmu_pmc_is_chained(pmc) &&
            kvm_pmu_idx_is_high_counter(select_idx))
                counter = upper_32_bits(counter);
-
-       else if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
+       else if (select_idx != ARMV8_PMU_CYCLE_IDX)
                counter = lower_32_bits(counter);
 
        return counter;
@@ -193,7 +193,7 @@ static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
  */
 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 {
-       u64 counter, reg;
+       u64 counter, reg, val;
 
        pmc = kvm_pmu_get_canonical_pmc(pmc);
        if (!pmc->perf_event)
@@ -201,16 +201,19 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 
        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
 
-       if (kvm_pmu_pmc_is_chained(pmc)) {
-               reg = PMEVCNTR0_EL0 + pmc->idx;
-               __vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
-               __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
+       if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
+               reg = PMCCNTR_EL0;
+               val = counter;
        } else {
-               reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
-                      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
-               __vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
+               reg = PMEVCNTR0_EL0 + pmc->idx;
+               val = lower_32_bits(counter);
        }
 
+       __vcpu_sys_reg(vcpu, reg) = val;
+
+       if (kvm_pmu_pmc_is_chained(pmc))
+               __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
+
        kvm_pmu_release_perf_event(pmc);
 }
 
@@ -440,8 +443,25 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
                                  struct pt_regs *regs)
 {
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+       struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
        int idx = pmc->idx;
+       u64 period;
+
+       cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);
+
+       /*
+        * Reset the sample period to the architectural limit,
+        * i.e. the point where the counter overflows.
+        */
+       period = -(local64_read(&perf_event->count));
+
+       if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
+               period &= GENMASK(31, 0);
+
+       local64_set(&perf_event->hw.period_left, 0);
+       perf_event->attr.sample_period = period;
+       perf_event->hw.sample_period = period;
 
        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
 
@@ -449,6 +469,8 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
                kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
                kvm_vcpu_kick(vcpu);
        }
+
+       cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
 }
 
 /**
@@ -567,12 +589,12 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
                 * high counter.
                 */
                attr.sample_period = (-counter) & GENMASK(63, 0);
+               if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
+                       attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
+
                event = perf_event_create_kernel_counter(&attr, -1, current,
                                                         kvm_pmu_perf_overflow,
                                                         pmc + 1);
-
-               if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
-                       attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
        } else {
                /* The initial sample period (overflow count) of an event. */
                if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
index fd68fbe..d6f0696 100644 (file)
@@ -627,8 +627,9 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
 
 static struct kvm *kvm_create_vm(unsigned long type)
 {
-       int r, i;
        struct kvm *kvm = kvm_arch_alloc_vm();
+       int r = -ENOMEM;
+       int i;
 
        if (!kvm)
                return ERR_PTR(-ENOMEM);
@@ -640,44 +641,45 @@ static struct kvm *kvm_create_vm(unsigned long type)
        mutex_init(&kvm->lock);
        mutex_init(&kvm->irq_lock);
        mutex_init(&kvm->slots_lock);
-       refcount_set(&kvm->users_count, 1);
        INIT_LIST_HEAD(&kvm->devices);
 
-       r = kvm_arch_init_vm(kvm, type);
-       if (r)
-               goto out_err_no_disable;
-
-       r = hardware_enable_all();
-       if (r)
-               goto out_err_no_disable;
-
-#ifdef CONFIG_HAVE_KVM_IRQFD
-       INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
-#endif
-
        BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
 
-       r = -ENOMEM;
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
                struct kvm_memslots *slots = kvm_alloc_memslots();
+
                if (!slots)
-                       goto out_err_no_srcu;
+                       goto out_err_no_arch_destroy_vm;
                /* Generations must be different for each address space. */
                slots->generation = i;
                rcu_assign_pointer(kvm->memslots[i], slots);
        }
 
-       if (init_srcu_struct(&kvm->srcu))
-               goto out_err_no_srcu;
-       if (init_srcu_struct(&kvm->irq_srcu))
-               goto out_err_no_irq_srcu;
        for (i = 0; i < KVM_NR_BUSES; i++) {
                rcu_assign_pointer(kvm->buses[i],
                        kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
                if (!kvm->buses[i])
-                       goto out_err;
+                       goto out_err_no_arch_destroy_vm;
        }
 
+       refcount_set(&kvm->users_count, 1);
+       r = kvm_arch_init_vm(kvm, type);
+       if (r)
+               goto out_err_no_arch_destroy_vm;
+
+       r = hardware_enable_all();
+       if (r)
+               goto out_err_no_disable;
+
+#ifdef CONFIG_HAVE_KVM_IRQFD
+       INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
+#endif
+
+       if (init_srcu_struct(&kvm->srcu))
+               goto out_err_no_srcu;
+       if (init_srcu_struct(&kvm->irq_srcu))
+               goto out_err_no_irq_srcu;
+
        r = kvm_init_mmu_notifier(kvm);
        if (r)
                goto out_err;
@@ -697,7 +699,9 @@ out_err_no_irq_srcu:
 out_err_no_srcu:
        hardware_disable_all();
 out_err_no_disable:
-       refcount_set(&kvm->users_count, 0);
+       kvm_arch_destroy_vm(kvm);
+       WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
+out_err_no_arch_destroy_vm:
        for (i = 0; i < KVM_NR_BUSES; i++)
                kfree(kvm_get_bus(kvm, i));
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
@@ -2360,20 +2364,23 @@ out:
        kvm_arch_vcpu_unblocking(vcpu);
        block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
 
-       if (!vcpu_valid_wakeup(vcpu))
-               shrink_halt_poll_ns(vcpu);
-       else if (halt_poll_ns) {
-               if (block_ns <= vcpu->halt_poll_ns)
-                       ;
-               /* we had a long block, shrink polling */
-               else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
+       if (!kvm_arch_no_poll(vcpu)) {
+               if (!vcpu_valid_wakeup(vcpu)) {
                        shrink_halt_poll_ns(vcpu);
-               /* we had a short halt and our poll time is too small */
-               else if (vcpu->halt_poll_ns < halt_poll_ns &&
-                       block_ns < halt_poll_ns)
-                       grow_halt_poll_ns(vcpu);
-       } else
-               vcpu->halt_poll_ns = 0;
+               } else if (halt_poll_ns) {
+                       if (block_ns <= vcpu->halt_poll_ns)
+                               ;
+                       /* we had a long block, shrink polling */
+                       else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
+                               shrink_halt_poll_ns(vcpu);
+                       /* we had a short halt and our poll time is too small */
+                       else if (vcpu->halt_poll_ns < halt_poll_ns &&
+                               block_ns < halt_poll_ns)
+                               grow_halt_poll_ns(vcpu);
+               } else {
+                       vcpu->halt_poll_ns = 0;
+               }
+       }
 
        trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
        kvm_arch_vcpu_block_finish(vcpu);